Diffstat (limited to 'drivers')
2873 files changed, 112804 insertions, 44537 deletions
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 682c53245286..9e055b5ce03d 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -8,6 +8,7 @@ config DRM_ACCEL_IVPU
 	select FW_LOADER
 	select DRM_GEM_SHMEM_HELPER
 	select GENERIC_ALLOCATOR
+	select WANT_DEV_COREDUMP
 	help
 	  Choose this option if you have a system with an 14th generation
 	  Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
@@ -15,3 +16,12 @@ config DRM_ACCEL_IVPU
 	  and Deep Learning applications.
 
 	  If "M" is selected, the module will be called intel_vpu.
+
+config DRM_ACCEL_IVPU_DEBUG
+	bool "Intel NPU debug mode"
+	depends on DRM_ACCEL_IVPU
+	help
+	  Choose this option to enable additional
+	  debug features for the Intel NPU driver:
+	    - Always print debug messages regardless of dyndbg config,
+	    - Enable unsafe module params.
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index ebd682a42eb1..1029e0bab061 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -16,8 +16,14 @@ intel_vpu-y := \
 	ivpu_mmu_context.o \
 	ivpu_ms.o \
 	ivpu_pm.o \
-	ivpu_sysfs.o
+	ivpu_sysfs.o \
+	ivpu_trace_points.o
 
 intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o
+intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o
 
 obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
+
+subdir-ccflags-$(CONFIG_DRM_ACCEL_IVPU_DEBUG) += -DDEBUG
+
+CFLAGS_ivpu_trace_points.o = -I$(src)
diff --git a/drivers/accel/ivpu/ivpu_coredump.c b/drivers/accel/ivpu/ivpu_coredump.c
new file mode 100644
index 000000000000..16ad0c30818c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+
+#include "ivpu_coredump.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "vpu_boot_api.h"
+
+#define CRASH_DUMP_HEADER "Intel NPU crash dump"
+#define CRASH_DUMP_HEADERS_SIZE SZ_4K
+
+void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+	struct drm_print_iterator pi = {};
+	struct drm_printer p;
+	size_t coredump_size;
+	char *coredump;
+
+	coredump_size = CRASH_DUMP_HEADERS_SIZE + FW_VERSION_HEADER_SIZE +
+			ivpu_bo_size(vdev->fw->mem_log_crit) + ivpu_bo_size(vdev->fw->mem_log_verb);
+	coredump = vmalloc(coredump_size);
+	if (!coredump)
+		return;
+
+	pi.data = coredump;
+	pi.remain = coredump_size;
+	p = drm_coredump_printer(&pi);
+
+	drm_printf(&p, "%s\n", CRASH_DUMP_HEADER);
+	drm_printf(&p, "FW version: %s\n", vdev->fw->version);
+	ivpu_fw_log_print(vdev, false, &p);
+
+	dev_coredumpv(vdev->drm.dev, coredump, pi.offset, GFP_KERNEL);
+}
diff --git a/drivers/accel/ivpu/ivpu_coredump.h b/drivers/accel/ivpu/ivpu_coredump.h
new file mode 100644
index 000000000000..8efb09d02441
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __IVPU_COREDUMP_H__
+#define __IVPU_COREDUMP_H__
+
+#include <drm/drm_print.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_fw_log.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+void ivpu_dev_coredump(struct ivpu_device *vdev);
+#else
+static inline void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+	struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+	ivpu_fw_log_print(vdev, false, &p);
+}
+#endif
+
+#endif /* __IVPU_COREDUMP_H__ */
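The new ivpu_coredump.c path hands the formatted buffer to dev_coredumpv(), which publishes it through the devcoredump class in sysfs. A minimal user-space sketch for retrieving such a dump; the devcd0 index is illustrative (the kernel numbers the nodes as dumps appear), and writing anything to the data file frees the dump:

#include <stdio.h>

int main(void)
{
	/* devcd<N> appears under this class when a device coredump is pending */
	FILE *f = fopen("/sys/class/devcoredump/devcd0/data", "rb");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("no coredump pending");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout); /* starts with "Intel NPU crash dump" */
	fclose(f);
	return 0;
}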
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 8d50981594d1..8180b95ed69d 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -45,6 +45,14 @@ static int fw_name_show(struct seq_file *s, void *v)
 	return 0;
 }
 
+static int fw_version_show(struct seq_file *s, void *v)
+{
+	struct ivpu_device *vdev = seq_to_ivpu(s);
+
+	seq_printf(s, "%s\n", vdev->fw->version);
+	return 0;
+}
+
 static int fw_trace_capability_show(struct seq_file *s, void *v)
 {
 	struct ivpu_device *vdev = seq_to_ivpu(s);
@@ -119,6 +127,7 @@ static int firewall_irq_counter_show(struct seq_file *s, void *v)
 static const struct drm_debugfs_info vdev_debugfs_list[] = {
 	{"bo_list", bo_list_show, 0},
 	{"fw_name", fw_name_show, 0},
+	{"fw_version", fw_version_show, 0},
 	{"fw_trace_capability", fw_trace_capability_show, 0},
 	{"fw_trace_config", fw_trace_config_show, 0},
 	{"last_bootmode", last_bootmode_show, 0},
@@ -127,32 +136,23 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = {
 	{"firewall_irq_counter", firewall_irq_counter_show, 0},
 };
 
-static ssize_t
-dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int dvfs_mode_get(void *data, u64 *dvfs_mode)
 {
-	struct ivpu_device *vdev = file->private_data;
-	struct ivpu_fw_info *fw = vdev->fw;
-	u32 dvfs_mode;
-	int ret;
-
-	ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
-	if (ret < 0)
-		return ret;
+	struct ivpu_device *vdev = (struct ivpu_device *)data;
 
-	fw->dvfs_mode = dvfs_mode;
+	*dvfs_mode = vdev->fw->dvfs_mode;
+	return 0;
+}
 
-	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
-	if (ret)
-		return ret;
+static int dvfs_mode_set(void *data, u64 dvfs_mode)
+{
+	struct ivpu_device *vdev = (struct ivpu_device *)data;
 
-	return size;
+	vdev->fw->dvfs_mode = (u32)dvfs_mode;
+	return pci_try_reset_function(to_pci_dev(vdev->drm.dev));
 }
 
-static const struct file_operations dvfs_mode_fops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.write = dvfs_mode_fops_write,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(dvfs_mode_fops, dvfs_mode_get, dvfs_mode_set, "%llu\n");
 
 static ssize_t
 fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
@@ -201,7 +201,7 @@ fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
 	if (!size)
 		return -EINVAL;
 
-	ivpu_fw_log_clear(vdev);
+	ivpu_fw_log_mark_read(vdev);
 	return size;
 }
@@ -346,49 +346,23 @@ static const struct file_operations ivpu_force_recovery_fops = {
 	.write = ivpu_force_recovery_fn,
 };
 
-static ssize_t
-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int ivpu_reset_engine_fn(void *data, u64 val)
 {
-	struct ivpu_device *vdev = file->private_data;
-
-	if (!size)
-		return -EINVAL;
+	struct ivpu_device *vdev = (struct ivpu_device *)data;
 
-	if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
-		return -ENODEV;
-	if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
-		return -ENODEV;
-
-	return size;
+	return ivpu_jsm_reset_engine(vdev, (u32)val);
 }
 
-static const struct file_operations ivpu_reset_engine_fops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.write = ivpu_reset_engine_fn,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");
 
-static ssize_t
-ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int ivpu_resume_engine_fn(void *data, u64 val)
 {
-	struct ivpu_device *vdev = file->private_data;
-
-	if (!size)
-		return -EINVAL;
+	struct ivpu_device *vdev = (struct ivpu_device *)data;
 
-	if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
-		return -ENODEV;
-	if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
-		return -ENODEV;
-
-	return size;
+	return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
 }
 
-static const struct file_operations ivpu_resume_engine_fops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.write = ivpu_resume_engine_fn,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");
 
 static int dct_active_get(void *data, u64 *active_percent)
 {
@@ -432,7 +406,7 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
 	debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
 			    &ivpu_force_recovery_fops);
 
-	debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
+	debugfs_create_file("dvfs_mode", 0644, debugfs_root, vdev,
 			    &dvfs_mode_fops);
 
 	debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
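DEFINE_DEBUGFS_ATTRIBUTE generates the simple-attribute read/write plumbing from a pair of u64 get/set callbacks, which is what lets the hand-rolled file_operations above be deleted and dvfs_mode become readable (0644). A self-contained sketch of the same pattern; the foo_* names are illustrative, not part of this driver:

#include <linux/debugfs.h>

static u64 foo_value; /* illustrative backing state */

static int foo_get(void *data, u64 *val)
{
	*val = foo_value;
	return 0;
}

static int foo_set(void *data, u64 val)
{
	foo_value = val;
	return 0;
}

/* Generates foo_fops; "%llu\n" is the format used when the file is read */
DEFINE_DEBUGFS_ATTRIBUTE(foo_fops, foo_get, foo_set, "%llu\n");

static void foo_debugfs_init(struct dentry *root)
{
	/* 0644: readable as well as writable, since a get() callback exists */
	debugfs_create_file("foo", 0644, root, NULL, &foo_fops);
}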
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index c91400ecf926..ca2bf47ce248 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
+#include <generated/utsrelease.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_file.h>
@@ -14,7 +15,7 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_prime.h>
 
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
 #include "ivpu_debugfs.h"
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
@@ -29,10 +30,10 @@
 #include "ivpu_ms.h"
 #include "ivpu_pm.h"
 #include "ivpu_sysfs.h"
+#include "vpu_boot_api.h"
 
 #ifndef DRIVER_VERSION_STR
-#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
-			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
+#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
 #endif
 
 static struct lock_class_key submitted_jobs_xa_lock_class_key;
@@ -42,8 +43,10 @@ module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
 MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
 
 int ivpu_test_mode;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
 module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
 MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
+#endif
 
 u8 ivpu_pll_min_ratio;
 module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@@ -53,9 +56,9 @@ u8 ivpu_pll_max_ratio = U8_MAX;
 module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
 MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
 
-int ivpu_sched_mode;
+int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
 module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
-MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
 
 bool ivpu_disable_mmu_cont_pages;
 module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
@@ -85,7 +88,7 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 		ivpu_cmdq_release_all_locked(file_priv);
 		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
-		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
 		file_priv->bound = false;
 		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
 	}
@@ -103,6 +106,8 @@ static void file_priv_release(struct kref *ref)
 	pm_runtime_get_sync(vdev->drm.dev);
 	mutex_lock(&vdev->context_list_lock);
 	file_priv_unbind(vdev, file_priv);
+	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
+	xa_destroy(&file_priv->cmdq_xa);
 	mutex_unlock(&vdev->context_list_lock);
 	pm_runtime_put_autosuspend(vdev->drm.dev);
 
@@ -116,8 +121,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
 	struct ivpu_file_priv *file_priv = *link;
 	struct ivpu_device *vdev = file_priv->vdev;
 
-	drm_WARN_ON(&vdev->drm, !file_priv);
-
 	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
 		 file_priv->ctx.id, kref_read(&file_priv->ref));
 
@@ -255,9 +258,14 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 		goto err_unlock;
 	}
 
-	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
-	if (ret)
-		goto err_xa_erase;
+	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
+
+	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+
+	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
+	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
+	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
 
 	mutex_unlock(&vdev->context_list_lock);
 	drm_dev_exit(idx);
@@ -269,8 +277,6 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 
 	return 0;
 
-err_xa_erase:
-	xa_erase_irq(&vdev->context_xa, ctx_id);
 err_unlock:
 	mutex_unlock(&vdev->context_list_lock);
 	mutex_destroy(&file_priv->ms_lock);
@@ -346,7 +352,7 @@ static int ivpu_hw_sched_init(struct ivpu_device *vdev)
 {
 	int ret = 0;
 
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
 		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
 		if (ret) {
 			ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
@@ -380,10 +386,7 @@ int ivpu_boot(struct ivpu_device *vdev)
 	ret = ivpu_wait_for_ready(vdev);
 	if (ret) {
 		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
-		ivpu_hw_diagnose_failure(vdev);
-		ivpu_mmu_evtq_dump(vdev);
-		ivpu_fw_log_dump(vdev);
-		return ret;
+		goto err_diagnose_failure;
 	}
 
 	ivpu_hw_irq_clear(vdev);
@@ -394,12 +397,20 @@ int ivpu_boot(struct ivpu_device *vdev)
 	if (ivpu_fw_is_cold_boot(vdev)) {
 		ret = ivpu_pm_dct_init(vdev);
 		if (ret)
-			return ret;
+			goto err_diagnose_failure;
 
-		return ivpu_hw_sched_init(vdev);
+		ret = ivpu_hw_sched_init(vdev);
+		if (ret)
+			goto err_diagnose_failure;
 	}
 
 	return 0;
+
+err_diagnose_failure:
+	ivpu_hw_diagnose_failure(vdev);
+	ivpu_mmu_evtq_dump(vdev);
+	ivpu_dev_coredump(vdev);
+	return ret;
 }
 
 void ivpu_prepare_for_reset(struct ivpu_device *vdev)
@@ -446,9 +457,16 @@ static const struct drm_driver driver = {
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
+
+#ifdef DRIVER_DATE
 	.date = DRIVER_DATE,
-	.major = DRM_IVPU_DRIVER_MAJOR,
-	.minor = DRM_IVPU_DRIVER_MINOR,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+#else
+	.date = UTS_RELEASE,
+	.major = 1,
+#endif
 };
 
 static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
@@ -606,6 +624,9 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
 	INIT_LIST_HEAD(&vdev->bo_list);
 
+	vdev->db_limit.min = IVPU_MIN_DB;
+	vdev->db_limit.max = IVPU_MAX_DB;
+
 	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
 	if (ret)
 		goto err_xa_destroy;
@@ -632,9 +653,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	if (ret)
 		goto err_shutdown;
 
-	ret = ivpu_mmu_global_context_init(vdev);
-	if (ret)
-		goto err_shutdown;
+	ivpu_mmu_global_context_init(vdev);
 
 	ret = ivpu_mmu_init(vdev);
 	if (ret)
@@ -722,6 +741,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 63f13b697eed..3fdff3f6cffd 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -21,11 +21,11 @@
 #define DRIVER_NAME "intel_vpu"
 #define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
 
-#define DRIVER_DATE "20230117"
-
-#define PCI_DEVICE_ID_MTL  0x7d1d
-#define PCI_DEVICE_ID_ARL  0xad1d
-#define PCI_DEVICE_ID_LNL  0x643e
+#define PCI_DEVICE_ID_MTL   0x7d1d
+#define PCI_DEVICE_ID_ARL   0xad1d
+#define PCI_DEVICE_ID_LNL   0x643e
+#define PCI_DEVICE_ID_PTL_P 0xb03e
 
 #define IVPU_HW_IP_37XX 37
 #define IVPU_HW_IP_40XX 40
@@ -46,17 +46,22 @@
 #define IVPU_MIN_DB 1
 #define IVPU_MAX_DB 255
 
-#define IVPU_NUM_ENGINES 2
+#define IVPU_JOB_ID_JOB_MASK     GENMASK(7, 0)
+#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+
 #define IVPU_NUM_PRIORITIES 4
-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
+#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
 
-#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
+#define IVPU_CMDQ_MIN_ID 1
+#define IVPU_CMDQ_MAX_ID 255
 
 #define IVPU_PLATFORM_SILICON 0
 #define IVPU_PLATFORM_SIMICS  2
 #define IVPU_PLATFORM_FPGA    3
 #define IVPU_PLATFORM_INVALID 8
 
+#define IVPU_SCHED_MODE_AUTO -1
+
 #define IVPU_DBG_REG	 BIT(0)
 #define IVPU_DBG_IRQ	 BIT(1)
 #define IVPU_DBG_MMU	 BIT(2)
@@ -134,6 +139,8 @@ struct ivpu_device {
 	struct xa_limit context_xa_limit;
 
 	struct xarray db_xa;
+	struct xa_limit db_limit;
+	u32 db_next;
 
 	struct mutex bo_list_lock; /* Protects bo_list */
 	struct list_head bo_list;
@@ -152,6 +159,7 @@ struct ivpu_device {
 		int tdr;
 		int autosuspend;
 		int d0i3_entry_msg;
+		int state_dump_msg;
 	} timeout;
 };
 
@@ -163,11 +171,15 @@ struct ivpu_file_priv {
 	struct kref ref;
 	struct ivpu_device *vdev;
 	struct mutex lock; /* Protects cmdq */
-	struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
+	struct xarray cmdq_xa;
 	struct ivpu_mmu_context ctx;
 	struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
 	struct list_head ms_instance_list;
 	struct ivpu_bo *ms_info_bo;
+	struct xa_limit job_limit;
+	u32 job_id_next;
+	struct xa_limit cmdq_limit;
+	u32 cmdq_id_next;
 	bool has_mmu_faults;
 	bool bound;
 	bool aborted;
@@ -185,9 +197,9 @@ extern bool ivpu_force_snoop;
 #define IVPU_TEST_MODE_NULL_SUBMISSION	 BIT(2)
 #define IVPU_TEST_MODE_D0I3_MSG_DISABLE  BIT(4)
 #define IVPU_TEST_MODE_D0I3_MSG_ENABLE   BIT(5)
-#define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6)
-#define IVPU_TEST_MODE_HWS_EXTRA_EVENTS	 BIT(7)
+#define IVPU_TEST_MODE_MIP_DISABLE	 BIT(6)
 #define IVPU_TEST_MODE_DISABLE_TIMEOUTS  BIT(8)
+#define IVPU_TEST_MODE_TURBO		 BIT(9)
 extern int ivpu_test_mode;
 
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -215,6 +227,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
 		return IVPU_HW_IP_37XX;
 	case PCI_DEVICE_ID_LNL:
 		return IVPU_HW_IP_40XX;
+	case PCI_DEVICE_ID_PTL_P:
+		return IVPU_HW_IP_50XX;
 	default:
 		dump_stack();
 		ivpu_err(vdev, "Unknown NPU IP generation\n");
@@ -229,6 +243,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
 	case PCI_DEVICE_ID_ARL:
 		return IVPU_HW_BTRS_MTL;
 	case PCI_DEVICE_ID_LNL:
+	case PCI_DEVICE_ID_PTL_P:
 		return IVPU_HW_BTRS_LNL;
 	default:
 		dump_stack();
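With IVPU_JOB_ID_CONTEXT_MASK in bits 31:8 and IVPU_JOB_ID_JOB_MASK in bits 7:0, each context gets a private 256-entry job-ID window, and any job ID can be decoded back to its owning context. A worked sketch of the packing, mirroring the FIELD_PREP usage in ivpu_open() above (the helper name is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define IVPU_JOB_ID_JOB_MASK     GENMASK(7, 0)
#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)

static void example_job_id_window(u32 ctx_id)
{
	/* e.g. ctx_id == 3: min = 0x200, max = 0x2ff */
	u32 min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, ctx_id - 1);
	u32 max = min | IVPU_JOB_ID_JOB_MASK;

	/* any job_id inside [min, max] decodes back to this context */
	u32 job_id = min + 42;
	u32 owner = FIELD_GET(IVPU_JOB_ID_CONTEXT_MASK, job_id) + 1;

	WARN_ON(owner != ctx_id || job_id > max);
}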
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index ede6165e09d9..6037ec0b3096 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -25,7 +25,6 @@
 #define FW_SHAVE_NN_MAX_SIZE	SZ_2M
 #define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
 #define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
-#define FW_VERSION_HEADER_SIZE	SZ_4K
 #define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
 
 #define WATCHDOG_MSS_REDIRECT	32
@@ -47,8 +46,10 @@
 #define IVPU_FOCUS_PRESENT_TIMER_MS 1000
 
 static char *ivpu_firmware;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
 module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
 MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
+#endif
 
 static struct {
 	int gen;
@@ -58,11 +59,14 @@ static struct {
 	{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
 	{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
 	{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+	{ IVPU_HW_IP_50XX, "vpu_50xx.bin" },
+	{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
 };
 
 /* Production fw_names from the table above */
 MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
 MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
 
 static int ivpu_fw_request(struct ivpu_device *vdev)
 {
@@ -135,6 +139,15 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
 	return true;
 }
 
+static u32
+ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+	if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
+		return ivpu_sched_mode;
+
+	return VPU_SCHEDULING_MODE_OS;
+}
+
 static int ivpu_fw_parse(struct ivpu_device *vdev)
 {
 	struct ivpu_fw_info *fw = vdev->fw;
@@ -191,8 +204,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
 	ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
 		 fw_hdr->header_version, fw_hdr->image_format);
 
-	ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
-		  (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
+	if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE))
+		ivpu_warn(vdev, "Missing firmware version\n");
+
+	ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version);
 
 	if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
 		return -EINVAL;
@@ -208,14 +223,16 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
 	fw->cold_boot_entry_point = fw_hdr->entry_point;
 	fw->entry_point = fw->cold_boot_entry_point;
 
-	fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
+	fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
 	fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
 	fw->trace_hw_component_mask = -1;
 
 	fw->dvfs_mode = 0;
+	fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
 
 	fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
 	fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+	ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
 
 	if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
 								 fw_hdr->ro_section_size,
@@ -311,7 +328,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
 		goto err_free_fw_mem;
 	}
 
-	if (ivpu_log_level <= IVPU_FW_LOG_INFO)
+	if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
 		log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
 	else
 		log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
@@ -567,8 +584,10 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
 	boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
 	boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
 
-	boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
-	boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+		boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+		boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+	}
 
 	/* Allow configuration for L2C_PAGE_TABLE with boot param value */
 	boot_params->autoconfig = 1;
@@ -604,8 +623,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
 	boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
 	boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
 	boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
-	boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
+	boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
 		boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
 	boot_params->dvfs_mode = vdev->fw->dvfs_mode;
 	if (!IVPU_WA(disable_d0i3_msg))
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 40d9d17be3f5..1d0b2bd9d65c 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,11 +1,16 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
 
 #ifndef __IVPU_FW_H__
 #define __IVPU_FW_H__
 
+#include "vpu_jsm_api.h"
+
+#define FW_VERSION_HEADER_SIZE	SZ_4K
+#define FW_VERSION_STR_SIZE	SZ_256
+
 struct ivpu_device;
 struct ivpu_bo;
 struct vpu_boot_params;
@@ -13,6 +18,7 @@ struct vpu_boot_params;
 struct ivpu_fw_info {
 	const struct firmware *file;
 	const char *name;
+	char version[FW_VERSION_STR_SIZE];
 	struct ivpu_bo *mem;
 	struct ivpu_bo *mem_shave_nn;
 	struct ivpu_bo *mem_log_crit;
@@ -32,6 +38,7 @@ struct ivpu_fw_info {
 	u32 secondary_preempt_buf_size;
 	u64 read_only_addr;
 	u32 read_only_size;
+	u32 sched_mode;
 };
 
 int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
index ef0adb5e0fbe..337c906b0210 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.c
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
 
 #include <linux/ctype.h>
@@ -15,19 +15,19 @@
 #include "ivpu_fw_log.h"
 #include "ivpu_gem.h"
 
-#define IVPU_FW_LOG_LINE_LENGTH 256
+#define IVPU_FW_LOG_LINE_LENGTH	256
 
-unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
-module_param(ivpu_log_level, uint, 0444);
-MODULE_PARM_DESC(ivpu_log_level,
-		 "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+unsigned int ivpu_fw_log_level = IVPU_FW_LOG_ERROR;
+module_param_named(fw_log_level, ivpu_fw_log_level, uint, 0444);
+MODULE_PARM_DESC(fw_log_level,
+		 "NPU firmware default log level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
 		 " info=" __stringify(IVPU_FW_LOG_INFO)
 		 " warn=" __stringify(IVPU_FW_LOG_WARN)
 		 " error=" __stringify(IVPU_FW_LOG_ERROR)
 		 " fatal=" __stringify(IVPU_FW_LOG_FATAL));
 
-static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
-		      struct vpu_tracing_buffer_header **log_header)
+static int fw_log_from_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+			  struct vpu_tracing_buffer_header **out_log)
 {
 	struct vpu_tracing_buffer_header *log;
 
@@ -48,7 +48,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
 		return -EINVAL;
 	}
 
-	*log_header = log;
+	*out_log = log;
 	*offset += log->size;
 
 	ivpu_dbg(vdev, FW_BOOT,
@@ -59,7 +59,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
 	return 0;
 }
 
-static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+static void fw_log_print_lines(char *buffer, u32 size, struct drm_printer *p)
 {
 	char line[IVPU_FW_LOG_LINE_LENGTH];
 	u32 index = 0;
@@ -87,56 +87,89 @@ static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
 	}
 	line[index] = 0;
 	if (index != 0)
-		drm_printf(p, "%s\n", line);
+		drm_printf(p, "%s", line);
 }
 
-static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
-				const char *prefix, bool only_new_msgs, struct drm_printer *p)
+static void fw_log_print_buffer(struct vpu_tracing_buffer_header *log, const char *prefix,
+				bool only_new_msgs, struct drm_printer *p)
 {
-	char *log_buffer = (void *)log + log->header_size;
-	u32 log_size = log->size - log->header_size;
-	u32 log_start = log->read_index;
-	u32 log_end = log->write_index;
-
-	if (!(log->write_index || log->wrap_count) ||
-	    (log->write_index == log->read_index && only_new_msgs)) {
-		drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
-		return;
+	char *log_data = (void *)log + log->header_size;
+	u32 data_size = log->size - log->header_size;
+	u32 log_start = only_new_msgs ? READ_ONCE(log->read_index) : 0;
+	u32 log_end = READ_ONCE(log->write_index);
+
+	if (log->wrap_count == log->read_wrap_count) {
+		if (log_end <= log_start) {
+			drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+			return;
+		}
+	} else if (log->wrap_count == log->read_wrap_count + 1) {
+		if (log_end > log_start)
+			log_start = log_end;
+	} else {
+		log_start = log_end;
 	}
 
 	drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
-	if (log->write_index > log->read_index) {
-		buffer_print(log_buffer + log_start, log_end - log_start, p);
+	if (log_end > log_start) {
+		fw_log_print_lines(log_data + log_start, log_end - log_start, p);
 	} else {
-		buffer_print(log_buffer + log_end, log_size - log_end, p);
-		buffer_print(log_buffer, log_end, p);
+		fw_log_print_lines(log_data + log_start, data_size - log_start, p);
+		fw_log_print_lines(log_data, log_end, p);
 	}
-	drm_printf(p, "\x1b[0m");
+	drm_printf(p, "\n\x1b[0m"); /* add new line and clear formatting */
 	drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
 }
 
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+static void
+fw_log_print_all_in_bo(struct ivpu_device *vdev, const char *name,
+		       struct ivpu_bo *bo, bool only_new_msgs, struct drm_printer *p)
 {
-	struct vpu_tracing_buffer_header *log_header;
+	struct vpu_tracing_buffer_header *log;
 	u32 next = 0;
 
-	while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
-		fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
+	while (fw_log_from_bo(vdev, bo, &next, &log) == 0)
+		fw_log_print_buffer(log, name, only_new_msgs, p);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+	fw_log_print_all_in_bo(vdev, "NPU critical", vdev->fw->mem_log_crit, only_new_msgs, p);
+	fw_log_print_all_in_bo(vdev, "NPU verbose", vdev->fw->mem_log_verb, only_new_msgs, p);
+}
+
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev)
+{
+	struct vpu_tracing_buffer_header *log;
+	u32 next;
+
+	next = 0;
+	while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+		log->read_index = READ_ONCE(log->write_index);
+		log->read_wrap_count = READ_ONCE(log->wrap_count);
+	}
 
 	next = 0;
-	while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
-		fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
+	while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+		log->read_index = READ_ONCE(log->write_index);
+		log->read_wrap_count = READ_ONCE(log->wrap_count);
+	}
 }
 
-void ivpu_fw_log_clear(struct ivpu_device *vdev)
+void ivpu_fw_log_reset(struct ivpu_device *vdev)
 {
-	struct vpu_tracing_buffer_header *log_header;
-	u32 next = 0;
+	struct vpu_tracing_buffer_header *log;
+	u32 next;
 
-	while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
-		log_header->read_index = log_header->write_index;
+	next = 0;
+	while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+		log->read_index = 0;
+		log->read_wrap_count = 0;
+	}
 
 	next = 0;
-	while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
-		log_header->read_index = log_header->write_index;
+	while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+		log->read_index = 0;
+		log->read_wrap_count = 0;
+	}
 }
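The rewritten fw_log_print_buffer() treats each trace buffer as a ring indexed by write_index plus a wrap_count, and compares wrap_count against the reader's read_wrap_count to decide whether the unread region is contiguous, wrapped once, or already overwritten. A simplified sketch of that decision (illustrative types and names, not the driver's):

/* Illustrative reader for a wrap-counted ring buffer; mirrors the three
 * cases handled by fw_log_print_buffer() above. Indices are byte offsets
 * into a data area of `size` bytes; the producer bumps wrap_count each
 * time write_index wraps back to 0.
 */
struct ring_hdr {
	unsigned int write_index, wrap_count;     /* producer side */
	unsigned int read_index, read_wrap_count; /* consumer side */
	unsigned int size;
};

static void ring_unread_bounds(struct ring_hdr *r,
			       unsigned int *start, unsigned int *end)
{
	*start = r->read_index;
	*end = r->write_index;

	if (r->wrap_count == r->read_wrap_count) {
		/* same lap: unread bytes are [start, end), possibly empty */
	} else if (r->wrap_count == r->read_wrap_count + 1 && *end <= *start) {
		/* one lap ahead: read [start, size) and then [0, end) */
	} else {
		/* reader was lapped: the oldest data is gone, so print the
		 * newest full lap, i.e. [end, size) followed by [0, end)
		 */
		*start = *end;
	}
}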
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
index 0b2573f6f315..8bb528a73cb7 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.h
+++ b/drivers/accel/ivpu/ivpu_fw_log.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
 
 #ifndef __IVPU_FW_LOG_H__
@@ -8,8 +8,6 @@
 
 #include <linux/types.h>
 
-#include <drm/drm_print.h>
-
 #include "ivpu_drv.h"
 
 #define IVPU_FW_LOG_DEFAULT 0
@@ -19,20 +17,15 @@
 #define IVPU_FW_LOG_ERROR   4
 #define IVPU_FW_LOG_FATAL   5
 
-extern unsigned int ivpu_log_level;
-
 #define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE	SZ_1M
 #define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE	SZ_8M
 #define IVPU_FW_CRITICAL_BUFFER_SIZE		SZ_512K
 
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
-void ivpu_fw_log_clear(struct ivpu_device *vdev);
+extern unsigned int ivpu_fw_log_level;
 
-static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
-{
-	struct drm_printer p = drm_info_printer(vdev->drm.dev);
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev);
+void ivpu_fw_log_reset(struct ivpu_device *vdev);
 
-	ivpu_fw_log_print(vdev, false, &p);
-}
 
 #endif /* __IVPU_FW_LOG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 1b409dbd332d..d8e97a760fbc 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -384,6 +384,9 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 
+	/* Add 1 jiffy to ensure the wait function never times out before intended timeout_ns */
+	timeout += 1;
+
 	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -EINVAL;
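drm_timeout_abs_to_jiffies() converts a nanosecond deadline into a whole number of jiffies, so a sub-jiffy remainder can be rounded away and the wait could expire just short of the requested time; the extra jiffy makes the wait err on the long side instead. A worked illustration of the arithmetic, as plain user-space C with an assumed tick rate:

#include <stdio.h>

int main(void)
{
	const long hz = 250;                  /* assumed tick rate: 4 ms/jiffy */
	const long long timeout_ns = 5000000; /* 5 ms requested */
	long long ns_per_jiffy = 1000000000LL / hz;
	long long jiffies = timeout_ns / ns_per_jiffy; /* truncates to 1 */

	printf("requested %lld ns, %lld jiffy = %lld ns (can expire early)\n",
	       timeout_ns, jiffies, jiffies * ns_per_jiffy);
	printf("with +1 jiffy: %lld ns (never early)\n",
	       (jiffies + 1) * ns_per_jiffy);
	return 0;
}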
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index e69c0613513f..4e1054f3466e 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -89,12 +89,14 @@ static void timeouts_init(struct ivpu_device *vdev)
 		vdev->timeout.tdr = 2000000;
 		vdev->timeout.autosuspend = -1;
 		vdev->timeout.d0i3_entry_msg = 500;
+		vdev->timeout.state_dump_msg = 10;
 	} else if (ivpu_is_simics(vdev)) {
 		vdev->timeout.boot = 50;
 		vdev->timeout.jsm = 500;
 		vdev->timeout.tdr = 10000;
-		vdev->timeout.autosuspend = -1;
+		vdev->timeout.autosuspend = 100;
 		vdev->timeout.d0i3_entry_msg = 100;
+		vdev->timeout.state_dump_msg = 10;
 	} else {
 		vdev->timeout.boot = 1000;
 		vdev->timeout.jsm = 500;
@@ -104,6 +106,7 @@ static void timeouts_init(struct ivpu_device *vdev)
 		else
 			vdev->timeout.autosuspend = 100;
 		vdev->timeout.d0i3_entry_msg = 5;
+		vdev->timeout.state_dump_msg = 10;
 	}
 }
 
@@ -111,14 +114,14 @@ static void memory_ranges_init(struct ivpu_device *vdev)
 {
 	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
 		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
-		ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
 		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
-		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
 	} else {
 		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
-		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
-		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
-		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
+		vdev->hw->ranges.dma = vdev->hw->ranges.user;
 	}
 }
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index a96a05b2acda..fc4dbfc980c8 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -46,7 +46,6 @@ struct ivpu_hw_info {
 		u32 profiling_freq;
 	} pll;
 	u32 tile_fuse;
-	u32 sched_mode;
 	u32 sku;
 	u16 config;
 	int dma_bits;
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index d0b795b344c7..fc0ee8d637f9 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -115,6 +115,8 @@
 
 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY			0x00030068u
 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK	GENMASK(7, 0)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST1_DLY_MASK	GENMASK(15, 8)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST2_DLY_MASK	GENMASK(23, 16)
 
 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY			0x0003006cu
 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK	GENMASK(7, 0)
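The POST/POST1/POST2 delays above live in adjacent byte-wide fields of a single register, so updating them is a read-modify-write over three masks (which is what REG_SET_FLD_NUM does for the driver further down, in ivpu_hw_ip.c). A generic sketch of the same field packing using the common bitfield helpers; the shortened macro names are illustrative:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define PWR_ISLAND_EN_POST_DLY_POST_MASK  GENMASK(7, 0)
#define PWR_ISLAND_EN_POST_DLY_POST1_MASK GENMASK(15, 8)
#define PWR_ISLAND_EN_POST_DLY_POST2_MASK GENMASK(23, 16)

static u32 pack_post_delays(u32 reg, u32 post, u32 post1, u32 post2)
{
	/* clear each field, then insert the new value; other bits survive */
	reg &= ~PWR_ISLAND_EN_POST_DLY_POST_MASK;
	reg |= FIELD_PREP(PWR_ISLAND_EN_POST_DLY_POST_MASK, post);
	reg &= ~PWR_ISLAND_EN_POST_DLY_POST1_MASK;
	reg |= FIELD_PREP(PWR_ISLAND_EN_POST_DLY_POST1_MASK, post1);
	reg &= ~PWR_ISLAND_EN_POST_DLY_POST2_MASK;
	reg |= FIELD_PREP(PWR_ISLAND_EN_POST_DLY_POST2_MASK, post2);
	return reg;
}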
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 745e5248803d..3212c99f3682 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -141,16 +141,10 @@ static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
 	}
 
 	config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
-	if (!tile_disable_check(config)) {
-		ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config);
-		return -EIO;
-	}
+	if (!tile_disable_check(config))
+		ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);
 
-	if (config)
-		ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
-			 BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1);
-	else
-		ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM);
+	ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);
 
 	*tile_fuse_config = config;
 	return 0;
@@ -163,7 +157,6 @@ static int info_init_mtl(struct ivpu_device *vdev)
 	hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
 	hw->sku = BTRS_MTL_TILE_SKU_BOTH;
 	hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
-	hw->sched_mode = ivpu_sched_mode;
 
 	return 0;
 }
@@ -178,7 +171,6 @@ static int info_init_lnl(struct ivpu_device *vdev)
 	if (ret)
 		return ret;
 
-	hw->sched_mode = ivpu_sched_mode;
 	hw->tile_fuse = tile_fuse_config;
 	hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
 
@@ -315,10 +307,6 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
 		wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
 		wp->epp = enable ? PLL_EPP_DEFAULT : 0;
 	}
-
-	/* Simics cannot start without at least one tile */
-	if (enable && ivpu_is_simics(vdev))
-		wp->cfg = 1;
 }
 
 static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
@@ -465,9 +453,6 @@ int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
 	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
 		return 0;
 
-	if (ivpu_is_simics(vdev))
-		return 0;
-
 	return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
 }
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 60b33fc59d96..029dd065614b 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -8,15 +8,12 @@
 #include "ivpu_hw.h"
 #include "ivpu_hw_37xx_reg.h"
 #include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_btrs.h"
 #include "ivpu_hw_ip.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_pm.h"
 
-#define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0
-#define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH    18
-#define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT  3
-#define PWR_ISLAND_STATUS_DLY_FREQ_HIGH     46
 #define PWR_ISLAND_STATUS_TIMEOUT_US        (5 * USEC_PER_MSEC)
 
 #define TIM_SAFE_ENABLE		            0xf1d0dead
@@ -268,20 +265,15 @@ void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
 		idle_gen_drive_40xx(vdev, false);
 }
 
-static void pwr_island_delay_set_50xx(struct ivpu_device *vdev)
+static void
+pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
 {
-	u32 val, post, status;
-
-	if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) {
-		post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT;
-		status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT;
-	} else {
-		post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH;
-		status = PWR_ISLAND_STATUS_DLY_FREQ_HIGH;
-	}
+	u32 val;
 
 	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
 	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
+	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
+	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
 	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);
 
 	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
@@ -311,9 +303,6 @@ static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
 		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
 
 	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
-
-	if (enable)
-		ndelay(500);
 }
 
 static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
@@ -326,9 +315,6 @@ static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
 		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
 
 	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
-
-	if (!enable)
-		ndelay(500);
 }
 
 static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
@@ -347,9 +333,11 @@ static void pwr_island_enable(struct ivpu_device *vdev)
 {
 	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
 		pwr_island_trickle_drive_37xx(vdev, true);
+		ndelay(500);
 		pwr_island_drive_37xx(vdev, true);
 	} else {
 		pwr_island_trickle_drive_40xx(vdev, true);
+		ndelay(500);
 		pwr_island_drive_40xx(vdev, true);
 	}
 }
@@ -686,13 +674,36 @@ static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
 	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
 }
 
+static void pwr_island_delay_set(struct ivpu_device *vdev)
+{
+	bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
+	u32 post, post1, post2, status;
+
+	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+		return;
+
+	switch (ivpu_device_id(vdev)) {
+	case PCI_DEVICE_ID_PTL_P:
+		post = high ? 18 : 0;
+		post1 = 0;
+		post2 = 0;
+		status = high ? 46 : 3;
+		break;
+
+	default:
+		dump_stack();
+		ivpu_err(vdev, "Unknown device ID\n");
+		return;
+	}
+
+	pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
+}
+
 int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
 {
 	int ret;
 
-	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX)
-		pwr_island_delay_set_50xx(vdev);
-
+	pwr_island_delay_set(vdev);
 	pwr_island_enable(vdev);
 
 	ret = wait_for_pwr_island_status(vdev, 0x1);
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 78b32a823241..01ebf88fe6ef 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -15,6 +15,7 @@
 #include "ivpu_ipc.h"
 #include "ivpu_jsm_msg.h"
 #include "ivpu_pm.h"
+#include "ivpu_trace.h"
 
 #define IPC_MAX_RX_MSG	128
 
@@ -227,6 +228,7 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
 		goto unlock;
 
 	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
+	trace_jsm("[tx]", req);
 
 unlock:
 	mutex_unlock(&ipc->lock);
@@ -278,12 +280,13 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
 
 		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
-			ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
+			ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
 			ret = -EBADMSG;
 		}
 
 		if (jsm_msg)
 			memcpy(jsm_msg, rx_msg->jsm_msg, size);
+		trace_jsm("[rx]", rx_msg->jsm_msg);
 	}
 
 	ivpu_ipc_rx_msg_del(vdev, rx_msg);
@@ -291,15 +294,16 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 	return ret;
 }
 
-static int
+int
 ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
 			       enum vpu_ipc_msg_type expected_resp_type,
-			       struct vpu_jsm_msg *resp, u32 channel,
-			       unsigned long timeout_ms)
+			       struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
 {
 	struct ivpu_ipc_consumer cons;
 	int ret;
 
+	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+
 	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
 
 	ret = ivpu_ipc_send(vdev, &cons, req);
@@ -325,19 +329,21 @@ consumer_del:
 	return ret;
 }
 
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
-				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
-				 u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+			  u32 channel, unsigned long timeout_ms)
 {
 	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
 	struct vpu_jsm_msg hb_resp;
 	int ret, hb_ret;
 
-	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+	ret = ivpu_rpm_get(vdev);
+	if (ret < 0)
+		return ret;
 
 	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
 	if (ret != -ETIMEDOUT)
-		return ret;
+		goto rpm_put;
 
 	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
 						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
@@ -345,21 +351,33 @@ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
 	if (hb_ret == -ETIMEDOUT)
 		ivpu_pm_trigger_recovery(vdev, "IPC timeout");
 
+rpm_put:
+	ivpu_rpm_put(vdev);
 	return ret;
 }
 
-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
-			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
-			  u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+			   u32 channel, unsigned long timeout_ms)
 {
+	struct ivpu_ipc_consumer cons;
 	int ret;
 
 	ret = ivpu_rpm_get(vdev);
 	if (ret < 0)
 		return ret;
 
-	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
+	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
+	ret = ivpu_ipc_send(vdev, &cons, req);
+	if (ret) {
+		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
+		goto consumer_del;
+	}
+
+	msleep(timeout_ms);
+
+consumer_del:
+	ivpu_ipc_consumer_del(vdev, &cons);
 	ivpu_rpm_put(vdev);
 	return ret;
 }
@@ -518,7 +536,6 @@ void ivpu_ipc_fini(struct ivpu_device *vdev)
 {
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 
-	drm_WARN_ON(&vdev->drm, ipc->on);
 	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
 	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
 	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index 4fe38141045e..b4dfb504679b 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -101,12 +101,13 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
 		     unsigned long timeout_ms);
-
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
-				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
-				 u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+				   enum vpu_ipc_msg_type expected_resp_type,
+				   struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms);
 int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
 			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
 			  u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+			   u32 channel, unsigned long timeout_ms);
 
 #endif /* __IVPU_IPC_H__ */
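Unlike ivpu_ipc_send_receive(), the new ivpu_ipc_send_and_wait() fires a request, sleeps for the timeout and never consumes a structured response; together with the new timeout.state_dump_msg it suggests callers that only need the firmware to act, not to answer. One plausible caller shape, not shown in this diff (VPU_JSM_MSG_STATE_DUMP and the timeout field do appear above; the function name here is hypothetical):

/* Sketch: ask the firmware to dump its state and give it time to write
 * the log; no response payload is expected back.
 */
static int example_state_dump(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };

	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
				      vdev->timeout.state_dump_msg);
}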
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index be2e2bf0f43f..7149312f16e1 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -18,11 +18,10 @@
 #include "ivpu_job.h"
 #include "ivpu_jsm_msg.h"
 #include "ivpu_pm.h"
+#include "ivpu_trace.h"
 #include "vpu_boot_api.h"
 
 #define CMD_BUF_IDX	     0
-#define JOB_ID_JOB_MASK	     GENMASK(7, 0)
-#define JOB_ID_CONTEXT_MASK  GENMASK(31, 8)
 #define JOB_MAX_BUFFER_COUNT 65535
 
 static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
@@ -35,24 +34,20 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
 {
 	u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
 	u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
-	struct ivpu_addr_range range;
 
-	if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
+	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
+	    ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
 		return 0;
 
-	range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
-	range.end = vdev->hw->ranges.user.end;
-	cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, primary_size,
-						   DRM_IVPU_BO_WC);
+	cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
+						   primary_size, DRM_IVPU_BO_WC);
 	if (!cmdq->primary_preempt_buf) {
 		ivpu_err(vdev, "Failed to create primary preemption buffer\n");
 		return -ENOMEM;
 	}
 
-	range.start = vdev->hw->ranges.shave.end - (secondary_size * IVPU_NUM_CMDQS_PER_CTX);
-	range.end = vdev->hw->ranges.shave.end;
-	cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, secondary_size,
-						     DRM_IVPU_BO_WC);
+	cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
+						     secondary_size, DRM_IVPU_BO_WC);
 	if (!cmdq->secondary_preempt_buf) {
 		ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
 		goto err_free_primary;
@@ -62,24 +57,24 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
 
 err_free_primary:
 	ivpu_bo_free(cmdq->primary_preempt_buf);
+	cmdq->primary_preempt_buf = NULL;
 	return -ENOMEM;
 }
 
 static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
 					 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 {
-	if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
+	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
 		return;
 
-	drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
-	drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
-	ivpu_bo_free(cmdq->primary_preempt_buf);
-	ivpu_bo_free(cmdq->secondary_preempt_buf);
+	if (cmdq->primary_preempt_buf)
+		ivpu_bo_free(cmdq->primary_preempt_buf);
+	if (cmdq->secondary_preempt_buf)
+		ivpu_bo_free(cmdq->secondary_preempt_buf);
 }
 
 static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 {
-	struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
 	struct ivpu_device *vdev = file_priv->vdev;
 	struct ivpu_cmdq *cmdq;
 	int ret;
@@ -88,25 +83,33 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 	if (!cmdq)
 		return NULL;
 
-	ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
-	if (ret) {
+	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+			      GFP_KERNEL);
+	if (ret < 0) {
 		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
 		goto err_free_cmdq;
 	}
 
+	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+			      &file_priv->cmdq_id_next, GFP_KERNEL);
+	if (ret < 0) {
+		ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
+		goto err_erase_db_xa;
+	}
+
 	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
 	if (!cmdq->mem)
-		goto err_erase_xa;
+		goto err_erase_cmdq_xa;
 
 	ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
 	if (ret)
-		goto err_free_cmdq_mem;
+		ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
 
 	return cmdq;
 
-err_free_cmdq_mem:
-	ivpu_bo_free(cmdq->mem);
-err_erase_xa:
+err_erase_cmdq_xa:
+	xa_erase(&file_priv->cmdq_xa, cmdq->id);
+err_erase_db_xa:
 	xa_erase(&vdev->db_xa, cmdq->db_id);
 err_free_cmdq:
 	kfree(cmdq);
@@ -130,13 +133,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
 	struct ivpu_device *vdev = file_priv->vdev;
 	int ret;
 
-	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
+	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
				       task_pid_nr(current), engine,
				       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
 	if (ret)
 		return ret;
 
-	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
+	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
 							priority);
 	if (ret)
 		return ret;
@@ -149,21 +152,22 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 	struct ivpu_device *vdev = file_priv->vdev;
 	int ret;
 
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
-		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
 					       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
 	else
 		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
 					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
 
 	if (!ret)
-		ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
+		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
+			 cmdq->db_id, cmdq->id, file_priv->ctx.id);
 
 	return ret;
 }
 
 static int
-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
+ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
 {
 	struct ivpu_device *vdev = file_priv->vdev;
 	struct vpu_job_queue_header *jobq_header;
@@ -179,13 +183,18 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
 	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
 	jobq_header = &cmdq->jobq->header;
-	jobq_header->engine_idx = engine;
+	jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
 	jobq_header->head = 0;
 	jobq_header->tail = 0;
+	if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
+		ivpu_dbg(vdev, JOB, "Turbo mode enabled");
+		jobq_header->flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+	}
+
 	wmb(); /* Flush WC buffer for jobq->header */
 
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
-		ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+		ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
 		if (ret)
 			return ret;
 	}
@@ -211,10 +220,10 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 
 	cmdq->db_registered = false;
 
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
-		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
 		if (!ret)
-			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
+			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
 	}
 
 	ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -224,55 +233,46 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 	return 0;
 }
 
-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
-					   u8 priority)
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
 {
-	int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
-	struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
+	struct ivpu_cmdq *cmdq;
+	unsigned long cmdq_id;
 	int ret;
 
 	lockdep_assert_held(&file_priv->lock);
 
+	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+		if (cmdq->priority == priority)
+			break;
+
 	if (!cmdq) {
 		cmdq = ivpu_cmdq_alloc(file_priv);
 		if (!cmdq)
 			return NULL;
-		file_priv->cmdq[cmdq_idx] = cmdq;
+		cmdq->priority = priority;
 	}
 
-	ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
+	ret = ivpu_cmdq_init(file_priv, cmdq, priority);
 	if (ret)
 		return NULL;
 
 	return cmdq;
 }
 
-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
 {
-	int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
-	struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
+	struct ivpu_cmdq *cmdq;
+	unsigned long cmdq_id;
 
 	lockdep_assert_held(&file_priv->lock);
 
-	if (cmdq) {
-		file_priv->cmdq[cmdq_idx] = NULL;
+	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
+		xa_erase(&file_priv->cmdq_xa, cmdq_id);
 		ivpu_cmdq_fini(file_priv, cmdq);
 		ivpu_cmdq_free(file_priv, cmdq);
 	}
 }
 
-void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
-{
-	u16 engine;
-	u8 priority;
-
-	lockdep_assert_held(&file_priv->lock);
-
-	for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
-		for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
-			ivpu_cmdq_release_locked(file_priv, engine, priority);
-}
-
 /*
  * Mark the doorbell as unregistered
  * This function needs to be called when the VPU hardware is restarted
@@ -281,20 +281,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
  */
 static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
 {
-	u16 engine;
-	u8 priority;
+	struct ivpu_cmdq *cmdq;
+	unsigned long cmdq_id;
 
 	mutex_lock(&file_priv->lock);
 
-	for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
-		for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
-			int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
-			struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
-
-			if (cmdq)
-				cmdq->db_registered = false;
-		}
-	}
+	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+		cmdq->db_registered = false;
 
 	mutex_unlock(&file_priv->lock);
 }
@@ -314,17 +307,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
 
 static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
 {
-	u16 engine;
-	u8 priority;
-
-	for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
-		for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
-			int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+	struct ivpu_cmdq *cmdq;
+	unsigned long cmdq_id;
 
-			if (file_priv->cmdq[cmdq_idx])
-				ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
-		}
-	}
+	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+		ivpu_cmdq_fini(file_priv, cmdq);
 }
 
 void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
@@ -335,7 +322,7 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
 
 	ivpu_cmdq_fini_all(file_priv);
 
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
 		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
 }
 
@@ -349,24 +336,29 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
 
 	/* Check if there is space left in job queue */
 	if (next_entry == header->head) {
-		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
-			 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
+		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
+			 job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
 		return -EBUSY;
 	}
 
-	entry = &cmdq->jobq->job[tail];
+	entry = &cmdq->jobq->slot[tail].job;
 	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
 	entry->job_id = job->job_id;
 	entry->flags = 0;
 	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
 		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
 
-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
-	    (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
-		entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
-		entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
-		entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
-		entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
+	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+		if (cmdq->primary_preempt_buf) {
+			entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
+			entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
+		}
+
+		if (cmdq->secondary_preempt_buf) {
+			entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
+			entry->secondary_preempt_buf_size =
+				ivpu_bo_size(cmdq->secondary_preempt_buf);
+		}
 	}
 
 	wmb(); /* Ensure that tail is updated after filling entry */
@@ -457,6 +449,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 
 	job->file_priv = ivpu_file_priv_get(file_priv);
 
+	trace_job("create", job);
 	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
 	return job;
 
@@ -496,6 +489,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 	job->bos[CMD_BUF_IDX]->job_status = job_status;
 	dma_fence_signal(job->done_fence);
 
+	trace_job("done", job);
 	ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
 		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
 
@@ -519,7 +513,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 {
 	struct ivpu_file_priv *file_priv = job->file_priv;
 	struct ivpu_device *vdev = job->vdev;
-	struct xa_limit job_id_range;
 	struct ivpu_cmdq *cmdq;
 	bool is_first_job;
 	int ret;
@@ -530,7 +523,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 
 	mutex_lock(&file_priv->lock);
 
-	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
+	cmdq = ivpu_cmdq_acquire(file_priv, priority);
 	if (!cmdq) {
 		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
 				      file_priv->ctx.id, job->engine_idx, priority);
@@ -538,13 +531,11 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 		goto err_unlock_file_priv;
 	}
 
-	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
-	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
-
 	xa_lock(&vdev->submitted_jobs_xa);
 	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
-	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
-	if (ret) {
+	ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+				&file_priv->job_id_next, GFP_KERNEL);
+	if (ret < 0) {
 		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
 			 file_priv->ctx.id);
 		ret = -EBUSY;
@@ -566,6 +557,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 		vdev->busy_start_ts = ktime_get();
 	}
 
+	trace_job("submit", job);
 	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
 		 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
 		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
@@ -673,7 +665,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	int idx, ret;
 	u8 priority;
 
-	if (params->engine > DRM_IVPU_ENGINE_COPY)
+	if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
 		return -EINVAL;
 
 	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
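Both the doorbell IDs and the new per-context command-queue IDs now come from xa_alloc_cyclic(), which continues from *next and wraps within the limit instead of immediately reusing the lowest free slot; it returns 1 (not 0) after a wrap, which is why the checks above test ret < 0 rather than ret != 0. A minimal sketch of the allocation pattern; the queue_* names are illustrative:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(queue_xa); /* IDs start at 1 */

static int queue_id_alloc(void *queue)
{
	static const struct xa_limit limit = { .min = 1, .max = 255 };
	static u32 next;
	u32 id;
	int ret;

	/* ret == 0: fresh ID; ret == 1: succeeded after wrapping; ret < 0: error */
	ret = xa_alloc_cyclic(&queue_xa, &id, queue, limit, &next, GFP_KERNEL);
	if (ret < 0)
		return ret;

	return id;
}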
(vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { + if (cmdq->primary_preempt_buf) { + entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; + entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); + } + + if (cmdq->secondary_preempt_buf) { + entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; + entry->secondary_preempt_buf_size = + ivpu_bo_size(cmdq->secondary_preempt_buf); + } } wmb(); /* Ensure that tail is updated after filling entry */ @@ -457,6 +449,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count) job->file_priv = ivpu_file_priv_get(file_priv); + trace_job("create", job); ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx); return job; @@ -496,6 +489,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job->bos[CMD_BUF_IDX]->job_status = job_status; dma_fence_signal(job->done_fence); + trace_job("done", job); ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n", job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status); @@ -519,7 +513,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority) { struct ivpu_file_priv *file_priv = job->file_priv; struct ivpu_device *vdev = job->vdev; - struct xa_limit job_id_range; struct ivpu_cmdq *cmdq; bool is_first_job; int ret; @@ -530,7 +523,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority) mutex_lock(&file_priv->lock); - cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority); + cmdq = ivpu_cmdq_acquire(file_priv, priority); if (!cmdq) { ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n", file_priv->ctx.id, job->engine_idx, priority); @@ -538,13 +531,11 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority) goto err_unlock_file_priv; } - job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1)); - job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK; - xa_lock(&vdev->submitted_jobs_xa); is_first_job = xa_empty(&vdev->submitted_jobs_xa); - ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL); - if (ret) { + ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit, + &file_priv->job_id_next, GFP_KERNEL); + if (ret < 0) { ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n", file_priv->ctx.id); ret = -EBUSY; @@ -566,6 +557,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority) vdev->busy_start_ts = ktime_get(); } + trace_job("submit", job); ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n", job->job_id, file_priv->ctx.id, job->engine_idx, priority, job->cmd_buf_vpu_addr, cmdq->jobq->header.tail); @@ -673,7 +665,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) int idx, ret; u8 priority; - if (params->engine > DRM_IVPU_ENGINE_COPY) + if (params->engine != DRM_IVPU_ENGINE_COMPUTE) return -EINVAL; if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 6accb94028c7..8b19e3f8b4cf 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -28,8 +28,10 @@ struct ivpu_cmdq { struct ivpu_bo *secondary_preempt_buf; struct ivpu_bo *mem; u32 entry_count; + u32 id; u32 db_id; bool db_registered; + u8 priority; }; /** diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c index 
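/*
 * Job IDs in ivpu_job_submit() above are now allocated with
 * __xa_alloc_cyclic() against a per-context limit instead of
 * hand-building an ID range with FIELD_PREP(). One subtlety motivates
 * the "ret < 0" check: the cyclic variant returns 1 (not 0) when the
 * counter wraps, and only negative values are errors. Sketch under
 * that assumption (names are illustrative):
 */
#include <linux/xarray.h>

static int job_id_alloc_example(struct xarray *xa, void *job, u32 *id,
				u32 *next, struct xa_limit limit)
{
	int ret = xa_alloc_cyclic(xa, id, job, limit, next, GFP_KERNEL);

	if (ret < 0)
		return ret;	/* -EBUSY when the whole range is in use */
	return 0;		/* ret == 1 just means the ID wrapped */
}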
46ef16c3c069..30a40be76930 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.c +++ b/drivers/accel/ivpu/ivpu_jsm_msg.c @@ -48,9 +48,10 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type) IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE); IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP); IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP); - IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT); + IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED); IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL); IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED); IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE); IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE); IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE); @@ -131,7 +132,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat) struct vpu_jsm_msg resp; int ret; - if (engine > VPU_ENGINE_COPY) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.query_engine_hb.engine_idx = engine; @@ -154,7 +155,7 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine) struct vpu_jsm_msg resp; int ret; - if (engine > VPU_ENGINE_COPY) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.engine_reset.engine_idx = engine; @@ -173,7 +174,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id struct vpu_jsm_msg resp; int ret; - if (engine > VPU_ENGINE_COPY) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.engine_preempt.engine_idx = engine; @@ -196,7 +197,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN); ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp, - VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm); if (ret) ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n", command, ret); @@ -270,9 +271,8 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev) req.payload.pwr_d0i3_enter.send_response = 1; - ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, - &resp, VPU_IPC_CHAN_GEN_CMD, - vdev->timeout.d0i3_entry_msg); + ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp, + VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg); if (ret) return ret; @@ -346,7 +346,7 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine) struct vpu_jsm_msg resp; int ret; - if (engine >= VPU_ENGINE_NB) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.hws_resume_engine.engine_idx = engine; @@ -394,8 +394,6 @@ int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u3 req.payload.hws_set_scheduling_log.host_ssid = host_ssid; req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va; req.payload.hws_set_scheduling_log.notify_index = 0; - req.payload.hws_set_scheduling_log.enable_extra_events = - ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS; ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); @@ -430,8 +428,8 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev) req.payload.hws_priority_band_setup.normal_band_percentage = 10; - ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP, - &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + ret = ivpu_ipc_send_receive_internal(vdev, &req, 
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP, + &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); if (ret) ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret); @@ -544,9 +542,8 @@ int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us req.payload.pwr_dct_control.dct_active_us = active_us; req.payload.pwr_dct_control.dct_inactive_us = inactive_us; - return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, - &resp, VPU_IPC_CHAN_ASYNC_CMD, - vdev->timeout.jsm); + return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); } int ivpu_jsm_dct_disable(struct ivpu_device *vdev) @@ -554,7 +551,14 @@ int ivpu_jsm_dct_disable(struct ivpu_device *vdev) struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE }; struct vpu_jsm_msg resp; - return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, - &resp, VPU_IPC_CHAN_ASYNC_CMD, - vdev->timeout.jsm); + return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); +} + +int ivpu_jsm_state_dump(struct ivpu_device *vdev) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP }; + + return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD, + vdev->timeout.state_dump_msg); } diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h index e4e42c0ff6e6..9e84d3526a14 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.h +++ b/drivers/accel/ivpu/ivpu_jsm_msg.h @@ -43,4 +43,6 @@ int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mas u64 buffer_size, u32 *sample_size, u64 *info_size); int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us); int ivpu_jsm_dct_disable(struct ivpu_device *vdev); +int ivpu_jsm_state_dump(struct ivpu_device *vdev); + #endif diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index c078e214b221..26ef52fbb93e 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -696,7 +696,7 @@ unlock: return ret; } -static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) +static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid) { struct ivpu_mmu_info *mmu = vdev->mmu; struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; @@ -708,30 +708,29 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) return -EINVAL; entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE); - - if (cd_dma != 0) { - cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) | - FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) | - FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) | - FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) | - FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) | - FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) | - FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) | - IVPU_MMU_CD_0_TCR_EPD1 | - IVPU_MMU_CD_0_AA64 | - IVPU_MMU_CD_0_R | - IVPU_MMU_CD_0_ASET | - IVPU_MMU_CD_0_V; - cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK; - cd[2] = 0; - cd[3] = 0x0000000000007444; - - /* For global context generate memory fault on VPU */ - if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) - cd[0] |= IVPU_MMU_CD_0_A; - } else { - memset(cd, 0, sizeof(cd)); - } + drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid); + + cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) | + 
FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) | + FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) | + IVPU_MMU_CD_0_TCR_EPD1 | + IVPU_MMU_CD_0_AA64 | + IVPU_MMU_CD_0_R | + IVPU_MMU_CD_0_ASET; + cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK; + cd[2] = 0; + cd[3] = 0x0000000000007444; + + /* For global context generate memory fault on VPU */ + if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) + cd[0] |= IVPU_MMU_CD_0_A; + + if (valid) + cd[0] |= IVPU_MMU_CD_0_V; WRITE_ONCE(entry[1], cd[1]); WRITE_ONCE(entry[2], cd[2]); @@ -741,8 +740,8 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) if (!ivpu_is_force_snoop_enabled(vdev)) clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE); - ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", - cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]); + ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]); mutex_lock(&mmu->lock); if (!mmu->on) @@ -750,38 +749,18 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) ret = ivpu_mmu_cmdq_write_cfgi_all(vdev); if (ret) - goto unlock; + goto err_invalidate; ret = ivpu_mmu_cmdq_sync(vdev); + if (ret) + goto err_invalidate; unlock: mutex_unlock(&mmu->lock); - return ret; -} - -static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev) -{ - int ret; - - ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma); - if (ret) - ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret); - - return ret; -} - -static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma) -{ - int ret; - - if (ssid == 0) { - ivpu_err(vdev, "Invalid SSID: %u\n", ssid); - return -EINVAL; - } - - ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma); - if (ret) - ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret); + return 0; +err_invalidate: + WRITE_ONCE(entry[0], 0); + mutex_unlock(&mmu->lock); return ret; } @@ -808,12 +787,6 @@ int ivpu_mmu_init(struct ivpu_device *vdev) return ret; } - ret = ivpu_mmu_cd_add_gbl(vdev); - if (ret) { - ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret); - return ret; - } - ret = ivpu_mmu_enable(vdev); if (ret) { ivpu_err(vdev, "Failed to resume MMU: %d\n", ret); @@ -966,12 +939,12 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev) REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val); } -int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable) +int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable) { - return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma); + return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true); } -void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid) +void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid) { - ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */ + ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false); } diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h index 6fa35c240710..7afea9cd8731 100644 --- a/drivers/accel/ivpu/ivpu_mmu.h +++ b/drivers/accel/ivpu/ivpu_mmu.h @@ -40,8 +40,8 @@ struct ivpu_mmu_info { int ivpu_mmu_init(struct ivpu_device *vdev); void ivpu_mmu_disable(struct ivpu_device *vdev); int ivpu_mmu_enable(struct ivpu_device *vdev); -int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct 
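/*
 * ivpu_mmu_cdtab_entry_set() above publishes a context descriptor in
 * the usual SMMU fashion: body words first, control word last, so the
 * table walker can never observe a half-written descriptor. A
 * condensed sketch of that ordering; the 4-word layout mirrors the
 * driver, but BIT_ULL(0) merely stands in for the real valid-bit mask:
 */
#include <linux/bits.h>
#include <linux/compiler.h>

static void cd_publish_example(u64 *entry, const u64 cd[4], bool valid)
{
	/* 1. body words while the entry is still treated as invalid */
	WRITE_ONCE(entry[1], cd[1]);
	WRITE_ONCE(entry[2], cd[2]);
	WRITE_ONCE(entry[3], cd[3]);
	/* 2. control word last, optionally flipping the valid bit */
	WRITE_ONCE(entry[0], valid ? cd[0] | BIT_ULL(0) : cd[0]);
	/* 3. the caller then flushes the cache line and sends CFGI + sync */
}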
ivpu_mmu_pgtable *pgtable); -void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid); +int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable); +void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid); int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid); void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index bbe652a7019d..891967a95bc3 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -90,19 +90,6 @@ static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_ } } -static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) -{ - dma_addr_t pgd_dma; - - pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma); - if (!pgtable->pgd_dma_ptr) - return -ENOMEM; - - pgtable->pgd_dma = pgd_dma; - - return 0; -} - static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) { int pgd_idx, pud_idx, pmd_idx; @@ -140,6 +127,27 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt } ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma); + pgtable->pgd_dma_ptr = NULL; + pgtable->pgd_dma = 0; +} + +static u64* +ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) +{ + u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr; + dma_addr_t pgd_dma; + + if (pgd_dma_ptr) + return pgd_dma_ptr; + + pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma); + if (!pgd_dma_ptr) + return NULL; + + pgtable->pgd_dma_ptr = pgd_dma_ptr; + pgtable->pgd_dma = pgd_dma; + + return pgd_dma_ptr; } static u64* @@ -237,6 +245,12 @@ ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); + drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID); + + /* Allocate PGD - first level page table if needed */ + if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable)) + return -ENOMEM; + /* Allocate PUD - second level page table if needed */ if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx)) return -ENOMEM; @@ -418,6 +432,7 @@ int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) { + size_t start_vpu_addr = vpu_addr; struct scatterlist *sg; int ret; u64 prot; @@ -448,20 +463,36 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot); if (ret) { ivpu_err(vdev, "Failed to map context pages\n"); - mutex_unlock(&ctx->lock); - return ret; + goto err_unmap_pages; } vpu_addr += size; } + if (!ctx->is_cd_valid) { + ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable); + if (ret) { + ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n", + ctx->id, ret); + goto err_unmap_pages; + } + ctx->is_cd_valid = true; + } + /* Ensure page table modifications are flushed from wc buffers to memory */ wmb(); - mutex_unlock(&ctx->lock); - ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); - if (ret) + if (ret) { ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); + goto err_unmap_pages; + } + + mutex_unlock(&ctx->lock); + return 0; + +err_unmap_pages: + ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr); + 
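/*
 * ivpu_mmu_ensure_pgd() above moves root page-table allocation from
 * context creation to first use, so contexts that never map anything
 * never pay for a PGD. The same first-use idea, reduced to a few lines
 * (ctx_example and its field are illustrative):
 */
#include <linux/gfp.h>

struct ctx_example {
	u64 *pgd;	/* NULL until the first mapping request */
};

static u64 *ctx_ensure_pgd_example(struct ctx_example *ctx)
{
	if (ctx->pgd)
		return ctx->pgd;

	ctx->pgd = (u64 *)get_zeroed_page(GFP_KERNEL);
	return ctx->pgd;	/* NULL here propagates as -ENOMEM */
}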
mutex_unlock(&ctx->lock); return ret; } @@ -530,65 +561,75 @@ ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *n mutex_unlock(&ctx->lock); } -static int -ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id) +void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id) { u64 start, end; - int ret; mutex_init(&ctx->lock); - ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable); - if (ret) { - ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret); - return ret; - } - if (!context_id) { start = vdev->hw->ranges.global.start; end = vdev->hw->ranges.shave.end; } else { - start = vdev->hw->ranges.user.start; - end = vdev->hw->ranges.dma.end; + start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start); + end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end); } drm_mm_init(&ctx->mm, start, end - start); ctx->id = context_id; - - return 0; } -static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) +void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) { - if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr)) - return; + if (ctx->is_cd_valid) { + ivpu_mmu_cd_clear(vdev, ctx->id); + ctx->is_cd_valid = false; + } mutex_destroy(&ctx->lock); ivpu_mmu_pgtables_free(vdev, &ctx->pgtable); drm_mm_takedown(&ctx->mm); - - ctx->pgtable.pgd_dma_ptr = NULL; - ctx->pgtable.pgd_dma = 0; } -int ivpu_mmu_global_context_init(struct ivpu_device *vdev) +void ivpu_mmu_global_context_init(struct ivpu_device *vdev) { - return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID); + ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID); } void ivpu_mmu_global_context_fini(struct ivpu_device *vdev) { - return ivpu_mmu_context_fini(vdev, &vdev->gctx); + ivpu_mmu_context_fini(vdev, &vdev->gctx); } int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev) { - return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID); + int ret; + + ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID); + + mutex_lock(&vdev->rctx.lock); + + if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) { + ivpu_err(vdev, "Failed to allocate root page table for reserved context\n"); + ret = -ENOMEM; + goto unlock; + } + + ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable); + if (ret) { + ivpu_err(vdev, "Failed to set context descriptor for reserved context\n"); + goto unlock; + } + +unlock: + mutex_unlock(&vdev->rctx.lock); + return ret; } void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev) { - return ivpu_mmu_user_context_fini(vdev, &vdev->rctx); + ivpu_mmu_cd_clear(vdev, vdev->rctx.id); + ivpu_mmu_context_fini(vdev, &vdev->rctx); } void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid) @@ -603,36 +644,3 @@ void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid) xa_unlock(&vdev->context_xa); } - -int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id) -{ - int ret; - - drm_WARN_ON(&vdev->drm, !ctx_id); - - ret = ivpu_mmu_context_init(vdev, ctx, ctx_id); - if (ret) { - ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret); - return ret; - } - - ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable); - if (ret) { - ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret); - goto err_context_fini; - } - - 
return 0; - -err_context_fini: - ivpu_mmu_context_fini(vdev, ctx); - return ret; -} - -void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) -{ - drm_WARN_ON(&vdev->drm, !ctx->id); - - ivpu_mmu_clear_pgtable(vdev, ctx->id); - ivpu_mmu_context_fini(vdev, ctx); -} diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h index 7f9aaf3d10c2..8042fc067062 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.h +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -23,19 +23,20 @@ struct ivpu_mmu_pgtable { }; struct ivpu_mmu_context { - struct mutex lock; /* Protects: mm, pgtable */ + struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */ struct drm_mm mm; struct ivpu_mmu_pgtable pgtable; + bool is_cd_valid; u32 id; }; -int ivpu_mmu_global_context_init(struct ivpu_device *vdev); +void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id); +void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); +void ivpu_mmu_global_context_init(struct ivpu_device *vdev); void ivpu_mmu_global_context_fini(struct ivpu_device *vdev); int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev); void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev); -int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id); -void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid); int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range, diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c index 2f9d37f5c208..ffe7b10f8a76 100644 --- a/drivers/accel/ivpu/ivpu_ms.c +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -11,7 +11,7 @@ #include "ivpu_ms.h" #include "ivpu_pm.h" -#define MS_INFO_BUFFER_SIZE SZ_16K +#define MS_INFO_BUFFER_SIZE SZ_64K #define MS_NUM_BUFFERS 2 #define MS_READ_PERIOD_MULTIPLIER 2 #define MS_MIN_SAMPLE_PERIOD_NS 1000000 diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 59d3170f5e35..dbc0711e28d1 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -9,21 +9,25 @@ #include <linux/pm_runtime.h> #include <linux/reboot.h> -#include "vpu_boot_api.h" +#include "ivpu_coredump.h" #include "ivpu_drv.h" -#include "ivpu_hw.h" #include "ivpu_fw.h" #include "ivpu_fw_log.h" +#include "ivpu_hw.h" #include "ivpu_ipc.h" #include "ivpu_job.h" #include "ivpu_jsm_msg.h" #include "ivpu_mmu.h" #include "ivpu_ms.h" #include "ivpu_pm.h" +#include "ivpu_trace.h" +#include "vpu_boot_api.h" static bool ivpu_disable_recovery; +#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG) module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644); MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected"); +#endif static unsigned long ivpu_tdr_timeout_ms; module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644); @@ -37,6 +41,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) ivpu_cmdq_reset_all_contexts(vdev); ivpu_ipc_reset(vdev); + ivpu_fw_log_reset(vdev); ivpu_fw_load(vdev); fw->entry_point = fw->cold_boot_entry_point; } @@ -123,7 +128,8 @@ static void ivpu_pm_recovery_work(struct work_struct *work) if (ret) ivpu_err(vdev, "Failed to resume NPU: %d\n", ret); - ivpu_fw_log_dump(vdev); + ivpu_jsm_state_dump(vdev); + ivpu_dev_coredump(vdev); atomic_inc(&vdev->pm->reset_counter); 
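/*
 * Pattern used above for disable_recovery: module_param_named_unsafe()
 * (which taints the kernel when the parameter is set) is only
 * registered when the new DRM_ACCEL_IVPU_DEBUG Kconfig option is
 * enabled, while the backing variable stays compiled in so no other
 * code needs an #ifdef. Sketch with a hypothetical knob:
 */
#include <linux/module.h>

static bool example_unsafe_knob;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(example_unsafe_knob, example_unsafe_knob, bool, 0644);
MODULE_PARM_DESC(example_unsafe_knob, "Hypothetical debug-only switch");
#endif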
atomic_set(&vdev->pm->reset_pending, 1); @@ -195,6 +201,7 @@ int ivpu_pm_suspend_cb(struct device *dev) struct ivpu_device *vdev = to_ivpu_device(drm); unsigned long timeout; + trace_pm("suspend"); ivpu_dbg(vdev, PM, "Suspend..\n"); timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr); @@ -212,6 +219,7 @@ int ivpu_pm_suspend_cb(struct device *dev) ivpu_pm_prepare_warm_boot(vdev); ivpu_dbg(vdev, PM, "Suspend done.\n"); + trace_pm("suspend done"); return 0; } @@ -222,6 +230,7 @@ int ivpu_pm_resume_cb(struct device *dev) struct ivpu_device *vdev = to_ivpu_device(drm); int ret; + trace_pm("resume"); ivpu_dbg(vdev, PM, "Resume..\n"); ret = ivpu_resume(vdev); @@ -229,6 +238,7 @@ int ivpu_pm_resume_cb(struct device *dev) ivpu_err(vdev, "Failed to resume: %d\n", ret); ivpu_dbg(vdev, PM, "Resume done.\n"); + trace_pm("resume done"); return ret; } @@ -243,6 +253,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work)); + trace_pm("runtime suspend"); ivpu_dbg(vdev, PM, "Runtime suspend..\n"); ivpu_mmu_disable(vdev); @@ -262,13 +273,14 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) if (!is_idle || ret_d0i3) { ivpu_err(vdev, "Forcing cold boot due to previous errors\n"); atomic_inc(&vdev->pm->reset_counter); - ivpu_fw_log_dump(vdev); + ivpu_dev_coredump(vdev); ivpu_pm_prepare_cold_boot(vdev); } else { ivpu_pm_prepare_warm_boot(vdev); } ivpu_dbg(vdev, PM, "Runtime suspend done.\n"); + trace_pm("runtime suspend done"); return 0; } @@ -279,6 +291,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev) struct ivpu_device *vdev = to_ivpu_device(drm); int ret; + trace_pm("runtime resume"); ivpu_dbg(vdev, PM, "Runtime resume..\n"); ret = ivpu_resume(vdev); @@ -286,6 +299,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev) ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret); ivpu_dbg(vdev, PM, "Runtime resume done.\n"); + trace_pm("runtime resume done"); return ret; } @@ -411,7 +425,7 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent) ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us); if (ret) { - ivpu_err_ratelimited(vdev, "Filed to enable DCT: %d\n", ret); + ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret); return ret; } @@ -428,7 +442,7 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev) ret = ivpu_jsm_dct_disable(vdev); if (ret) { - ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret); + ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret); return ret; } diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c index 913669f1786e..616477fc17fa 100644 --- a/drivers/accel/ivpu/ivpu_sysfs.c +++ b/drivers/accel/ivpu/ivpu_sysfs.c @@ -6,6 +6,8 @@ #include <linux/device.h> #include <linux/err.h> +#include "ivpu_drv.h" +#include "ivpu_fw.h" #include "ivpu_hw.h" #include "ivpu_sysfs.h" @@ -39,8 +41,30 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b static DEVICE_ATTR_RO(npu_busy_time_us); +/** + * DOC: sched_mode + * + * The sched_mode is used to report current NPU scheduling mode. + * + * It returns following strings: + * - "HW" - Hardware Scheduler mode + * - "OS" - Operating System Scheduler mode + * + */ +static ssize_t +sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct ivpu_device *vdev = to_ivpu_device(drm); + + return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? 
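/*
 * The trace_pm() calls above come from the TRACE_EVENT(pm, ...)
 * definition added in ivpu_trace.h below; trace_job() and trace_jsm()
 * are generated the same way. TP_fast_assign() records the
 * const char * itself, so callers must pass string literals or other
 * storage that outlives the trace buffer. Minimal usage sketch
 * (pm_trace_usage_example is illustrative):
 */
static void pm_trace_usage_example(void)
{
	trace_pm("example transition");		/* rendered as: vpu:pm ... */
	/* ... state change ... */
	trace_pm("example transition done");
}
/*
 * At runtime the event is enabled through tracefs, e.g.:
 * echo 1 > /sys/kernel/tracing/events/vpu/pm/enable
 */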
"HW" : "OS"); +} + +static DEVICE_ATTR_RO(sched_mode); + static struct attribute *ivpu_dev_attrs[] = { &dev_attr_npu_busy_time_us.attr, + &dev_attr_sched_mode.attr, NULL, }; diff --git a/drivers/accel/ivpu/ivpu_trace.h b/drivers/accel/ivpu/ivpu_trace.h new file mode 100644 index 000000000000..eb792038e701 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_trace.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2024 Intel Corporation + */ + +#if !defined(__IVPU_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __IVPU_TRACE_H__ + +#include <linux/tracepoint.h> +#include "ivpu_drv.h" +#include "ivpu_job.h" +#include "vpu_jsm_api.h" +#include "ivpu_jsm_msg.h" +#include "ivpu_ipc.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vpu +#define TRACE_INCLUDE_FILE ivpu_trace + +TRACE_EVENT(pm, + TP_PROTO(const char *event), + TP_ARGS(event), + TP_STRUCT__entry(__field(const char *, event)), + TP_fast_assign(__entry->event = event;), + TP_printk("%s", __entry->event) +); + +TRACE_EVENT(job, + TP_PROTO(const char *event, struct ivpu_job *job), + TP_ARGS(event, job), + TP_STRUCT__entry(__field(const char *, event) + __field(u32, ctx_id) + __field(u32, engine_id) + __field(u32, job_id) + ), + TP_fast_assign(__entry->event = event; + __entry->ctx_id = job->file_priv->ctx.id; + __entry->engine_id = job->engine_idx; + __entry->job_id = job->job_id;), + TP_printk("%s context:%d engine:%d job:%d", + __entry->event, + __entry->ctx_id, + __entry->engine_id, + __entry->job_id) +); + +TRACE_EVENT(jsm, + TP_PROTO(const char *event, struct vpu_jsm_msg *msg), + TP_ARGS(event, msg), + TP_STRUCT__entry(__field(const char *, event) + __field(const char *, type) + __field(enum vpu_ipc_msg_status, status) + __field(u32, request_id) + __field(u32, result) + ), + TP_fast_assign(__entry->event = event; + __entry->type = ivpu_jsm_msg_type_to_str(msg->type); + __entry->status = msg->status; + __entry->request_id = msg->request_id; + __entry->result = msg->result;), + TP_printk("%s type:%s, status:%#x, id:%#x, result:%#x", + __entry->event, + __entry->type, + __entry->status, + __entry->request_id, + __entry->result) +); + +#endif /* __IVPU_TRACE_H__ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/drivers/accel/ivpu/ivpu_trace_points.c b/drivers/accel/ivpu/ivpu_trace_points.c new file mode 100644 index 000000000000..f8fb99de0de3 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_trace_points.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2024 Intel Corporation + */ + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "ivpu_trace.h" +#endif diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h index 82954b91b748..908e68ea1c39 100644 --- a/drivers/accel/ivpu/vpu_boot_api.h +++ b/drivers/accel/ivpu/vpu_boot_api.h @@ -1,14 +1,13 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright (c) 2020-2023, Intel Corporation. + * Copyright (c) 2020-2024, Intel Corporation. */ #ifndef VPU_BOOT_API_H #define VPU_BOOT_API_H /* - * =========== FW API version information beginning ================ - * The bellow values will be used to construct the version info this way: + * The below values will be used to construct the version info this way: * fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) | * VPU_BOOT_API_VER_MINOR; * VPU_BOOT_API_VER_PATCH will be ignored. 
KMD and compatibility is not affected if this changes @@ -27,19 +26,18 @@ * Minor version changes when API backward compatibility is preserved. * Resets to 0 if Major version is incremented. */ -#define VPU_BOOT_API_VER_MINOR 24 +#define VPU_BOOT_API_VER_MINOR 26 /* * API header changed (field names, documentation, formatting) but API itself has not been changed */ -#define VPU_BOOT_API_VER_PATCH 0 +#define VPU_BOOT_API_VER_PATCH 3 /* * Index in the API version table * Must be unique for each API */ #define VPU_BOOT_API_VER_INDEX 0 -/* ------------ FW API version information end ---------------------*/ #pragma pack(push, 4) @@ -164,8 +162,6 @@ enum vpu_trace_destination { /* VPU 30xx HW component IDs are sequential, so define first and last IDs. */ #define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT #define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15 -#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST -#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST struct vpu_boot_l2_cache_config { u8 use; @@ -199,6 +195,17 @@ struct vpu_warm_boot_section { */ #define POWER_PROFILE_SURVIVABILITY 0x1 +/** + * Enum for dvfs_mode boot param. + */ +enum vpu_governor { + VPU_GOV_DEFAULT = 0, /* Default Governor for the system */ + VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */ + VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */ + VPU_GOV_POWER_SAVE = 3, /* Power save governor */ + VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */ +}; + struct vpu_boot_params { u32 magic; u32 vpu_id; @@ -301,7 +308,14 @@ struct vpu_boot_params { u32 temp_sensor_period_ms; /** PLL ratio for efficient clock frequency */ u32 pn_freq_pll_ratio; - /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */ + /** + * DVFS Mode: + * 0 - Default, DVFS mode selected by the firmware + * 1 - Max Performance + * 2 - On Demand + * 3 - Power Save + * 4 - On Demand Priority Aware + */ u32 dvfs_mode; /** * Depending on DVFS Mode: @@ -332,8 +346,8 @@ struct vpu_boot_params { u64 d0i3_entry_vpu_ts; /* * The system time of the host operating system in microseconds. - * E.g the number of microseconds since 1st of January 1970, or whatever date the - * host operating system uses to maintain system time. + * E.g the number of microseconds since 1st of January 1970, or whatever + * date the host operating system uses to maintain system time. * This value will be used to track system time on the VPU. * The KMD is required to update this value on every VPU reset. */ @@ -382,10 +396,7 @@ struct vpu_boot_params { u32 pad6[734]; }; -/* - * Magic numbers set between host and vpu to detect corruptio of tracing init - */ - +/* Magic numbers set between host and vpu to detect corruption of tracing init */ #define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE) /* Tracing buffer message format definitions */ @@ -405,7 +416,9 @@ struct vpu_tracing_buffer_header { u32 host_canary_start; /* offset from start of buffer for trace entries */ u32 read_index; - u32 pad_to_cache_line_size_0[14]; + /* keeps track of wrapping on the reader side */ + u32 read_wrap_count; + u32 pad_to_cache_line_size_0[13]; /* End of first cache line */ /** diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 33f462b1a25d..7215c144158c 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -22,7 +22,7 @@ /* * Minor version changes when API backward compatibility is preserved. 
*/ -#define VPU_JSM_API_VER_MINOR 16 +#define VPU_JSM_API_VER_MINOR 25 /* * API header changed (field names, documentation, formatting) but API itself has not been changed @@ -36,7 +36,7 @@ /* * Number of Priority Bands for Hardware Scheduling - * Bands: RealTime, Focus, Normal, Idle + * Bands: Idle(0), Normal(1), Focus(2), RealTime(3) */ #define VPU_HWS_NUM_PRIORITY_BANDS 4 @@ -74,6 +74,7 @@ #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU /* Job status returned when the job was preempted mid-inference */ #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU +#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU /* * Host <-> VPU IPC channels. @@ -86,18 +87,58 @@ /* * Job flags bit masks. */ -#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001 -#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000 +enum { + /* + * Null submission mask. + * When set, batch buffer's commands are not processed but returned as + * successful immediately, except fences and timestamps. + * When cleared, batch buffer's commands are processed normally. + * Used for testing and profiling purposes. + */ + VPU_JOB_FLAGS_NULL_SUBMISSION_MASK = (1 << 0U), + /* + * Inline command mask. + * When set, the object in job queue is an inline command (see struct vpu_inline_cmd below). + * When cleared, the object in job queue is a job (see struct vpu_job_queue_entry below). + */ + VPU_JOB_FLAGS_INLINE_CMD_MASK = (1 << 1U), + /* + * VPU private data mask. + * Reserved for the VPU to store private data about the job (or inline command) + * while being processed. + */ + VPU_JOB_FLAGS_PRIVATE_DATA_MASK = 0xFFFF0000U +}; /* - * Sizes of the reserved areas in jobs, in bytes. + * Job queue flags bit masks. */ -#define VPU_JOB_RESERVED_BYTES 8 +enum { + /* + * No job done notification mask. + * When set, indicates that no job done notification should be sent for any + * job from this queue. When cleared, indicates that job done notification + * should be sent for every job completed from this queue. + */ + VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK = (1 << 0U), + /* + * Native fence usage mask. + * When set, indicates that job queue uses native fences (as inline commands + * in job queue). Such queues may also use legacy fences (as commands in batch buffers). + * When cleared, indicates the job queue only uses legacy fences. + * NOTE: For queues using native fences, VPU expects that all jobs in the queue + * are immediately followed by an inline command object. This object is expected + * to be a fence signal command in most cases, but can also be a NOP in case the host + * does not need per-job fence signalling. Other inline commands objects can be + * inserted between "job and inline command" pairs. + */ + VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), -/* - * Sizes of the reserved areas in job queues, in bytes. - */ -#define VPU_JOB_QUEUE_RESERVED_BYTES 52 + /* + * Enable turbo mode for testing NPU performance; not recommended for regular usage. + */ + VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U) +}; /* * Max length (including trailing NULL char) of trace entity name (e.g., the @@ -141,23 +182,112 @@ #define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL /* + * Inline commands types. + */ +/* + * NOP. + * VPU does nothing other than consuming the inline command object. + */ +#define VPU_INLINE_CMD_TYPE_NOP 0x0 +/* + * Fence wait. + * VPU waits for the fence current value to reach monitored value. + * Fence wait operations are executed upon job dispatching. 
While waiting for + * the fence to be satisfied, VPU blocks fetching of the next objects in the queue. + * Jobs present in the queue prior to the fence wait object may be processed + * concurrently. + */ +#define VPU_INLINE_CMD_TYPE_FENCE_WAIT 0x1 +/* + * Fence signal. + * VPU sets the fence current value to the provided value. If new current value + * is equal to or higher than monitored value, VPU sends fence signalled notification + * to the host. Fence signal operations are executed upon completion of all the jobs + * present in the queue prior to them, and in-order relative to each other in the queue. + * But jobs in-between them may be processed concurrently and may complete out-of-order. + */ +#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2 + +/* + * Job scheduling priority bands for both hardware scheduling and OS scheduling. + */ +enum vpu_job_scheduling_priority_band { + VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE = 0, + VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL = 1, + VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS = 2, + VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME = 3, + VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4, +}; + +/* * Job format. + * Jobs defines the actual workloads to be executed by a given engine. */ struct vpu_job_queue_entry { - u64 batch_buf_addr; /**< Address of VPU commands batch buffer */ - u32 job_id; /**< Job ID */ - u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */ - u64 root_page_table_addr; /**< Address of root page table to use for this job */ - u64 root_page_table_update_counter; /**< Page tables update events counter */ - u64 primary_preempt_buf_addr; + /**< Address of VPU commands batch buffer */ + u64 batch_buf_addr; + /**< Job ID */ + u32 job_id; + /**< Flags bit field, see VPU_JOB_FLAGS_* above */ + u32 flags; + /** + * Doorbell ring timestamp taken by KMD from SoC's global system clock, in + * microseconds. NPU can convert this value to its own fixed clock's timebase, + * to match other profiling timestamps. + */ + u64 doorbell_timestamp; + /**< Extra id for job tracking, used only in the firmware perf traces */ + u64 host_tracking_id; /**< Address of the primary preemption buffer to use for this job */ - u32 primary_preempt_buf_size; + u64 primary_preempt_buf_addr; /**< Size of the primary preemption buffer to use for this job */ - u32 secondary_preempt_buf_size; + u32 primary_preempt_buf_size; /**< Size of secondary preemption buffer to use for this job */ - u64 secondary_preempt_buf_addr; + u32 secondary_preempt_buf_size; /**< Address of secondary preemption buffer to use for this job */ - u8 reserved_0[VPU_JOB_RESERVED_BYTES]; + u64 secondary_preempt_buf_addr; + u64 reserved_0; +}; + +/* + * Inline command format. + * Inline commands are the commands executed at scheduler level (typically, + * synchronization directives). Inline command and job objects must be of + * the same size and have flags field at same offset. + */ +struct vpu_inline_cmd { + u64 reserved_0; + /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ + u32 type; + /* Flags bit field, see VPU_JOB_FLAGS_* above. */ + u32 flags; + /* Inline command payload. Depends on inline command type. */ + union { + /* Fence (wait and signal) commands' payload. */ + struct { + /* Fence object handle. */ + u64 fence_handle; + /* User VA of the current fence value. */ + u64 current_value_va; + /* User VA of the monitored fence value (read-only). */ + u64 monitored_value_va; + /* Value to wait for or write in fence location. 
*/ + u64 value; + /* User VA of the log buffer in which to add log entry on completion. */ + u64 log_buffer_va; + } fence; + /* Other commands do not have a payload. */ + /* Payload definition for future inline commands can be inserted here. */ + u64 reserved_1[6]; + } payload; +}; + +/* + * Job queue slots can be populated either with job objects or inline command objects. + */ +union vpu_jobq_slot { + struct vpu_job_queue_entry job; + struct vpu_inline_cmd inline_cmd; }; /* @@ -167,7 +297,21 @@ struct vpu_job_queue_header { u32 engine_idx; u32 head; u32 tail; - u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES]; + u32 flags; + /* Set to 1 to indicate priority_band field is valid */ + u32 priority_band_valid; + /* + * Priority for the work of this job queue, valid only if the HWS is NOT used + * and the `priority_band_valid` is set to 1. It is applied only during + * the VPU_JSM_MSG_REGISTER_DB message processing. + * The device firmware might use the `priority_band` to optimize the power + * management logic, but it will not affect the order of jobs. + * Available priority bands: @see enum vpu_job_scheduling_priority_band + */ + u32 priority_band; + /* Inside realtime band assigns a further priority, limited to 0..31 range */ + u32 realtime_priority_level; + u32 reserved_0[9]; }; /* @@ -175,7 +319,7 @@ struct vpu_job_queue_header { */ struct vpu_job_queue { struct vpu_job_queue_header header; - struct vpu_job_queue_entry job[]; + union vpu_jobq_slot slot[]; }; /** @@ -197,9 +341,7 @@ enum vpu_trace_entity_type { struct vpu_hws_log_buffer_header { /* Written by VPU after adding a log entry. Initialised by host to 0. */ u32 first_free_entry_index; - /* Incremented by VPU every time the VPU overwrites the 0th entry; - * initialised by host to 0. - */ + /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ u32 wraparound_count; /* * This is the number of buffers that can be stored in the log buffer provided by the host. @@ -230,14 +372,80 @@ struct vpu_hws_log_buffer_entry { u64 operation_data[2]; }; +/* Native fence log buffer types. */ +enum vpu_hws_native_fence_log_type { + VPU_HWS_NATIVE_FENCE_LOG_TYPE_WAITS = 1, + VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2 +}; + +/* HWS native fence log buffer header. */ +struct vpu_hws_native_fence_log_header { + union { + struct { + /* Index of the first free entry in buffer. */ + u32 first_free_entry_idx; + /* Incremented each time NPU wraps around the buffer to write next entry. */ + u32 wraparound_count; + }; + /* Field allowing atomic update of both fields above. */ + u64 atomic_wraparound_and_entry_idx; + }; + /* Log buffer type, see enum vpu_hws_native_fence_log_type. */ + u64 type; + /* Allocated number of entries in the log buffer. */ + u64 entry_nb; + u64 reserved[2]; +}; + +/* Native fence log operation types. */ +enum vpu_hws_native_fence_log_op { + VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0, + VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1 +}; + +/* HWS native fence log entry. */ +struct vpu_hws_native_fence_log_entry { + /* Newly signaled/unblocked fence value. */ + u64 fence_value; + /* Native fence object handle to which this operation belongs. */ + u64 fence_handle; + /* Operation type, see enum vpu_hws_native_fence_log_op. */ + u64 op_type; + u64 reserved_0; + /* + * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence + * wait was started (in NPU SysTime). + */ + u64 fence_wait_start_ts; + u64 reserved_1; + /* Timestamp at which fence operation was completed (in NPU SysTime). 
*/ + u64 fence_end_ts; +}; + +/* Native fence log buffer. */ +struct vpu_hws_native_fence_log_buffer { + struct vpu_hws_native_fence_log_header header; + struct vpu_hws_native_fence_log_entry entry[]; +}; + /* * Host <-> VPU IPC messages types. */ enum vpu_ipc_msg_type { VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF, + /* IPC Host -> Device, Async commands */ VPU_JSM_MSG_ASYNC_CMD = 0x1100, VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD, + /** + * Preempt engine. The NPU stops (preempts) all the jobs currently + * executing on the target engine making the engine become idle and ready to + * execute new jobs. + * NOTE: The NPU does not remove unstarted jobs (if any) from job queues of + * the target engine, but it stops processing them (until the queue doorbell + * is rung again); the host is responsible to reset the job queue, either + * after preemption or when resubmitting jobs to the queue. + */ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101, VPU_JSM_MSG_REGISTER_DB = 0x1102, VPU_JSM_MSG_UNREGISTER_DB = 0x1103, @@ -323,9 +531,10 @@ enum vpu_ipc_msg_type { * NOTE: Please introduce new ASYNC commands before this one. * */ VPU_JSM_MSG_STATE_DUMP = 0x11FF, + /* IPC Host -> Device, General commands */ VPU_JSM_MSG_GENERAL_CMD = 0x1200, - VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD, + VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD, /** * Control dyndbg behavior by executing a dyndbg command; equivalent to * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`. @@ -335,8 +544,12 @@ enum vpu_ipc_msg_type { * Perform the save procedure for the D0i3 entry */ VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202, + /* IPC Device -> Host, Job completion */ VPU_JSM_MSG_JOB_DONE = 0x2100, + /* IPC Device -> Host, Fence signalled */ + VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101, + /* IPC Device -> Host, Async command completion */ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, @@ -422,6 +635,7 @@ enum vpu_ipc_msg_type { * NOTE: Please introduce new ASYNC responses before this one. * */ VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF, + /* IPC Device -> Host, General command completion */ VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300, VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE, @@ -600,11 +814,6 @@ struct vpu_jsm_metric_streamer_update { u64 next_buffer_size; }; -struct vpu_ipc_msg_payload_blob_deinit { - /* 64-bit unique ID for the blob to be de-initialized. */ - u64 blob_id; -}; - struct vpu_ipc_msg_payload_job_done { /* Engine to which the job was submitted. */ u32 engine_idx; @@ -622,6 +831,21 @@ struct vpu_ipc_msg_payload_job_done { u64 cmdq_id; }; +/* + * Notification message upon native fence signalling. + * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED + */ +struct vpu_ipc_msg_payload_native_fence_signalled { + /* Engine ID. */ + u32 engine_idx; + /* Host SSID. */ + u32 host_ssid; + /* CMDQ ID */ + u64 cmdq_id; + /* Fence object handle. */ + u64 fence_handle; +}; + struct vpu_jsm_engine_reset_context { /* Host SSID */ u32 host_ssid; @@ -700,11 +924,6 @@ struct vpu_ipc_msg_payload_get_power_level_count_done { u8 power_limit[16]; }; -struct vpu_ipc_msg_payload_blob_deinit_done { - /* 64-bit unique ID for the blob de-initialized. 
*/ - u64 blob_id; -}; - /* HWS priority band setup request / response */ struct vpu_ipc_msg_payload_hws_priority_band_setup { /* @@ -794,7 +1013,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { u32 reserved_0; /* Command queue id */ u64 cmdq_id; - /* Priority band to assign to work of this context */ + /* + * Priority band to assign to work of this context. + * Available priority bands: @see enum vpu_job_scheduling_priority_band + */ u32 priority_band; /* Inside realtime band assigns a further priority */ u32 realtime_priority_level; @@ -869,9 +1091,7 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log { */ u64 notify_index; /* - * Enable extra events to be output to log for debug of scheduling algorithm. - * Interpreted by VPU as a boolean to enable or disable, expected values are - * 0 and 1. + * Field is now deprecated, will be removed when KMD is updated to support removal */ u32 enable_extra_events; /* Zero Padding */ @@ -1243,10 +1463,10 @@ union vpu_ipc_msg_payload { struct vpu_jsm_metric_streamer_start metric_streamer_start; struct vpu_jsm_metric_streamer_stop metric_streamer_stop; struct vpu_jsm_metric_streamer_update metric_streamer_update; - struct vpu_ipc_msg_payload_blob_deinit blob_deinit; struct vpu_ipc_msg_payload_ssid_release ssid_release; struct vpu_jsm_hws_register_db hws_register_db; struct vpu_ipc_msg_payload_job_done job_done; + struct vpu_ipc_msg_payload_native_fence_signalled native_fence_signalled; struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done; struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done; struct vpu_ipc_msg_payload_register_db_done register_db_done; @@ -1254,7 +1474,6 @@ union vpu_ipc_msg_payload { struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done; struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done; struct vpu_jsm_metric_streamer_done metric_streamer_done; - struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done; struct vpu_ipc_msg_payload_trace_config trace_config; struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability; struct vpu_ipc_msg_payload_trace_get_name trace_get_name; diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index ada9b1eb0787..8ab82e78dd94 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -405,6 +405,38 @@ static const struct mhi_channel_config aic100_channels[] = { .auto_queue = false, .wake_capable = false, }, + { + .name = "IPCR", + .num = 24, + .num_elements = 32, + .local_elements = 0, + .event_ring = 0, + .dir = DMA_TO_DEVICE, + .ee_mask = MHI_CH_EE_AMSS, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + .wake_capable = false, + }, + { + .name = "IPCR", + .num = 25, + .num_elements = 32, + .local_elements = 0, + .event_ring = 0, + .dir = DMA_FROM_DEVICE, + .ee_mask = MHI_CH_EE_AMSS, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = true, + .wake_capable = false, + }, }; static struct mhi_event_config aic100_events[] = { diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c index 20b653d99e52..ba0cf2f94732 100644 --- a/drivers/accel/qaic/qaic_debugfs.c +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -64,20 +64,9 @@ static int bootlog_show(struct seq_file *s, void *unused) return 0; } -static int 
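/*
 * The hand-rolled open/read/llseek/release boilerplate removed below is
 * what DEFINE_SHOW_ATTRIBUTE(name) generates from a name##_show()
 * function. Roughly, simplified from include/linux/seq_file.h ("name"
 * is the macro parameter, not a real symbol):
 *
 *	static int name_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, name_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations name_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = name_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 *
 * This is why read_dbc_fifo_size() and friends are renamed to
 * fifo_size_show() etc.: the macro derives every symbol from one name.
 */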
bootlog_fops_open(struct inode *inode, struct file *file) -{ - return single_open(file, bootlog_show, inode->i_private); -} - -static const struct file_operations bootlog_fops = { - .owner = THIS_MODULE, - .open = bootlog_fops_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(bootlog); -static int read_dbc_fifo_size(struct seq_file *s, void *unused) +static int fifo_size_show(struct seq_file *s, void *unused) { struct dma_bridge_chan *dbc = s->private; @@ -85,20 +74,9 @@ static int read_dbc_fifo_size(struct seq_file *s, void *unused) return 0; } -static int fifo_size_open(struct inode *inode, struct file *file) -{ - return single_open(file, read_dbc_fifo_size, inode->i_private); -} - -static const struct file_operations fifo_size_fops = { - .owner = THIS_MODULE, - .open = fifo_size_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fifo_size); -static int read_dbc_queued(struct seq_file *s, void *unused) +static int queued_show(struct seq_file *s, void *unused) { struct dma_bridge_chan *dbc = s->private; u32 tail = 0, head = 0; @@ -115,18 +93,7 @@ static int read_dbc_queued(struct seq_file *s, void *unused) return 0; } -static int queued_open(struct inode *inode, struct file *file) -{ - return single_open(file, read_dbc_queued, inode->i_private); -} - -static const struct file_operations queued_fops = { - .owner = THIS_MODULE, - .open = queued_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(queued); void qaic_debugfs_init(struct qaic_drm_device *qddev) { diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index bf10156c334e..3575e0c984d6 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -34,6 +34,7 @@ MODULE_IMPORT_NS(DMA_BUF); +#define PCI_DEV_AIC080 0xa080 #define PCI_DEV_AIC100 0xa100 #define QAIC_NAME "qaic" #define QAIC_DESC "Qualcomm Cloud AI Accelerators" @@ -53,12 +54,12 @@ static void qaicm_wq_release(struct drm_device *dev, void *res) destroy_workqueue(wq); } -static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt) +static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name) { struct workqueue_struct *wq; int ret; - wq = alloc_workqueue(fmt, WQ_UNBOUND, 0); + wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name); if (!wq) return ERR_PTR(-ENOMEM); ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq); @@ -365,7 +366,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de return NULL; qdev->dev_state = QAIC_OFFLINE; - if (id->device == PCI_DEV_AIC100) { + if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) { qdev->num_dbc = 16; qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL); if (!qdev->dbc) @@ -607,6 +608,7 @@ static struct mhi_driver qaic_mhi_driver = { }; static const struct pci_device_id qaic_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), }, { } }; diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c index bf94bbab6be5..6d772143d612 100644 --- a/drivers/accel/qaic/sahara.c +++ b/drivers/accel/qaic/sahara.c @@ -2,6 +2,7 @@ /* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ +#include <linux/devcoredump.h> #include <linux/firmware.h> #include <linux/limits.h> #include <linux/mhi.h> @@ -9,6 +10,7 @@ #include <linux/mod_devicetable.h> #include <linux/overflow.h> #include <linux/types.h> +#include <linux/vmalloc.h> #include <linux/workqueue.h> #include "sahara.h" @@ -36,12 +38,14 @@ #define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */ #define SAHARA_TRANSFER_MAX_SIZE 0x80000 +#define SAHARA_READ_MAX_SIZE 0xfff0U /* Avoid unaligned requests */ #define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\ SAHARA_PACKET_MAX_SIZE) #define SAHARA_IMAGE_ID_NONE U32_MAX #define SAHARA_VERSION 2 #define SAHARA_SUCCESS 0 +#define SAHARA_TABLE_ENTRY_STR_LEN 20 #define SAHARA_MODE_IMAGE_TX_PENDING 0x0 #define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1 @@ -53,6 +57,8 @@ #define SAHARA_END_OF_IMAGE_LENGTH 0x10 #define SAHARA_DONE_LENGTH 0x8 #define SAHARA_RESET_LENGTH 0x8 +#define SAHARA_MEM_DEBUG64_LENGTH 0x18 +#define SAHARA_MEM_READ64_LENGTH 0x18 struct sahara_packet { __le32 cmd; @@ -80,18 +86,95 @@ struct sahara_packet { __le32 image; __le32 status; } end_of_image; + struct { + __le64 table_address; + __le64 table_length; + } memory_debug64; + struct { + __le64 memory_address; + __le64 memory_length; + } memory_read64; }; }; +struct sahara_debug_table_entry64 { + __le64 type; + __le64 address; + __le64 length; + char description[SAHARA_TABLE_ENTRY_STR_LEN]; + char filename[SAHARA_TABLE_ENTRY_STR_LEN]; +}; + +struct sahara_dump_table_entry { + u64 type; + u64 address; + u64 length; + char description[SAHARA_TABLE_ENTRY_STR_LEN]; + char filename[SAHARA_TABLE_ENTRY_STR_LEN]; +}; + +#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef +#define SAHARA_DUMP_V1_VER 1 +struct sahara_memory_dump_meta_v1 { + u64 magic; + u64 version; + u64 dump_size; + u64 table_size; +}; + +/* + * Layout of crashdump provided to user via devcoredump + * +------------------------------------------+ + * | Crashdump Meta structure | + * | type: struct sahara_memory_dump_meta_v1 | + * +------------------------------------------+ + * | Crashdump Table | + * | type: array of struct | + * | sahara_dump_table_entry | + * | | + * | | + * +------------------------------------------+ + * | Crashdump | + * | | + * | | + * | | + * | | + * | | + * +------------------------------------------+ + * + * First is the metadata header. Userspace can use the magic number to verify + * the content type, and then check the version for the rest of the format. + * New versions should keep the magic number location/value, and version + * location, but increment the version value. + * + * For v1, the metadata lists the size of the entire dump (header + table + + * dump) and the size of the table. Then the dump image table, which describes + * the contents of the dump. Finally all the images are listed in order, with + * no deadspace in between. Userspace can use the sizes listed in the image + * table to reconstruct the individual images. 
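+ *
+ * Worked example with hypothetical sizes: a dump carrying two images of
+ * 0x1000 and 0x2000 bytes has a two-entry table, so
+ *   image[0] data starts at offset sizeof(meta) + table_size
+ *   image[1] data starts at offset sizeof(meta) + table_size + 0x1000
+ * and dump_size = sizeof(meta) + table_size + 0x3000.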
+ */ + struct sahara_context { struct sahara_packet *tx[SAHARA_NUM_TX_BUF]; struct sahara_packet *rx; - struct work_struct work; + struct work_struct fw_work; + struct work_struct dump_work; struct mhi_device *mhi_dev; const char **image_table; u32 table_size; u32 active_image_id; const struct firmware *firmware; + u64 dump_table_address; + u64 dump_table_length; + size_t rx_size; + size_t rx_size_requested; + void *mem_dump; + size_t mem_dump_sz; + struct sahara_dump_table_entry *dump_image; + u64 dump_image_offset; + void *mem_dump_freespace; + u64 dump_images_left; + bool is_mem_dump_mode; }; static const char *aic100_image_table[] = { @@ -153,6 +236,8 @@ static void sahara_send_reset(struct sahara_context *context) { int ret; + context->is_mem_dump_mode = false; + context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD); context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH); @@ -186,7 +271,8 @@ static void sahara_hello(struct sahara_context *context) } if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING && - le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) { + le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE && + le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) { dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n", le32_to_cpu(context->rx->hello.mode)); return; @@ -320,9 +406,70 @@ static void sahara_end_of_image(struct sahara_context *context) dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret); } +static void sahara_memory_debug64(struct sahara_context *context) +{ + int ret; + + dev_dbg(&context->mhi_dev->dev, + "MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n", + le32_to_cpu(context->rx->length), + le64_to_cpu(context->rx->memory_debug64.table_address), + le64_to_cpu(context->rx->memory_debug64.table_length)); + + if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) { + dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n", + le32_to_cpu(context->rx->length)); + return; + } + + context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address); + context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length); + + if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 || + !context->dump_table_length) { + dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n", + context->dump_table_length); + return; + } + + /* + * From this point, the protocol flips. We make memory_read requests to + * the device, and the device responds with the raw data. If the device + * has an error, it will send an End of Image command. First we need to + * request the memory dump table so that we know where all the pieces + * of the dump are that we can consume. + */ + + context->is_mem_dump_mode = true; + + /* + * Assume that the table is smaller than our MTU so that we can read it + * in one shot. The spec does not put an upper limit on the table, but + * no known device will exceed this. + */ + if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) { + dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. 
Discarding dump\n", + context->dump_table_length); + sahara_send_reset(context); + return; + } + + context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD); + context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH); + context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address); + context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length); + + context->rx_size_requested = context->dump_table_length; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0], + SAHARA_MEM_READ64_LENGTH, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret); +} + static void sahara_processing(struct work_struct *work) { - struct sahara_context *context = container_of(work, struct sahara_context, work); + struct sahara_context *context = container_of(work, struct sahara_context, fw_work); int ret; switch (le32_to_cpu(context->rx->cmd)) { @@ -338,6 +485,12 @@ static void sahara_processing(struct work_struct *work) case SAHARA_DONE_RESP_CMD: /* Intentional do nothing as we don't need to exit an app */ break; + case SAHARA_RESET_RESP_CMD: + /* Intentional do nothing as we don't need to exit an app */ + break; + case SAHARA_MEM_DEBUG64_CMD: + sahara_memory_debug64(context); + break; default: dev_err(&context->mhi_dev->dev, "Unknown command %d\n", le32_to_cpu(context->rx->cmd)); @@ -350,6 +503,217 @@ static void sahara_processing(struct work_struct *work) dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret); } +static void sahara_parse_dump_table(struct sahara_context *context) +{ + struct sahara_dump_table_entry *image_out_table; + struct sahara_debug_table_entry64 *dev_table; + struct sahara_memory_dump_meta_v1 *dump_meta; + u64 table_nents; + u64 dump_length; + int ret; + u64 i; + + table_nents = context->dump_table_length / sizeof(*dev_table); + context->dump_images_left = table_nents; + dump_length = 0; + + dev_table = (struct sahara_debug_table_entry64 *)(context->rx); + for (i = 0; i < table_nents; ++i) { + /* Do not trust the device, ensure the strings are terminated */ + dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0; + dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0; + + dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length)); + if (dump_length == SIZE_MAX) { + /* Discard the dump */ + sahara_send_reset(context); + return; + } + + dev_dbg(&context->mhi_dev->dev, + "Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n", + i, + le64_to_cpu(dev_table[i].type), + le64_to_cpu(dev_table[i].address), + le64_to_cpu(dev_table[i].length), + dev_table[i].description, + dev_table[i].filename); + } + + dump_length = size_add(dump_length, sizeof(*dump_meta)); + if (dump_length == SIZE_MAX) { + /* Discard the dump */ + sahara_send_reset(context); + return; + } + dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents)); + if (dump_length == SIZE_MAX) { + /* Discard the dump */ + sahara_send_reset(context); + return; + } + + context->mem_dump_sz = dump_length; + context->mem_dump = vzalloc(dump_length); + if (!context->mem_dump) { + /* Discard the dump */ + sahara_send_reset(context); + return; + } + + /* Populate the dump metadata and table for userspace */ + dump_meta = context->mem_dump; + dump_meta->magic = SAHARA_DUMP_V1_MAGIC; + dump_meta->version = SAHARA_DUMP_V1_VER; + dump_meta->dump_size = dump_length; + dump_meta->table_size = context->dump_table_length; 
+ + image_out_table = context->mem_dump + sizeof(*dump_meta); + for (i = 0; i < table_nents; ++i) { + image_out_table[i].type = le64_to_cpu(dev_table[i].type); + image_out_table[i].address = le64_to_cpu(dev_table[i].address); + image_out_table[i].length = le64_to_cpu(dev_table[i].length); + strscpy(image_out_table[i].description, dev_table[i].description, + SAHARA_TABLE_ENTRY_STR_LEN); + strscpy(image_out_table[i].filename, + dev_table[i].filename, + SAHARA_TABLE_ENTRY_STR_LEN); + } + + context->mem_dump_freespace = &image_out_table[i]; + + /* Done parsing the table, switch to image dump mode */ + context->dump_table_length = 0; + + /* Request the first chunk of the first image */ + context->dump_image = &image_out_table[0]; + dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE); + /* Avoid requesting EOI sized data so that we can identify errors */ + if (dump_length == SAHARA_END_OF_IMAGE_LENGTH) + dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2; + + context->dump_image_offset = dump_length; + + context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD); + context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH); + context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address); + context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length); + + context->rx_size_requested = dump_length; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0], + SAHARA_MEM_READ64_LENGTH, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret); +} + +static void sahara_parse_dump_image(struct sahara_context *context) +{ + u64 dump_length; + int ret; + + memcpy(context->mem_dump_freespace, context->rx, context->rx_size); + context->mem_dump_freespace += context->rx_size; + + if (context->dump_image_offset >= context->dump_image->length) { + /* Need to move to next image */ + context->dump_image++; + context->dump_images_left--; + context->dump_image_offset = 0; + + if (!context->dump_images_left) { + /* Dump done */ + dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev, + context->mem_dump, + context->mem_dump_sz, + GFP_KERNEL); + context->mem_dump = NULL; + sahara_send_reset(context); + return; + } + } + + /* Get next image chunk */ + dump_length = context->dump_image->length - context->dump_image_offset; + dump_length = min(dump_length, SAHARA_READ_MAX_SIZE); + /* Avoid requesting EOI sized data so that we can identify errors */ + if (dump_length == SAHARA_END_OF_IMAGE_LENGTH) + dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2; + + context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD); + context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH); + context->tx[0]->memory_read64.memory_address = + cpu_to_le64(context->dump_image->address + context->dump_image_offset); + context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length); + + context->dump_image_offset += dump_length; + context->rx_size_requested = dump_length; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0], + SAHARA_MEM_READ64_LENGTH, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, + "Unable to send read for dump content %d\n", ret); +} + +static void sahara_dump_processing(struct work_struct *work) +{ + struct sahara_context *context = container_of(work, struct sahara_context, dump_work); + int ret; + + /* + * We should get the expected raw data, but if the device has an error + * it is supposed to send EOI with an error code. 
+ */ + if (context->rx_size != context->rx_size_requested && + context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) { + dev_err(&context->mhi_dev->dev, + "Unexpected response to read_data. Expected size: %#zx got: %#zx\n", + context->rx_size_requested, + context->rx_size); + goto error; + } + + if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH && + le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) { + dev_err(&context->mhi_dev->dev, + "Unexpected EOI response to read_data. Status: %d\n", + le32_to_cpu(context->rx->end_of_image.status)); + goto error; + } + + if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH && + le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) { + dev_err(&context->mhi_dev->dev, + "Invalid EOI response to read_data. CMD: %d\n", + le32_to_cpu(context->rx->cmd)); + goto error; + } + + /* + * Need to know if we received the dump table, or part of a dump image. + * Since we get raw data, we cannot tell from the data itself. Instead, + * we use the stored dump_table_length, which we zero after we read and + * process the entire table. + */ + if (context->dump_table_length) + sahara_parse_dump_table(context); + else + sahara_parse_dump_image(context); + + ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx, + SAHARA_PACKET_MAX_SIZE, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret); + + return; + +error: + vfree(context->mem_dump); + context->mem_dump = NULL; + sahara_send_reset(context); +} + static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { struct sahara_context *context; @@ -382,7 +746,8 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_ } context->mhi_dev = mhi_dev; - INIT_WORK(&context->work, sahara_processing); + INIT_WORK(&context->fw_work, sahara_processing); + INIT_WORK(&context->dump_work, sahara_dump_processing); context->image_table = aic100_image_table; context->table_size = ARRAY_SIZE(aic100_image_table); context->active_image_id = SAHARA_IMAGE_ID_NONE; @@ -405,7 +770,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev) { struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev); - cancel_work_sync(&context->work); + cancel_work_sync(&context->fw_work); + cancel_work_sync(&context->dump_work); + if (context->mem_dump) + vfree(context->mem_dump); sahara_release_image(context); mhi_unprepare_from_transfer(mhi_dev); } @@ -418,8 +786,14 @@ static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result { struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev); - if (!mhi_result->transaction_status) - schedule_work(&context->work); + if (!mhi_result->transaction_status) { + context->rx_size = mhi_result->bytes_xferd; + if (context->is_mem_dump_mode) + schedule_work(&context->dump_work); + else + schedule_work(&context->fw_work); + } + } static const struct mhi_device_id sahara_mhi_match_table[] = { diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c index d0c8aed90fd1..7a47d8095a7d 100644 --- a/drivers/acpi/arm64/init.c +++ b/drivers/acpi/arm64/init.c @@ -2,7 +2,7 @@ #include <linux/acpi.h> #include "init.h" -void __init acpi_arm_init(void) +void __init acpi_arch_init(void) { if (IS_ENABLED(CONFIG_ACPI_AGDI)) acpi_agdi_init(); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 4c745a26226b..1f7e4c691d9e 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1218,6 +1218,17 @@ static bool iort_pci_rc_supports_ats(struct 
acpi_iort_node *node) return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; } +static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node) +{ + struct acpi_iort_memory_access *memory_access; + struct acpi_iort_root_complex *pci_rc; + + pci_rc = (struct acpi_iort_root_complex *)node->node_data; + memory_access = + (struct acpi_iort_memory_access *)&pci_rc->memory_properties; + return memory_access->memory_flags & ACPI_IORT_MF_CANWBS; +} + static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, u32 streamid) { @@ -1335,6 +1346,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in) fwspec = dev_iommu_fwspec_get(dev); if (fwspec && iort_pci_rc_supports_ats(node)) fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; + if (fwspec && iort_pci_rc_supports_canwbs(node)) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS; } else { node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, iort_match_node_callback, dev); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index aed4a37da03e..3d5342f8d7b3 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -853,6 +853,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) struct power_supply_config psy_cfg = { .drv_data = battery, .attr_grp = acpi_battery_groups, + .no_wakeup_source = true, }; bool full_cap_broken = false; @@ -888,7 +889,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY; battery->bat_desc.get_property = acpi_battery_get_property; - battery->bat = power_supply_register_no_ws(&battery->device->dev, + battery->bat = power_supply_register(&battery->device->dev, &battery->bat_desc, &psy_cfg); if (IS_ERR(battery->bat)) { diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 16917dc3ad60..058910af82bc 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1434,6 +1434,8 @@ static int __init acpi_bus_init(void) struct kobject *acpi_kobj; EXPORT_SYMBOL_GPL(acpi_kobj); +void __weak __init acpi_arch_init(void) { } + static int __init acpi_init(void) { int result; @@ -1461,8 +1463,7 @@ static int __init acpi_init(void) acpi_viot_early_init(); acpi_hest_init(); acpi_ghes_init(); - acpi_arm_init(); - acpi_riscv_init(); + acpi_arch_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 831fa4a12159..698897b29de2 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -578,7 +578,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) * @dev: the target CPU * @index: the index of suggested state */ -static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) +static void acpi_idle_play_dead(struct cpuidle_device *dev, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -591,11 +591,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { io_idle(cx->address); } else - return -ENODEV; + return; } - - /* Never reached */ - return 0; } static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) @@ -803,12 +800,12 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) state->enter = acpi_idle_enter; state->flags = 0; - if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 || - cx->type == ACPI_STATE_C3) { - state->enter_dead = acpi_idle_play_dead; - if (cx->type != ACPI_STATE_C3) - drv->safe_state_index = count; - } + + 
state->enter_dead = acpi_idle_play_dead; + + if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) + drv->safe_state_index = count; + /* * Halt-induced C1 is not good for ->enter_s2idle, because it * re-enables interrupts on exit. Moreover, C1 is generally not diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c index 5ef97905a727..673e4d5dd752 100644 --- a/drivers/acpi/riscv/init.c +++ b/drivers/acpi/riscv/init.c @@ -7,7 +7,7 @@ #include <linux/acpi.h> #include "init.h" -void __init acpi_riscv_init(void) +void __init acpi_arch_init(void) { riscv_acpi_init_gsi_mapping(); } diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 423565c31d5e..cb45ef5240da 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -296,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { /* * 2. Devices which also have the skip i2c/serdev quirks and which * need the x86-android-tablets module to properly work. + * Sorted alphabetically. */ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) { @@ -309,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { + /* Acer Iconia One 8 A1-840 (non FHD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Above strings are too generic also match BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Asus ME176C tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"), @@ -319,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { - /* Lenovo Yoga Book X90F/L */ + /* Asus TF103C transformer 2-in-1 */ .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_UART1_SKIP | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { + /* Lenovo Yoga Book X90F/L */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_UART1_SKIP | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, @@ -425,6 +440,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = { { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */ { "10EC5651", 0 }, /* RealTek ALC5651 audio codec */ { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */ + { "INT33F5", 0 }, /* TI Dollar Cove PMIC */ { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */ { "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */ { "NPCE69A", 0 }, /* Asus Transformer keyboard dock */ diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index b3acbc4174fb..a738e7745865 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1047,7 +1047,7 @@ void binder_alloc_vma_close(struct binder_alloc *alloc) /** * 
binder_alloc_free_page() - shrinker callback to free pages * @item: item to free - * @lock: lock protecting the item + * @lru: list_lru instance of the item * @cb_arg: callback argument * * Called from list_lru_walk() in binder_shrink_scan() to free @@ -1055,9 +1055,8 @@ void binder_alloc_vma_close(struct binder_alloc *alloc) */ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, - spinlock_t *lock, void *cb_arg) - __must_hold(lock) + __must_hold(&lru->lock) { struct binder_lru_page *page = container_of(item, typeof(*page), lru); struct binder_alloc *alloc = page->alloc; @@ -1092,7 +1091,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, list_lru_isolate(lru, item); spin_unlock(&alloc->lock); - spin_unlock(lock); + spin_unlock(&lru->lock); if (vma) { trace_binder_unmap_user_start(alloc, index); @@ -1106,7 +1105,6 @@ enum lru_status binder_alloc_free_page(struct list_head *item, mmput_async(mm); __free_page(page_to_free); - spin_lock(lock); return LRU_REMOVED_RETRY; err_invalid_vma: diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 70387234477e..c02c8ebcb466 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -118,7 +118,7 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} #endif enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, - spinlock_t *lock, void *cb_arg); + void *cb_arg); struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 547f56341705..3999305b5356 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -370,7 +370,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id /* AHCI controllers often implement SFF compatible interface. * Grab all PCI BARs just in case. */ - rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); + rc = pcim_request_all_regions(pdev, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) @@ -386,7 +386,9 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) pci_enable_msi(pdev); - hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; + hpriv->mmio = pcim_iomap(pdev, AHCI_PCI_BAR, 0); + if (!hpriv->mmio) + return -ENOMEM; /* save initial config */ ahci_save_initial_config(&pdev->dev, hpriv); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 2d3d3d67b4d9..8d27c567be1c 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1869,7 +1869,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* AHCI controllers often implement SFF compatible interface. * Grab all PCI BARs just in case. 
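(A note on the ahci conversions in this section: pcim_iomap_regions_request_all() is deprecated, and the replacement splits its job into pcim_request_all_regions() plus an explicit pcim_iomap(). A sketch of the resulting probe-time pattern, keeping the ahci names:

	rc = pcim_request_all_regions(pdev, DRV_NAME);	/* request every BAR */
	if (rc)
		return rc;

	hpriv->mmio = pcim_iomap(pdev, AHCI_PCI_BAR, 0);	/* maxlen 0: map whole BAR */
	if (!hpriv->mmio)
		return -ENOMEM;

The added -ENOMEM check is the point of the exercise: pcim_iomap_table() returned a table whose entry could silently be NULL, while pcim_iomap() reports the mapping failure right at the call site.)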
*/ - rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); + rc = pcim_request_all_regions(pdev, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; @@ -1893,7 +1893,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (ahci_sb600_enable_64bit(pdev)) hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; - hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; + hpriv->mmio = pcim_iomap(pdev, ahci_pci_bar, 0); + if (!hpriv->mmio) + return -ENOMEM; /* detect remapped nvme devices */ ahci_remap_check(pdev, ahci_pci_bar, hpriv); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index ed209f4f2798..a97f2c40c640 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -130,7 +130,7 @@ config BLK_DEV_UBD_SYNC kernel command line option. Alternatively, you can say Y here to turn on synchronous operation by default for all block devices. - If you're running a journalling file system (like reiserfs, for + If you're running a journalling file system (like xfs, for example) in your virtual machine, you will want to say Y here. If you care for the safety of the data in your virtual machine, Y is a wise choice too. In all other cases (for example, if you're just diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs index b0227cf9ddd3..5de7223beb4d 100644 --- a/drivers/block/rnull.rs +++ b/drivers/block/rnull.rs @@ -32,7 +32,7 @@ module! { } struct NullBlkModule { - _disk: Pin<Box<Mutex<GenDisk<NullBlkDevice>>>>, + _disk: Pin<KBox<Mutex<GenDisk<NullBlkDevice>>>>, } impl kernel::Module for NullBlkModule { @@ -47,7 +47,7 @@ impl kernel::Module for NullBlkModule { .rotational(false) .build(format_args!("rnullb{}", 0), tagset)?; - let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?; + let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?; Ok(Self { _disk: disk }) } diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index 6aea609b795c..402b7b175863 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -94,6 +94,7 @@ endchoice config ZRAM_DEF_COMP string + depends on ZRAM default "lzo-rle" if ZRAM_DEF_COMP_LZORLE default "lzo" if ZRAM_DEF_COMP_LZO default "lz4" if ZRAM_DEF_COMP_LZ4 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index ad9c9bc3ccfc..3dee026988dc 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -178,11 +178,105 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index) static void zram_accessed(struct zram *zram, u32 index) { zram_clear_flag(zram, index, ZRAM_IDLE); + zram_clear_flag(zram, index, ZRAM_PP_SLOT); #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME zram->table[index].ac_time = ktime_get_boottime(); #endif } +#if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP +struct zram_pp_slot { + unsigned long index; + struct list_head entry; +}; + +/* + * A post-processing bucket is, essentially, a size class; this defines + * the range (in bytes) of pp-slot sizes in a particular bucket (e.g. with + * a 64-byte range, a 1000-byte object falls into bucket 15).
+ */ +#define PP_BUCKET_SIZE_RANGE 64 +#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1) + +struct zram_pp_ctl { + struct list_head pp_buckets[NUM_PP_BUCKETS]; +}; + +static struct zram_pp_ctl *init_pp_ctl(void) +{ + struct zram_pp_ctl *ctl; + u32 idx; + + ctl = kmalloc(sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return NULL; + + for (idx = 0; idx < NUM_PP_BUCKETS; idx++) + INIT_LIST_HEAD(&ctl->pp_buckets[idx]); + return ctl; +} + +static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps) +{ + list_del_init(&pps->entry); + + zram_slot_lock(zram, pps->index); + zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT); + zram_slot_unlock(zram, pps->index); + + kfree(pps); +} + +static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl) +{ + u32 idx; + + if (!ctl) + return; + + for (idx = 0; idx < NUM_PP_BUCKETS; idx++) { + while (!list_empty(&ctl->pp_buckets[idx])) { + struct zram_pp_slot *pps; + + pps = list_first_entry(&ctl->pp_buckets[idx], + struct zram_pp_slot, + entry); + release_pp_slot(zram, pps); + } + } + + kfree(ctl); +} + +static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl, + struct zram_pp_slot *pps) +{ + u32 idx; + + idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; + list_add(&pps->entry, &ctl->pp_buckets[idx]); + + zram_set_flag(zram, pps->index, ZRAM_PP_SLOT); +} + +static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl) +{ + struct zram_pp_slot *pps = NULL; + s32 idx = NUM_PP_BUCKETS - 1; + + /* The higher the bucket id, the more profitable its slots are to post-process */ + while (idx >= 0) { + pps = list_first_entry_or_null(&ctl->pp_buckets[idx], + struct zram_pp_slot, + entry); + if (pps) + break; + + idx--; + } + return pps; +} +#endif + static inline void update_used_max(struct zram *zram, const unsigned long pages) { @@ -296,19 +390,28 @@ static void mark_idle(struct zram *zram, ktime_t cutoff) for (index = 0; index < nr_pages; index++) { /* - * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race. - * See the comment in writeback_store. + * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no + * post-processing (recompress, writeback) happens to the + * ZRAM_SAME slot. + * + * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
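(Stepping back before mark_idle(): writeback_store() and recompress_store() below both drive the post-processing helpers above through the same lifecycle. A condensed sketch, with locking, mode checks and error paths elided:

	struct zram_pp_ctl *ctl = init_pp_ctl();
	struct zram_pp_slot *pps;

	if (!ctl)
		return -ENOMEM;

	/* pass 1: collect candidates, bucketed by compressed object size */
	scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);

	/* pass 2: drain the buckets, largest size class first */
	while ((pps = select_pp_slot(ctl))) {
		/* revalidate ZRAM_PP_SLOT under the slot lock, then act */
		release_pp_slot(zram, pps);
	}

	release_pp_ctl(zram, ctl);	/* frees any slots never selected */

)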
*/ zram_slot_lock(zram, index); - if (zram_allocated(zram, index) && - !zram_test_flag(zram, index, ZRAM_UNDER_WB)) { + if (!zram_allocated(zram, index) || + zram_test_flag(zram, index, ZRAM_WB) || + zram_test_flag(zram, index, ZRAM_SAME)) { + zram_slot_unlock(zram, index); + continue; + } + #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME - is_idle = !cutoff || ktime_after(cutoff, - zram->table[index].ac_time); + is_idle = !cutoff || + ktime_after(cutoff, zram->table[index].ac_time); #endif - if (is_idle) - zram_set_flag(zram, index, ZRAM_IDLE); - } + if (is_idle) + zram_set_flag(zram, index, ZRAM_IDLE); + else + zram_clear_flag(zram, index, ZRAM_IDLE); zram_slot_unlock(zram, index); } } @@ -587,11 +690,57 @@ static void read_from_bdev_async(struct zram *zram, struct page *page, #define IDLE_WRITEBACK (1<<1) #define INCOMPRESSIBLE_WRITEBACK (1<<2) +static int scan_slots_for_writeback(struct zram *zram, u32 mode, + unsigned long nr_pages, + unsigned long index, + struct zram_pp_ctl *ctl) +{ + struct zram_pp_slot *pps = NULL; + + for (; nr_pages != 0; index++, nr_pages--) { + if (!pps) + pps = kmalloc(sizeof(*pps), GFP_KERNEL); + if (!pps) + return -ENOMEM; + + INIT_LIST_HEAD(&pps->entry); + + zram_slot_lock(zram, index); + if (!zram_allocated(zram, index)) + goto next; + + if (zram_test_flag(zram, index, ZRAM_WB) || + zram_test_flag(zram, index, ZRAM_SAME)) + goto next; + + if (mode & IDLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_IDLE)) + goto next; + if (mode & HUGE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_HUGE)) + goto next; + if (mode & INCOMPRESSIBLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + goto next; + + pps->index = index; + place_pp_slot(zram, ctl, pps); + pps = NULL; +next: + zram_slot_unlock(zram, index); + } + + kfree(pps); + return 0; +} + static ssize_t writeback_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; + struct zram_pp_ctl *ctl = NULL; + struct zram_pp_slot *pps; unsigned long index = 0; struct bio bio; struct bio_vec bio_vec; @@ -626,6 +775,12 @@ static ssize_t writeback_store(struct device *dev, goto release_init_lock; } + /* Do not permit concurrent post-processing actions. 
*/ + if (atomic_xchg(&zram->pp_in_progress, 1)) { + up_read(&zram->init_lock); + return -EAGAIN; + } + if (!zram->backing_dev) { ret = -ENODEV; goto release_init_lock; } @@ -637,7 +792,15 @@ static ssize_t writeback_store(struct device *dev, goto release_init_lock; } - for (; nr_pages != 0; index++, nr_pages--) { + ctl = init_pp_ctl(); + if (!ctl) { + ret = -ENOMEM; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, nr_pages, index, ctl); + + while ((pps = select_pp_slot(ctl))) { spin_lock(&zram->wb_limit_lock); if (zram->wb_limit_enable && !zram->bd_wb_limit) { spin_unlock(&zram->wb_limit_lock); @@ -654,38 +817,20 @@ static ssize_t writeback_store(struct device *dev, } } + index = pps->index; zram_slot_lock(zram, index); - if (!zram_allocated(zram, index)) - goto next; - - if (zram_test_flag(zram, index, ZRAM_WB) || - zram_test_flag(zram, index, ZRAM_SAME) || - zram_test_flag(zram, index, ZRAM_UNDER_WB)) - goto next; - - if (mode & IDLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_IDLE)) - goto next; - if (mode & HUGE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_HUGE)) - goto next; - if (mode & INCOMPRESSIBLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) - goto next; - /* - * Clearing ZRAM_UNDER_WB is duty of caller. - * IOW, zram_free_page never clear it. + * scan_slots_for_writeback() sets ZRAM_PP_SLOT and releases the + * slot lock, so slots can change in the meantime. If slots are + * accessed or freed they lose the ZRAM_PP_SLOT flag and hence + * we don't post-process them. */ - zram_set_flag(zram, index, ZRAM_UNDER_WB); - /* Need for hugepage writeback racing */ - zram_set_flag(zram, index, ZRAM_IDLE); + if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) + goto next; zram_slot_unlock(zram, index); + if (zram_read_page(zram, page, index, NULL)) { - zram_slot_lock(zram, index); - zram_clear_flag(zram, index, ZRAM_UNDER_WB); - zram_clear_flag(zram, index, ZRAM_IDLE); - zram_slot_unlock(zram, index); + release_pp_slot(zram, pps); continue; } @@ -700,10 +845,7 @@ static ssize_t writeback_store(struct device *dev, */ err = submit_bio_wait(&bio); if (err) { - zram_slot_lock(zram, index); - zram_clear_flag(zram, index, ZRAM_UNDER_WB); - zram_clear_flag(zram, index, ZRAM_IDLE); - zram_slot_unlock(zram, index); + release_pp_slot(zram, pps); /* * BIO errors are not fatal, we continue and simply * attempt to writeback the remaining objects (pages). @@ -717,25 +859,19 @@ static ssize_t writeback_store(struct device *dev, } atomic64_inc(&zram->stats.bd_writes); + zram_slot_lock(zram, index); /* - * We released zram_slot_lock so need to check if the slot was - * changed. If there is freeing for the slot, we can catch it - * easily by zram_allocated. - * A subtle case is the slot is freed/reallocated/marked as - * ZRAM_IDLE again. To close the race, idle_store doesn't - * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB. - * Thus, we could close the race by checking ZRAM_IDLE bit. + * Same as above: we release the slot lock during writeback, so + * the slot can change under us: slot_free(), or slot_free() + * followed by reallocation (zram_write_page()). In either case + * the slot loses its ZRAM_PP_SLOT flag. No concurrent + * post-processing can set ZRAM_PP_SLOT on such slots until the + * current post-processing finishes.
*/ - zram_slot_lock(zram, index); - if (!zram_allocated(zram, index) || - !zram_test_flag(zram, index, ZRAM_IDLE)) { - zram_clear_flag(zram, index, ZRAM_UNDER_WB); - zram_clear_flag(zram, index, ZRAM_IDLE); + if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) goto next; - } zram_free_page(zram, index); - zram_clear_flag(zram, index, ZRAM_UNDER_WB); zram_set_flag(zram, index, ZRAM_WB); zram_set_element(zram, index, blk_idx); blk_idx = 0; @@ -746,12 +882,15 @@ static ssize_t writeback_store(struct device *dev, spin_unlock(&zram->wb_limit_lock); next: zram_slot_unlock(zram, index); + release_pp_slot(zram, pps); } if (blk_idx) free_block_bdev(zram, blk_idx); __free_page(page); release_init_lock: + release_pp_ctl(zram, ctl); + atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); return ret; @@ -1342,19 +1481,17 @@ static void zram_free_page(struct zram *zram, size_t index) #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME zram->table[index].ac_time = 0; #endif - if (zram_test_flag(zram, index, ZRAM_IDLE)) - zram_clear_flag(zram, index, ZRAM_IDLE); + + zram_clear_flag(zram, index, ZRAM_IDLE); + zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE); + zram_clear_flag(zram, index, ZRAM_PP_SLOT); + zram_set_priority(zram, index, 0); if (zram_test_flag(zram, index, ZRAM_HUGE)) { zram_clear_flag(zram, index, ZRAM_HUGE); atomic64_dec(&zram->stats.huge_pages); } - if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) - zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE); - - zram_set_priority(zram, index, 0); - if (zram_test_flag(zram, index, ZRAM_WB)) { zram_clear_flag(zram, index, ZRAM_WB); free_block_bdev(zram, zram_get_element(zram, index)); @@ -1378,13 +1515,11 @@ static void zram_free_page(struct zram *zram, size_t index) zs_free(zram->mem_pool, handle); atomic64_sub(zram_get_obj_size(zram, index), - &zram->stats.compr_data_size); + &zram->stats.compr_data_size); out: atomic64_dec(&zram->stats.pages_stored); zram_set_handle(zram, index, 0); zram_set_obj_size(zram, index, 0); - WARN_ON_ONCE(zram->table[index].flags & - ~(1UL << ZRAM_UNDER_WB)); } /* @@ -1648,6 +1783,52 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, } #ifdef CONFIG_ZRAM_MULTI_COMP +#define RECOMPRESS_IDLE (1 << 0) +#define RECOMPRESS_HUGE (1 << 1) + +static int scan_slots_for_recompress(struct zram *zram, u32 mode, + struct zram_pp_ctl *ctl) +{ + unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; + struct zram_pp_slot *pps = NULL; + unsigned long index; + + for (index = 0; index < nr_pages; index++) { + if (!pps) + pps = kmalloc(sizeof(*pps), GFP_KERNEL); + if (!pps) + return -ENOMEM; + + INIT_LIST_HEAD(&pps->entry); + + zram_slot_lock(zram, index); + if (!zram_allocated(zram, index)) + goto next; + + if (mode & RECOMPRESS_IDLE && + !zram_test_flag(zram, index, ZRAM_IDLE)) + goto next; + + if (mode & RECOMPRESS_HUGE && + !zram_test_flag(zram, index, ZRAM_HUGE)) + goto next; + + if (zram_test_flag(zram, index, ZRAM_WB) || + zram_test_flag(zram, index, ZRAM_SAME) || + zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + goto next; + + pps->index = index; + place_pp_slot(zram, ctl, pps); + pps = NULL; +next: + zram_slot_unlock(zram, index); + } + + kfree(pps); + return 0; +} + /* * This function will decompress (unless it's ZRAM_HUGE) the page and then * attempt to compress it using provided compression algorithm priority @@ -1655,7 +1836,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, * * Corresponding ZRAM slot should be locked. 
*/ -static int zram_recompress(struct zram *zram, u32 index, struct page *page, +static int recompress_slot(struct zram *zram, u32 index, struct page *page, u64 *num_recomp_pages, u32 threshold, u32 prio, u32 prio_max) { @@ -1685,6 +1866,13 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, if (ret) return ret; + /* + * We touched this entry so mark it as non-IDLE. This makes sure that + * we don't preserve IDLE flag and don't incorrectly pick this entry + * for different post-processing type (e.g. writeback). + */ + zram_clear_flag(zram, index, ZRAM_IDLE); + class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); /* * Iterate the secondary comp algorithms list (in order of priority) @@ -1798,20 +1986,17 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, return 0; } -#define RECOMPRESS_IDLE (1 << 0) -#define RECOMPRESS_HUGE (1 << 1) - static ssize_t recompress_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; struct zram *zram = dev_to_zram(dev); - unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; char *args, *param, *val, *algo = NULL; u64 num_recomp_pages = ULLONG_MAX; + struct zram_pp_ctl *ctl = NULL; + struct zram_pp_slot *pps; u32 mode = 0, threshold = 0; - unsigned long index; struct page *page; ssize_t ret; @@ -1881,6 +2066,12 @@ static ssize_t recompress_store(struct device *dev, goto release_init_lock; } + /* Do not permit concurrent post-processing actions. */ + if (atomic_xchg(&zram->pp_in_progress, 1)) { + up_read(&zram->init_lock); + return -EAGAIN; + } + if (algo) { bool found = false; @@ -1907,36 +2098,32 @@ static ssize_t recompress_store(struct device *dev, goto release_init_lock; } + ctl = init_pp_ctl(); + if (!ctl) { + ret = -ENOMEM; + goto release_init_lock; + } + + scan_slots_for_recompress(zram, mode, ctl); + ret = len; - for (index = 0; index < nr_pages; index++) { + while ((pps = select_pp_slot(ctl))) { int err = 0; if (!num_recomp_pages) break; - zram_slot_lock(zram, index); - - if (!zram_allocated(zram, index)) - goto next; - - if (mode & RECOMPRESS_IDLE && - !zram_test_flag(zram, index, ZRAM_IDLE)) - goto next; - - if (mode & RECOMPRESS_HUGE && - !zram_test_flag(zram, index, ZRAM_HUGE)) - goto next; - - if (zram_test_flag(zram, index, ZRAM_WB) || - zram_test_flag(zram, index, ZRAM_UNDER_WB) || - zram_test_flag(zram, index, ZRAM_SAME) || - zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + zram_slot_lock(zram, pps->index); + if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT)) goto next; - err = zram_recompress(zram, index, page, &num_recomp_pages, - threshold, prio, prio_max); + err = recompress_slot(zram, pps->index, page, + &num_recomp_pages, threshold, + prio, prio_max); next: - zram_slot_unlock(zram, index); + zram_slot_unlock(zram, pps->index); + release_pp_slot(zram, pps); + if (err) { ret = err; break; @@ -1948,6 +2135,8 @@ next: __free_page(page); release_init_lock: + release_pp_ctl(zram, ctl); + atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); return ret; } @@ -2105,7 +2294,7 @@ static void zram_destroy_comps(struct zram *zram) { u32 prio; - for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { + for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { struct zcomp *comp = zram->comps[prio]; zram->comps[prio] = NULL; @@ -2144,6 +2333,7 @@ static void zram_reset_device(struct zram *zram) zram->disksize = 0; zram_destroy_comps(zram); memset(&zram->stats, 0, 
sizeof(zram->stats)); + atomic_set(&zram->pp_in_progress, 0); reset_bdev(zram); comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); @@ -2176,7 +2366,7 @@ static ssize_t disksize_store(struct device *dev, goto out_unlock; } - for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { + for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { if (!zram->comp_algs[prio]) continue; @@ -2381,6 +2571,9 @@ static int zram_add(void) zram->disk->fops = &zram_devops; zram->disk->private_data = zram; snprintf(zram->disk->disk_name, 16, "zram%d", device_id); + atomic_set(&zram->pp_in_progress, 0); + zram_comp_params_reset(zram); + comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */ set_capacity(zram->disk, 0); @@ -2388,9 +2581,6 @@ static int zram_add(void) if (ret) goto out_cleanup_disk; - zram_comp_params_reset(zram); - comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); - zram_debugfs_register(zram); pr_info("Added device: %s\n", zram->disk->disk_name); return device_id; diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index cfc8c059db63..134be414e210 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -47,7 +47,7 @@ enum zram_pageflags { ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */ ZRAM_WB, /* page is stored on backing_device */ - ZRAM_UNDER_WB, /* page is under writeback */ + ZRAM_PP_SLOT, /* Selected for post-processing */ ZRAM_HUGE, /* Incompressible page */ ZRAM_IDLE, /* not accessed page since last idle marking */ ZRAM_INCOMPRESSIBLE, /* none of the algorithms could compress it */ @@ -139,5 +139,6 @@ struct zram { #ifdef CONFIG_ZRAM_MEMORY_TRACKING struct dentry *debugfs_dir; #endif + atomic_t pp_in_progress; }; #endif diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index cf0be8a7939d..0fc9a510e059 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -162,7 +162,7 @@ config TCG_NSC config TCG_ATMEL tristate "Atmel TPM Interface" - depends on PPC64 || HAS_IOPORT_MAP + depends on HAS_IOPORT_MAP depends on HAS_IOPORT help If you have a TPM security chip from Atmel say Yes and it diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index b0f13c8ea79c..b70165b588ec 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -1390,5 +1390,4 @@ int tpm2_sessions_init(struct tpm_chip *chip) return rc; } -EXPORT_SYMBOL(tpm2_sessions_init); #endif /* CONFIG_TCG_TPM2_HMAC */ diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 9fb2defa9dc4..54a0360a3c95 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -15,7 +15,66 @@ */ #include "tpm.h" -#include "tpm_atmel.h" + +struct tpm_atmel_priv { + int region_size; + int have_region; + unsigned long base; + void __iomem *iobase; +}; + +#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + (offset)) +#define atmel_putb(val, chip, offset) \ + outb(val, atmel_get_priv(chip)->base + (offset)) +#define atmel_request_region request_region +#define atmel_release_region release_region +/* Atmel definitions */ +enum tpm_atmel_addr { + TPM_ATMEL_BASE_ADDR_LO = 0x08, + TPM_ATMEL_BASE_ADDR_HI = 0x09 +}; + +static inline int tpm_read_index(int base, int index) +{ + outb(index, base); + return inb(base + 1) & 0xFF; +} + +/* Verify this is a 1.1 Atmel TPM */ +static int atmel_verify_tpm11(void) +{ + /* verify that it is an Atmel part */ + if 
(tpm_read_index(TPM_ADDR, 4) != 'A' || + tpm_read_index(TPM_ADDR, 5) != 'T' || + tpm_read_index(TPM_ADDR, 6) != 'M' || + tpm_read_index(TPM_ADDR, 7) != 'L') + return 1; + + /* query chip for its version number */ + if (tpm_read_index(TPM_ADDR, 0x00) != 1 || + tpm_read_index(TPM_ADDR, 0x01) != 1) + return 1; + + /* This is an atmel supported part */ + return 0; +} + +/* Determine where to talk to device */ +static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size) +{ + int lo, hi; + + if (atmel_verify_tpm11() != 0) + return NULL; + + lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); + hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); + + *base = (hi << 8) | lo; + *region_size = 2; + + return ioport_map(*base, *region_size); +} /* write status bits */ enum tpm_atmel_write_status { @@ -142,7 +201,6 @@ static void atml_plat_remove(void) tpm_chip_unregister(chip); if (priv->have_region) atmel_release_region(priv->base, priv->region_size); - atmel_put_base_addr(priv->iobase); platform_device_unregister(pdev); } @@ -211,7 +269,6 @@ static int __init init_atmel(void) err_unreg_dev: platform_device_unregister(pdev); err_rel_reg: - atmel_put_base_addr(iobase); if (have_region) atmel_release_region(base, region_size); diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h deleted file mode 100644 index 7ac3f69dcf0f..000000000000 --- a/drivers/char/tpm/tpm_atmel.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2005 IBM Corporation - * - * Authors: - * Kylene Hall <kjhall@us.ibm.com> - * - * Maintained by: <tpmdd-devel@lists.sourceforge.net> - * - * Device driver for TCG/TCPA TPM (trusted platform module). - * Specifications at www.trustedcomputinggroup.org - * - * These difference are required on power because the device must be - * discovered through the device tree and iomap must be used to get - * around the need for holes in the io_page_mask. This does not happen - * automatically because the tpm is not a normal pci device and lives - * under the root node. 
- */ - -struct tpm_atmel_priv { - int region_size; - int have_region; - unsigned long base; - void __iomem *iobase; -}; - -#ifdef CONFIG_PPC64 - -#include <linux/of.h> - -#define atmel_getb(priv, offset) readb(priv->iobase + offset) -#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset) -#define atmel_request_region request_mem_region -#define atmel_release_region release_mem_region - -static inline void atmel_put_base_addr(void __iomem *iobase) -{ - iounmap(iobase); -} - -static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) -{ - struct device_node *dn; - unsigned long address, size; - const unsigned int *reg; - int reglen; - int naddrc; - int nsizec; - - dn = of_find_node_by_name(NULL, "tpm"); - - if (!dn) - return NULL; - - if (!of_device_is_compatible(dn, "AT97SC3201")) { - of_node_put(dn); - return NULL; - } - - reg = of_get_property(dn, "reg", ®len); - naddrc = of_n_addr_cells(dn); - nsizec = of_n_size_cells(dn); - - of_node_put(dn); - - - if (naddrc == 2) - address = ((unsigned long) reg[0] << 32) | reg[1]; - else - address = reg[0]; - - if (nsizec == 2) - size = - ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1]; - else - size = reg[naddrc]; - - *base = address; - *region_size = size; - return ioremap(*base, *region_size); -} -#else -#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset) -#define atmel_putb(val, chip, offset) \ - outb(val, atmel_get_priv(chip)->base + offset) -#define atmel_request_region request_region -#define atmel_release_region release_region -/* Atmel definitions */ -enum tpm_atmel_addr { - TPM_ATMEL_BASE_ADDR_LO = 0x08, - TPM_ATMEL_BASE_ADDR_HI = 0x09 -}; - -static inline int tpm_read_index(int base, int index) -{ - outb(index, base); - return inb(base+1) & 0xFF; -} - -/* Verify this is a 1.1 Atmel TPM */ -static int atmel_verify_tpm11(void) -{ - - /* verify that it is an Atmel part */ - if (tpm_read_index(TPM_ADDR, 4) != 'A' || - tpm_read_index(TPM_ADDR, 5) != 'T' || - tpm_read_index(TPM_ADDR, 6) != 'M' || - tpm_read_index(TPM_ADDR, 7) != 'L') - return 1; - - /* query chip for its version number */ - if (tpm_read_index(TPM_ADDR, 0x00) != 1 || - tpm_read_index(TPM_ADDR, 0x01) != 1) - return 1; - - /* This is an atmel supported part */ - return 0; -} - -static inline void atmel_put_base_addr(void __iomem *iobase) -{ -} - -/* Determine where to talk to device */ -static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) -{ - int lo, hi; - - if (atmel_verify_tpm11() != 0) - return NULL; - - lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); - hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); - - *base = (hi << 8) | lo; - *region_size = 2; - - return ioport_map(*base, *region_size); -} -#endif diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 1e5b107d1f3b..76d048f63d55 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -450,6 +450,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops tpm_ibmvtpm = { + .flags = TPM_OPS_AUTO_STARTUP, .recv = tpm_ibmvtpm_recv, .send = tpm_ibmvtpm_send, .cancel = tpm_ibmvtpm_cancel, @@ -690,20 +691,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (!strcmp(id->compat, "IBM,vtpm20")) chip->flags |= TPM_CHIP_FLAG_TPM2; - rc = tpm_get_timeouts(chip); - if (rc) - goto init_irq_cleanup; - - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = tpm2_get_cc_attrs_tbl(chip); - if (rc) - goto init_irq_cleanup; - - 
rc = tpm2_sessions_init(chip); - if (rc) - goto init_irq_cleanup; - } - return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c index adf22992138e..3b55a7b05c46 100644 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c @@ -17,6 +17,7 @@ */ #include <linux/acpi.h> +#include <linux/bug.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -30,11 +31,13 @@ #define TPM_CR50_MAX_BUFSIZE 64 #define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */ #define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */ -#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */ -#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */ +#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID for Cr50 H1 */ +#define TPM_TI50_DT_I2C_DID_VID 0x504a6666L /* Device and vendor ID for Ti50 DT */ +#define TPM_TI50_OT_I2C_DID_VID 0x50666666L /* Device and vendor ID for TI50 OT */ #define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */ #define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */ #define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */ +#define TPM_CR50_I2C_DEFAULT_LOC 0 #define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4)) #define TPM_I2C_STS(l) (0x0001 | ((l) << 4)) @@ -199,8 +202,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t }; int rc; - i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); @@ -219,7 +220,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t out: tpm_cr50_i2c_disable_tpm_irq(chip); - i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; @@ -261,8 +261,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, priv->buf[0] = addr; memcpy(priv->buf + 1, buffer, len); - i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); @@ -276,7 +274,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, out: tpm_cr50_i2c_disable_tpm_irq(chip); - i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; @@ -285,25 +282,26 @@ out: } /** - * tpm_cr50_check_locality() - Verify TPM locality 0 is active. + * tpm_cr50_check_locality() - Verify if required TPM locality is active. * @chip: A TPM chip. + * @loc: Locality to be verified * * Return: - * - 0: Success. + * - loc: Success. * - -errno: A POSIX error code. */ -static int tpm_cr50_check_locality(struct tpm_chip *chip) +static int tpm_cr50_check_locality(struct tpm_chip *chip, int loc) { u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY; u8 buf; int rc; - rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); + rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf)); if (rc < 0) return rc; if ((buf & mask) == mask) - return 0; + return loc; return -EIO; } @@ -311,53 +309,72 @@ static int tpm_cr50_check_locality(struct tpm_chip *chip) /** * tpm_cr50_release_locality() - Release TPM locality. * @chip: A TPM chip. - * @force: Flag to force release if set. + * @loc: Locality to be released + * + * Return: + * - 0: Success. + * - -errno: A POSIX error code. 
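(For context on the locality rework: once a driver populates .request_locality/.relinquish_locality, as cr50_i2c does below, the TPM core brackets each transmission with them, and the value returned by the request hook is what chip->locality carries into macros such as TPM_I2C_STS(chip->locality). A simplified sketch of the core's behaviour, not the exact tpm_try_transmit() code:

	rc = chip->ops->request_locality(chip, 0);
	if (rc < 0)
		return rc;
	chip->locality = rc;

	rc = chip->ops->send(chip, buf, len);
	/* ... poll for completion, then chip->ops->recv() ... */

	chip->ops->relinquish_locality(chip, chip->locality);
	chip->locality = -1;

This pairing is also why the new cr50 code may return from tpm_cr50_request_locality() with the I2C bus lock held: the lock is only dropped in tpm_cr50_release_locality(), keeping the whole command sequence atomic on the bus.)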
*/ -static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force) +static int tpm_cr50_release_locality(struct tpm_chip *chip, int loc) { + struct i2c_client *client = to_i2c_client(chip->dev.parent); u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING; - u8 addr = TPM_I2C_ACCESS(0); + u8 addr = TPM_I2C_ACCESS(loc); u8 buf; + int rc; - if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0) - return; + rc = tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)); + if (rc < 0) + goto unlock_out; - if (force || (buf & mask) == mask) { + if ((buf & mask) == mask) { buf = TPM_ACCESS_ACTIVE_LOCALITY; - tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); + rc = tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); } + +unlock_out: + i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); + return rc; } /** - * tpm_cr50_request_locality() - Request TPM locality 0. + * tpm_cr50_request_locality() - Request TPM locality. * @chip: A TPM chip. + * @loc: Locality to be requested. * * Return: - * - 0: Success. + * - loc: Success. * - -errno: A POSIX error code. */ -static int tpm_cr50_request_locality(struct tpm_chip *chip) +static int tpm_cr50_request_locality(struct tpm_chip *chip, int loc) { + struct i2c_client *client = to_i2c_client(chip->dev.parent); u8 buf = TPM_ACCESS_REQUEST_USE; unsigned long stop; int rc; - if (!tpm_cr50_check_locality(chip)) - return 0; + i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); + if (tpm_cr50_check_locality(chip, loc) == loc) + return loc; + + rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf)); if (rc < 0) - return rc; + goto unlock_out; stop = jiffies + chip->timeout_a; do { - if (!tpm_cr50_check_locality(chip)) - return 0; + if (tpm_cr50_check_locality(chip, loc) == loc) + return loc; msleep(TPM_CR50_TIMEOUT_SHORT_MS); } while (time_before(jiffies, stop)); - return -ETIMEDOUT; + rc = -ETIMEDOUT; + +unlock_out: + i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); + return rc; } /** @@ -373,7 +390,7 @@ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip) { u8 buf[4]; - if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) + if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) return 0; return buf[0]; @@ -389,7 +406,7 @@ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip) { u8 buf[4] = { TPM_STS_COMMAND_READY }; - tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf)); + tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)); msleep(TPM_CR50_TIMEOUT_SHORT_MS); } @@ -419,7 +436,7 @@ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask, stop = jiffies + chip->timeout_b; do { - if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) { + if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) { msleep(TPM_CR50_TIMEOUT_SHORT_MS); continue; } @@ -453,7 +470,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL; size_t burstcnt, cur, len, expected; - u8 addr = TPM_I2C_DATA_FIFO(0); + u8 addr = TPM_I2C_DATA_FIFO(chip->locality); u32 status; int rc; @@ -515,7 +532,6 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) goto out_err; } - tpm_cr50_release_locality(chip, false); return cur; out_err: @@ -523,7 +539,6 @@ out_err: if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); - tpm_cr50_release_locality(chip, 
false); return rc; } @@ -545,10 +560,6 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) u32 status; int rc; - rc = tpm_cr50_request_locality(chip); - if (rc < 0) - return rc; - /* Wait until TPM is ready for a command */ stop = jiffies + chip->timeout_b; while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) { @@ -577,7 +588,8 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) * that is inserted by tpm_cr50_i2c_write() */ limit = min_t(size_t, burstcnt - 1, len); - rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit); + rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(chip->locality), + &buf[sent], limit); if (rc < 0) { dev_err(&chip->dev, "Write failed\n"); goto out_err; @@ -598,7 +610,7 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) } /* Start the TPM command */ - rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go, + rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), tpm_go, sizeof(tpm_go)); if (rc < 0) { dev_err(&chip->dev, "Start command failed\n"); @@ -611,7 +623,6 @@ out_err: if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); - tpm_cr50_release_locality(chip, false); return rc; } @@ -650,6 +661,8 @@ static const struct tpm_class_ops cr50_i2c = { .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = &tpm_cr50_i2c_req_canceled, + .request_locality = &tpm_cr50_request_locality, + .relinquish_locality = &tpm_cr50_release_locality, }; #ifdef CONFIG_ACPI @@ -669,6 +682,27 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match); #endif /** + * tpm_cr50_vid_to_name() - Maps VID to name. + * @vendor: Vendor identifier to map to name + * + * Return: + * A valid string for the vendor, or "unknown" if the ID is not recognized + */ +static const char *tpm_cr50_vid_to_name(u32 vendor) +{ + switch (vendor) { + case TPM_CR50_I2C_DID_VID: + return "cr50"; + case TPM_TI50_DT_I2C_DID_VID: + return "ti50 DT"; + case TPM_TI50_OT_I2C_DID_VID: + return "ti50 OT"; + default: + return "unknown"; + } +} + +/** * tpm_cr50_i2c_probe() - Driver probe function. * @client: I2C client information. * @@ -684,6 +718,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) u32 vendor; u8 buf[4]; int rc; + int loc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; @@ -726,29 +761,37 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) TPM_CR50_TIMEOUT_NOIRQ_MS); } - rc = tpm_cr50_request_locality(chip); - if (rc < 0) { + loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC); + if (loc < 0) { dev_err(dev, "Could not request locality\n"); - return rc; + return loc; } /* Read four bytes from DID_VID register */ - rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf)); + rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(loc), buf, sizeof(buf)); if (rc < 0) { dev_err(dev, "Could not read vendor id\n"); - tpm_cr50_release_locality(chip, true); + if (tpm_cr50_release_locality(chip, loc)) + dev_err(dev, "Could not release locality\n"); + return rc; + } + + rc = tpm_cr50_release_locality(chip, loc); + if (rc) { + dev_err(dev, "Could not release locality\n"); return rc; } vendor = le32_to_cpup((__le32 *)buf); - if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) { + if (vendor != TPM_CR50_I2C_DID_VID && + vendor != TPM_TI50_DT_I2C_DID_VID && + vendor != TPM_TI50_OT_I2C_DID_VID) { dev_err(dev, "Vendor ID did not match! 
ID was %08x\n", vendor); - tpm_cr50_release_locality(chip, true); return -ENODEV; } dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n", - vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50", + tpm_cr50_vid_to_name(vendor), client->addr, client->irq, vendor >> 16); return tpm_chip_register(chip); } @@ -772,7 +815,6 @@ static void tpm_cr50_i2c_remove(struct i2c_client *client) } tpm_chip_unregister(chip); - tpm_cr50_release_locality(chip, true); } static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume); diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig index 54ece9207055..08e26137f3d9 100644 --- a/drivers/clk/.kunitconfig +++ b/drivers/clk/.kunitconfig @@ -1,5 +1,6 @@ CONFIG_KUNIT=y CONFIG_OF=y +CONFIG_OF_OVERLAY=y CONFIG_COMMON_CLK=y CONFIG_CLK_KUNIT_TEST=y CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 299bc678ed1b..713573b6c86c 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -226,6 +226,17 @@ config COMMON_CLK_EP93XX help This driver supports the SoC clocks on the Cirrus Logic ep93xx. +config COMMON_CLK_EYEQ + bool "Clock driver for the Mobileye EyeQ platform" + depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST + select AUXILIARY_BUS + default MACH_EYEQ5 || MACH_EYEQ6H + help + This driver provides clocks found on Mobileye EyeQ5, EyeQ6L and EyeQ6H + SoCs. Controllers live in shared register regions called OLB. The driver + provides read-only PLLs, derived from the main crystal clock (which + must be constant). It also exposes some divider clocks. + config COMMON_CLK_FSL_FLEXSPI tristate "Clock driver for FlexSPI on Layerscape SoCs" depends on ARCH_LAYERSCAPE || COMPILE_TEST @@ -259,7 +270,7 @@ config COMMON_CLK_LAN966X tristate "Generic Clock Controller driver for LAN966X SoC" depends on HAS_IOMEM depends on OF - depends on SOC_LAN966 || COMPILE_TEST + depends on SOC_LAN966 || ARCH_LAN969X || COMPILE_TEST help This driver provides support for the Generic Clock Controller (GCK) on LAN966X SoC. GCK generates and supplies clocks to various peripherals @@ -291,7 +302,7 @@ config CLK_TWL help Enable support for controlling the clock resources on TWL family PMICs. These devices have some 32K clock outputs which can be - controlled by software. For now, only the TWL6032 clocks are + controlled by software. For now, the TWL6032 and TWL6030 clocks are supported. config CLK_TWL6040 @@ -342,6 +353,14 @@ config COMMON_CLK_LOCHNAGAR This driver supports the clocking features of the Cirrus Logic Lochnagar audio development board. +config COMMON_CLK_NPCM8XX + tristate "Clock driver for the NPCM8XX SoC Family" + depends on ARCH_NPCM || COMPILE_TEST + help + This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC Family. + All the clocks are initialized by the bootloader, so this driver + only allows reading the current settings directly from the hardware. + config COMMON_CLK_LOONGSON2 bool "Clock driver for Loongson-2 SoC" depends on LOONGARCH || COMPILE_TEST @@ -517,7 +536,6 @@ config CLK_KUNIT_TEST tristate "Basic Clock Framework KUnit Tests" if !KUNIT_ALL_TESTS depends on KUNIT default KUNIT_ALL_TESTS - select OF_OVERLAY if OF select DTC help KUnit tests for the common clock framework. @@ -526,7 +544,6 @@ config CLK_FIXED_RATE_KUNIT_TEST tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS depends on KUNIT default KUNIT_ALL_TESTS - select OF_OVERLAY if OF select DTC help KUnit tests for the basic fixed rate clk type. 
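The two read-only providers introduced above (COMMON_CLK_EYEQ, COMMON_CLK_NPCM8XX) imply the same consumer model: rates are fixed by the bootloader or PLL state, so a peripheral driver only looks the clock up and queries it. A minimal consumer-side sketch of that model, assuming a hypothetical platform device and clock name "per" (neither is part of this series):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Look up and enable the clock; its rate was set by the bootloader. */
	clk = devm_clk_get_enabled(&pdev->dev, "per");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Read-only provider: clk_set_rate() is not supported, only query. */
	dev_info(&pdev->dev, "per clock runs at %lu Hz\n", clk_get_rate(clk));

	return 0;
}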
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index fb8878a5d7d9..bf4bd45adc3a 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -4,6 +4,20 @@ obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o obj-$(CONFIG_COMMON_CLK) += clk.o obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o clk-test-y := clk_test.o \ + kunit_clk_assigned_rates_u64_one.dtbo.o \ + kunit_clk_assigned_rates_u64_one_consumer.dtbo.o \ + kunit_clk_assigned_rates_u64_multiple.dtbo.o \ + kunit_clk_assigned_rates_u64_multiple_consumer.dtbo.o \ + kunit_clk_assigned_rates_multiple.dtbo.o \ + kunit_clk_assigned_rates_multiple_consumer.dtbo.o \ + kunit_clk_assigned_rates_null.dtbo.o \ + kunit_clk_assigned_rates_null_consumer.dtbo.o \ + kunit_clk_assigned_rates_one.dtbo.o \ + kunit_clk_assigned_rates_one_consumer.dtbo.o \ + kunit_clk_assigned_rates_without.dtbo.o \ + kunit_clk_assigned_rates_without_consumer.dtbo.o \ + kunit_clk_assigned_rates_zero.dtbo.o \ + kunit_clk_assigned_rates_zero_consumer.dtbo.o \ kunit_clk_parent_data_test.dtbo.o obj-$(CONFIG_COMMON_CLK) += clk-divider.o obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o @@ -42,6 +56,7 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o +obj-$(CONFIG_COMMON_CLK_EYEQ) += clk-eyeq.o obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o @@ -62,6 +77,7 @@ obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o +obj-$(CONFIG_COMMON_CLK_NPCM8XX) += clk-npcm8xx.o obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c index 39472a51530a..457a48d48941 100644 --- a/drivers/clk/clk-apple-nco.c +++ b/drivers/clk/clk-apple-nco.c @@ -297,6 +297,9 @@ static int applnco_probe(struct platform_device *pdev) memset(&init, 0, sizeof(init)); init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-%d", np->name, i); + if (!init.name) + return -ENOMEM; + init.ops = &applnco_ops; init.parent_data = &pdata; init.num_parents = 1; diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c index bf4d8ddc93ae..934e53a96ddd 100644 --- a/drivers/clk/clk-axi-clkgen.c +++ b/drivers/clk/clk-axi-clkgen.c @@ -7,6 +7,7 @@ */ #include <linux/platform_device.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/slab.h> #include <linux/io.h> @@ -512,6 +513,7 @@ static int axi_clkgen_probe(struct platform_device *pdev) struct clk_init_data init; const char *parent_names[2]; const char *clk_name; + struct clk *axi_clk; unsigned int i; int ret; @@ -528,8 +530,24 @@ static int axi_clkgen_probe(struct platform_device *pdev) return PTR_ERR(axi_clkgen->base); init.num_parents = of_clk_get_parent_count(pdev->dev.of_node); - if (init.num_parents < 1 || init.num_parents > 2) - return -EINVAL; + + axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk"); + if (!IS_ERR(axi_clk)) { + if (init.num_parents < 2 || init.num_parents > 3) + return -EINVAL; + + init.num_parents -= 1; + } else { + /* + * Legacy... So that old DTs which do not have clock-names still + * work. In this case we don't explicitly enable the AXI bus + * clock. 
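+ * (When "s_axi_aclk" is found, the clocks list also carries the AXI bus + * clock besides the 1 or 2 mux parents, hence the 2..3 check and the + * num_parents decrement above; legacy DTs list only the mux parents, + * hence the 1..2 check below.)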
+ */ + if (PTR_ERR(axi_clk) != -ENOENT) + return PTR_ERR(axi_clk); + if (init.num_parents < 1 || init.num_parents > 2) + return -EINVAL; + } for (i = 0; i < init.num_parents; i++) { parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i); diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c index dd3d42d9ad86..d0705bb03a2a 100644 --- a/drivers/clk/clk-cdce706.c +++ b/drivers/clk/clk-cdce706.c @@ -678,7 +678,7 @@ MODULE_DEVICE_TABLE(of, cdce706_dt_match); #endif static const struct i2c_device_id cdce706_id[] = { - { "cdce706", 0 }, + { "cdce706" }, { } }; MODULE_DEVICE_TABLE(i2c, cdce706_id); diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c index e48be7a6c0e2..c51818c1af98 100644 --- a/drivers/clk/clk-cdce925.c +++ b/drivers/clk/clk-cdce925.c @@ -601,7 +601,7 @@ static int cdce925_regulator_enable(struct device *dev, const char *name) /* The CDCE925 uses a funky way to read/write registers. Bulk mode is * just weird, so just use the single byte mode exclusively. */ -static struct regmap_bus regmap_cdce925_bus = { +static const struct regmap_bus regmap_cdce925_bus = { .write = cdce925_regmap_i2c_write, .read = cdce925_regmap_i2c_read, }; diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index 82ae1f26e634..5368d92d9b39 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -218,8 +218,8 @@ static void devm_clk_bulk_release_all_enable(struct device *dev, void *res) clk_bulk_put_all(devres->num_clks, devres->clks); } -int __must_check devm_clk_bulk_get_all_enable(struct device *dev, - struct clk_bulk_data **clks) +int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, + struct clk_bulk_data **clks) { struct clk_bulk_devres *devres; int ret; @@ -244,11 +244,12 @@ int __must_check devm_clk_bulk_get_all_enable(struct device *dev, } else { clk_bulk_put_all(devres->num_clks, devres->clks); devres_free(devres); + return ret; } - return ret; + return devres->num_clks; } -EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable); +EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled); static int devm_clk_match(struct device *dev, void *res, void *data) { diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index a2c2b5203b0a..c1f426b8a504 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -72,6 +72,8 @@ static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width, return clk_div_mask(width); if (flags & CLK_DIVIDER_POWER_OF_TWO) return 1 << clk_div_mask(width); + if (flags & CLK_DIVIDER_EVEN_INTEGERS) + return 2 * (clk_div_mask(width) + 1); if (table) return _get_table_maxdiv(table, width); return clk_div_mask(width) + 1; @@ -97,6 +99,8 @@ static unsigned int _get_div(const struct clk_div_table *table, return 1 << val; if (flags & CLK_DIVIDER_MAX_AT_ZERO) return val ? val : clk_div_mask(width) + 1; + if (flags & CLK_DIVIDER_EVEN_INTEGERS) + return 2 * (val + 1); if (table) return _get_table_div(table, val); return val + 1; @@ -122,6 +126,8 @@ static unsigned int _get_val(const struct clk_div_table *table, return __ffs(div); if (flags & CLK_DIVIDER_MAX_AT_ZERO) return (div == clk_div_mask(width) + 1) ? 
0 : div; + if (flags & CLK_DIVIDER_EVEN_INTEGERS) + return (div >> 1) - 1; if (table) return _get_table_val(table, div); return div - 1; @@ -538,7 +544,8 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev, struct device_node *np, const char *name, const char *parent_name, const struct clk_hw *parent_hw, const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + void __iomem *reg, u8 shift, u8 width, + unsigned long clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock) { struct clk_divider *div; @@ -610,8 +617,8 @@ EXPORT_SYMBOL_GPL(__clk_hw_register_divider); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, - u8 clk_divider_flags, const struct clk_div_table *table, - spinlock_t *lock) + unsigned long clk_divider_flags, + const struct clk_div_table *table, spinlock_t *lock) { struct clk_hw *hw; @@ -664,7 +671,8 @@ struct clk_hw *__devm_clk_hw_register_divider(struct device *dev, struct device_node *np, const char *name, const char *parent_name, const struct clk_hw *parent_hw, const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + void __iomem *reg, u8 shift, u8 width, + unsigned long clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock) { struct clk_hw **ptr, *hw; diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c index 22fbea61c3dc..e52c5460e927 100644 --- a/drivers/clk/clk-en7523.c +++ b/drivers/clk/clk-en7523.c @@ -3,8 +3,10 @@ #include <linux/delay.h> #include <linux/clk-provider.h> #include <linux/io.h> +#include <linux/mfd/syscon.h> #include <linux/platform_device.h> #include <linux/property.h> +#include <linux/regmap.h> #include <linux/reset-controller.h> #include <dt-bindings/clock/en7523-clk.h> #include <dt-bindings/reset/airoha,en7581-reset.h> @@ -31,19 +33,14 @@ #define REG_RESET_CONTROL_PCIE1 BIT(27) #define REG_RESET_CONTROL_PCIE2 BIT(26) /* EN7581 */ -#define REG_PCIE0_MEM 0x00 -#define REG_PCIE0_MEM_MASK 0x04 -#define REG_PCIE1_MEM 0x08 -#define REG_PCIE1_MEM_MASK 0x0c -#define REG_PCIE2_MEM 0x10 -#define REG_PCIE2_MEM_MASK 0x14 #define REG_NP_SCU_PCIC 0x88 #define REG_NP_SCU_SSTR 0x9c #define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13) #define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11) +#define REG_CRYPTO_CLKSRC2 0x20c -#define REG_RST_CTRL2 0x00 -#define REG_RST_CTRL1 0x04 +#define REG_RST_CTRL2 0x830 +#define REG_RST_CTRL1 0x834 struct en_clk_desc { int id; @@ -79,12 +76,8 @@ struct en_rst_data { struct en_clk_soc_data { const struct clk_ops pcie_ops; - struct { - const u16 *bank_ofs; - const u16 *idx_map; - u16 idx_map_nr; - } reset; - int (*hw_init)(struct platform_device *pdev, void __iomem *np_base); + int (*hw_init)(struct platform_device *pdev, + struct clk_hw_onecell_data *clk_data); }; static const u32 gsw_base[] = { 400000000, 500000000 }; @@ -92,6 +85,10 @@ static const u32 emi_base[] = { 333000000, 400000000 }; static const u32 bus_base[] = { 500000000, 540000000 }; static const u32 slic_base[] = { 100000000, 3125000 }; static const u32 npu_base[] = { 333000000, 400000000, 500000000 }; +/* EN7581 */ +static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 }; +static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 }; +static const u32 crypto_base[] = { 540000000, 480000000 }; static const struct en_clk_desc 
en7523_base_clks[] = { { @@ -189,6 +186,102 @@ static const struct en_clk_desc en7523_base_clks[] = { } }; +static const struct en_clk_desc en7581_base_clks[] = { + { + .id = EN7523_CLK_GSW, + .name = "gsw", + + .base_reg = REG_GSW_CLK_DIV_SEL, + .base_bits = 1, + .base_shift = 8, + .base_values = gsw_base, + .n_base_values = ARRAY_SIZE(gsw_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + .div_offset = 1, + }, { + .id = EN7523_CLK_EMI, + .name = "emi", + + .base_reg = REG_EMI_CLK_DIV_SEL, + .base_bits = 2, + .base_shift = 8, + .base_values = emi7581_base, + .n_base_values = ARRAY_SIZE(emi7581_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + .div_offset = 1, + }, { + .id = EN7523_CLK_BUS, + .name = "bus", + + .base_reg = REG_BUS_CLK_DIV_SEL, + .base_bits = 1, + .base_shift = 8, + .base_values = bus_base, + .n_base_values = ARRAY_SIZE(bus_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + .div_offset = 1, + }, { + .id = EN7523_CLK_SLIC, + .name = "slic", + + .base_reg = REG_SPI_CLK_FREQ_SEL, + .base_bits = 1, + .base_shift = 0, + .base_values = slic_base, + .n_base_values = ARRAY_SIZE(slic_base), + + .div_reg = REG_SPI_CLK_DIV_SEL, + .div_bits = 5, + .div_shift = 24, + .div_val0 = 20, + .div_step = 2, + }, { + .id = EN7523_CLK_SPI, + .name = "spi", + + .base_reg = REG_SPI_CLK_DIV_SEL, + + .base_value = 400000000, + + .div_bits = 5, + .div_shift = 8, + .div_val0 = 40, + .div_step = 2, + }, { + .id = EN7523_CLK_NPU, + .name = "npu", + + .base_reg = REG_NPU_CLK_DIV_SEL, + .base_bits = 2, + .base_shift = 8, + .base_values = npu7581_base, + .n_base_values = ARRAY_SIZE(npu7581_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + .div_offset = 1, + }, { + .id = EN7523_CLK_CRYPTO, + .name = "crypto", + + .base_reg = REG_CRYPTO_CLKSRC2, + .base_bits = 1, + .base_shift = 0, + .base_values = crypto_base, + .n_base_values = ARRAY_SIZE(crypto_base), + } +}; + static const u16 en7581_rst_ofs[] = { REG_RST_CTRL2, REG_RST_CTRL1, @@ -252,15 +345,11 @@ static const u16 en7581_rst_map[] = { [EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31, }; -static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i) +static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val) { - const struct en_clk_desc *desc = &en7523_base_clks[i]; - u32 val; - if (!desc->base_bits) return desc->base_value; - val = readl(base + desc->base_reg); val >>= desc->base_shift; val &= (1 << desc->base_bits) - 1; @@ -270,16 +359,11 @@ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i) return desc->base_values[val]; } -static u32 en7523_get_div(void __iomem *base, int i) +static u32 en7523_get_div(const struct en_clk_desc *desc, u32 val) { - const struct en_clk_desc *desc = &en7523_base_clks[i]; - u32 reg, val; - if (!desc->div_bits) return 1; - reg = desc->div_reg ? desc->div_reg : desc->base_reg; - val = readl(base + reg); val >>= desc->div_shift; val &= (1 << desc->div_bits) - 1; @@ -412,44 +496,83 @@ static void en7581_pci_disable(struct clk_hw *hw) usleep_range(1000, 2000); } -static int en7581_clk_hw_init(struct platform_device *pdev, - void __iomem *np_base) +static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data, + void __iomem *base, void __iomem *np_base) { - void __iomem *pb_base; - u32 val; + struct clk_hw *hw; + u32 rate; + int i; + + for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) { + const struct en_clk_desc *desc = &en7523_base_clks[i]; + u32 reg = desc->div_reg ? 
desc->div_reg : desc->base_reg; + u32 val = readl(base + desc->base_reg); - pb_base = devm_platform_ioremap_resource(pdev, 3); - if (IS_ERR(pb_base)) - return PTR_ERR(pb_base); + rate = en7523_get_base_rate(desc, val); + val = readl(base + reg); + rate /= en7523_get_div(desc, val); - val = readl(np_base + REG_NP_SCU_SSTR); - val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK); - writel(val, np_base + REG_NP_SCU_SSTR); - val = readl(np_base + REG_NP_SCU_PCIC); - writel(val | 3, np_base + REG_NP_SCU_PCIC); + hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %ld\n", + desc->name, PTR_ERR(hw)); + continue; + } + + clk_data->hws[desc->id] = hw; + } + + hw = en7523_register_pcie_clk(dev, np_base); + clk_data->hws[EN7523_CLK_PCIE] = hw; + + clk_data->num = EN7523_NUM_CLOCKS; +} + +static int en7523_clk_hw_init(struct platform_device *pdev, + struct clk_hw_onecell_data *clk_data) +{ + void __iomem *base, *np_base; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); - writel(0x20000000, pb_base + REG_PCIE0_MEM); - writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK); - writel(0x24000000, pb_base + REG_PCIE1_MEM); - writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK); - writel(0x28000000, pb_base + REG_PCIE2_MEM); - writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK); + np_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(np_base)) + return PTR_ERR(np_base); + + en7523_register_clocks(&pdev->dev, clk_data, base, np_base); return 0; } -static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data, - void __iomem *base, void __iomem *np_base) +static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data, + struct regmap *map, void __iomem *base) { struct clk_hw *hw; u32 rate; int i; - for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) { - const struct en_clk_desc *desc = &en7523_base_clks[i]; + for (i = 0; i < ARRAY_SIZE(en7581_base_clks); i++) { + const struct en_clk_desc *desc = &en7581_base_clks[i]; + u32 val, reg = desc->div_reg ? 
desc->div_reg : desc->base_reg; + int err; + + err = regmap_read(map, desc->base_reg, &val); + if (err) { + pr_err("Failed reading fixed clk rate %s: %d\n", + desc->name, err); + continue; + } + rate = en7523_get_base_rate(desc, val); - rate = en7523_get_base_rate(base, i); - rate /= en7523_get_div(base, i); + err = regmap_read(map, reg, &val); + if (err) { + pr_err("Failed reading fixed clk div %s: %d\n", + desc->name, err); + continue; + } + rate /= en7523_get_div(desc, val); hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate); if (IS_ERR(hw)) { @@ -461,7 +584,7 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat clk_data->hws[desc->id] = hw; } - hw = en7523_register_pcie_clk(dev, np_base); + hw = en7523_register_pcie_clk(dev, base); clk_data->hws[EN7523_CLK_PCIE] = hw; clk_data->num = EN7523_NUM_CLOCKS; @@ -516,38 +639,27 @@ static int en7523_reset_xlate(struct reset_controller_dev *rcdev, return rst_data->idx_map[reset_spec->args[0]]; } -static const struct reset_control_ops en7523_reset_ops = { +static const struct reset_control_ops en7581_reset_ops = { .assert = en7523_reset_assert, .deassert = en7523_reset_deassert, .status = en7523_reset_status, }; -static int en7523_reset_register(struct platform_device *pdev, - const struct en_clk_soc_data *soc_data) +static int en7581_reset_register(struct device *dev, void __iomem *base) { - struct device *dev = &pdev->dev; struct en_rst_data *rst_data; - void __iomem *base; - - /* no reset lines available */ - if (!soc_data->reset.idx_map_nr) - return 0; - - base = devm_platform_ioremap_resource(pdev, 2); - if (IS_ERR(base)) - return PTR_ERR(base); rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL); if (!rst_data) return -ENOMEM; - rst_data->bank_ofs = soc_data->reset.bank_ofs; - rst_data->idx_map = soc_data->reset.idx_map; + rst_data->bank_ofs = en7581_rst_ofs; + rst_data->idx_map = en7581_rst_map; rst_data->base = base; - rst_data->rcdev.nr_resets = soc_data->reset.idx_map_nr; + rst_data->rcdev.nr_resets = ARRAY_SIZE(en7581_rst_map); rst_data->rcdev.of_xlate = en7523_reset_xlate; - rst_data->rcdev.ops = &en7523_reset_ops; + rst_data->rcdev.ops = &en7581_reset_ops; rst_data->rcdev.of_node = dev->of_node; rst_data->rcdev.of_reset_n_cells = 1; rst_data->rcdev.owner = THIS_MODULE; @@ -556,28 +668,38 @@ static int en7523_reset_register(struct platform_device *pdev, return devm_reset_controller_register(dev, &rst_data->rcdev); } -static int en7523_clk_probe(struct platform_device *pdev) +static int en7581_clk_hw_init(struct platform_device *pdev, + struct clk_hw_onecell_data *clk_data) { - struct device_node *node = pdev->dev.of_node; - const struct en_clk_soc_data *soc_data; - struct clk_hw_onecell_data *clk_data; - void __iomem *base, *np_base; - int r; + struct regmap *map; + void __iomem *base; + u32 val; + + map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu"); + if (IS_ERR(map)) + return PTR_ERR(map); base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); - np_base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(np_base)) - return PTR_ERR(np_base); + en7581_register_clocks(&pdev->dev, clk_data, map, base); - soc_data = device_get_match_data(&pdev->dev); - if (soc_data->hw_init) { - r = soc_data->hw_init(pdev, np_base); - if (r) - return r; - } + val = readl(base + REG_NP_SCU_SSTR); + val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK); + writel(val, base + REG_NP_SCU_SSTR); + val = readl(base + REG_NP_SCU_PCIC); + writel(val | 3, 
base + REG_NP_SCU_PCIC); + + return en7581_reset_register(&pdev->dev, base); +} + +static int en7523_clk_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct en_clk_soc_data *soc_data; + struct clk_hw_onecell_data *clk_data; + int r; clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws, EN7523_NUM_CLOCKS), @@ -585,21 +707,12 @@ static int en7523_clk_probe(struct platform_device *pdev) if (!clk_data) return -ENOMEM; - en7523_register_clocks(&pdev->dev, clk_data, base, np_base); - - r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + soc_data = device_get_match_data(&pdev->dev); + r = soc_data->hw_init(pdev, clk_data); if (r) - return dev_err_probe(&pdev->dev, r, "Could not register clock provider: %s\n", - pdev->name); - - r = en7523_reset_register(pdev, soc_data); - if (r) { - of_clk_del_provider(node); - return dev_err_probe(&pdev->dev, r, "Could not register reset controller: %s\n", - pdev->name); - } + return r; - return 0; + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct en_clk_soc_data en7523_data = { @@ -608,6 +721,7 @@ static const struct en_clk_soc_data en7523_data = { .prepare = en7523_pci_prepare, .unprepare = en7523_pci_unprepare, }, + .hw_init = en7523_clk_hw_init, }; static const struct en_clk_soc_data en7581_data = { @@ -616,11 +730,6 @@ static const struct en_clk_soc_data en7581_data = { .enable = en7581_pci_enable, .disable = en7581_pci_disable, }, - .reset = { - .bank_ofs = en7581_rst_ofs, - .idx_map = en7581_rst_map, - .idx_map_nr = ARRAY_SIZE(en7581_rst_map), - }, .hw_init = en7581_clk_hw_init, }; diff --git a/drivers/clk/clk-eyeq.c b/drivers/clk/clk-eyeq.c new file mode 100644 index 000000000000..640c25788487 --- /dev/null +++ b/drivers/clk/clk-eyeq.c @@ -0,0 +1,859 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PLL clock driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms. + * + * This controller handles: + * - Read-only PLLs, all derived from the same main crystal clock. + * - Divider clocks, which are children of PLLs. + * - Fixed factor clocks, also children of PLLs. + * + * Parent clock is expected to be constant. This driver's registers live in a + * shared region called OLB. Some PLLs and fixed-factors are initialised early + * by of_clk_init(); if so, two clk providers are registered. + * + * We use eqc_ as prefix, as in "EyeQ Clock", but way shorter. + * + * Copyright (C) 2024 Mobileye Vision Technologies Ltd. + */ + +/* + * Set pr_fmt() for printing from eqc_early_init(). + * It is called at of_clk_init() stage (read: really early). + */ +#define pr_fmt(fmt) "clk-eyeq: " fmt + +#include <linux/array_size.h> +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/overflow.h> +#include <linux/platform_device.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <dt-bindings/clock/mobileye,eyeq5-clk.h> + +/* In frac mode, it enables fractional noise canceling DAC. Else, no function. 
*/ +#define PCSR0_DAC_EN BIT(0) +/* Fractional or integer mode */ +#define PCSR0_DSM_EN BIT(1) +#define PCSR0_PLL_EN BIT(2) +/* All clocks output held at 0 */ +#define PCSR0_FOUTPOSTDIV_EN BIT(3) +#define PCSR0_POST_DIV1 GENMASK(6, 4) +#define PCSR0_POST_DIV2 GENMASK(9, 7) +#define PCSR0_REF_DIV GENMASK(15, 10) +#define PCSR0_INTIN GENMASK(27, 16) +#define PCSR0_BYPASS BIT(28) +/* Bits 30..29 are reserved */ +#define PCSR0_PLL_LOCKED BIT(31) + +#define PCSR1_RESET BIT(0) +#define PCSR1_SSGC_DIV GENMASK(4, 1) +/* Spread amplitude (% = 0.1 * SPREAD[4:0]) */ +#define PCSR1_SPREAD GENMASK(9, 5) +#define PCSR1_DIS_SSCG BIT(10) +/* Down-spread or center-spread */ +#define PCSR1_DOWN_SPREAD BIT(11) +#define PCSR1_FRAC_IN GENMASK(31, 12) + +struct eqc_pll { + unsigned int index; + const char *name; + unsigned int reg64; +}; + +/* + * Divider clock. Divider is 2*(v+1), with v the register value. + * Min divider is 2, max is 2*(2^width) (e.g. width = 4 gives even + * dividers 2..32). + */ +struct eqc_div { + unsigned int index; + const char *name; + unsigned int parent; + unsigned int reg; + u8 shift; + u8 width; +}; + +struct eqc_fixed_factor { + unsigned int index; + const char *name; + unsigned int mult; + unsigned int div; + unsigned int parent; +}; + +struct eqc_match_data { + unsigned int pll_count; + const struct eqc_pll *plls; + + unsigned int div_count; + const struct eqc_div *divs; + + unsigned int fixed_factor_count; + const struct eqc_fixed_factor *fixed_factors; + + const char *reset_auxdev_name; + const char *pinctrl_auxdev_name; + + unsigned int early_clk_count; +}; + +struct eqc_early_match_data { + unsigned int early_pll_count; + const struct eqc_pll *early_plls; + + unsigned int early_fixed_factor_count; + const struct eqc_fixed_factor *early_fixed_factors; + + /* + * We want our of_xlate callback to EPROBE_DEFER instead of dev_err() + * and EINVAL. For that, we must know the total clock count. + */ + unsigned int late_clk_count; +}; + +/* + * Both factors (mult and div) must fit in 32 bits. When an operation overflows, + * this function throws away low bits so that factors still fit in 32 bits. + * + * Precision loss depends on the magnitude of mult and div. Worst theoretical + * loss is: (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10. + * This is 1Hz per 4.3GHz. + */ +static void eqc_pll_downshift_factors(unsigned long *mult, unsigned long *div) +{ + unsigned long biggest; + unsigned int shift; + + /* This function can be removed if mult/div switch to unsigned long. */ + static_assert(sizeof_field(struct clk_fixed_factor, mult) == sizeof(unsigned int)); + static_assert(sizeof_field(struct clk_fixed_factor, div) == sizeof(unsigned int)); + + /* No overflow, nothing to be done. */ + if (*mult <= UINT_MAX && *div <= UINT_MAX) + return; + + /* + * Compute the shift required to bring the biggest factor into unsigned + * int range. That is, shift its highest set bit to the unsigned int + * most significant bit. 
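+ * + * For example, mult = 0x180000000 (33 bits) and div = 2000 give + * __fls(biggest) = 32, so shift = 32 - 32 + 1 = 1; the pair becomes + * mult = 0xC0000000 and div = 1000, both fitting in 32 bits.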
*/ + biggest = max(*mult, *div); + shift = __fls(biggest) - (BITS_PER_BYTE * sizeof(unsigned int)) + 1; + + *mult >>= shift; + *div >>= shift; +} + +static int eqc_pll_parse_registers(u32 r0, u32 r1, unsigned long *mult, + unsigned long *div, unsigned long *acc) +{ + u32 spread; + + if (r0 & PCSR0_BYPASS) { + *mult = 1; + *div = 1; + *acc = 0; + return 0; + } + + if (!(r0 & PCSR0_PLL_LOCKED)) + return -EINVAL; + + *mult = FIELD_GET(PCSR0_INTIN, r0); + *div = FIELD_GET(PCSR0_REF_DIV, r0); + if (r0 & PCSR0_FOUTPOSTDIV_EN) + *div *= FIELD_GET(PCSR0_POST_DIV1, r0) * FIELD_GET(PCSR0_POST_DIV2, r0); + + /* Fractional mode, in 2^20 (0x100000) parts. */ + if (r0 & PCSR0_DSM_EN) { + *div *= (1ULL << 20); + *mult = *mult * (1ULL << 20) + FIELD_GET(PCSR1_FRAC_IN, r1); + } + + if (!*mult || !*div) + return -EINVAL; + + if (r1 & (PCSR1_RESET | PCSR1_DIS_SSCG)) { + *acc = 0; + return 0; + } + + /* + * Spread spectrum. + * + * Spread is in 1/1000 parts of the frequency; accuracy is half of + * that. To get accuracy, convert to ppb (parts per billion). + * + * acc = spread * 1e6 / 2 + * with acc in parts per billion and + * spread in parts per thousand. + */ + spread = FIELD_GET(PCSR1_SPREAD, r1); + *acc = spread * 500000; + + if (r1 & PCSR1_DOWN_SPREAD) { + /* + * Downspreading: the central frequency is half a + * spread lower. + */ + *mult *= 2000 - spread; + *div *= 2000; + + /* + * Previous operation might overflow 32 bits. If it + * does, throw away the least amount of low bits. + */ + eqc_pll_downshift_factors(mult, div); + } + + return 0; +} + +static void eqc_probe_init_plls(struct device *dev, const struct eqc_match_data *data, + void __iomem *base, struct clk_hw_onecell_data *cells) +{ + unsigned long mult, div, acc; + const struct eqc_pll *pll; + struct clk_hw *hw; + unsigned int i; + u32 r0, r1; + u64 val; + int ret; + + for (i = 0; i < data->pll_count; i++) { + pll = &data->plls[i]; + + val = readq(base + pll->reg64); + r0 = val; + r1 = val >> 32; + + ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc); + if (ret) { + dev_warn(dev, "failed parsing state of %s\n", pll->name); + cells->hws[pll->index] = ERR_PTR(ret); + continue; + } + + hw = clk_hw_register_fixed_factor_with_accuracy_fwname(dev, + dev->of_node, pll->name, "ref", 0, mult, div, acc); + cells->hws[pll->index] = hw; + if (IS_ERR(hw)) + dev_warn(dev, "failed registering %s: %pe\n", pll->name, hw); + } +} + +static void eqc_probe_init_divs(struct device *dev, const struct eqc_match_data *data, + void __iomem *base, struct clk_hw_onecell_data *cells) +{ + struct clk_parent_data parent_data = { }; + const struct eqc_div *div; + struct clk_hw *parent; + void __iomem *reg; + struct clk_hw *hw; + unsigned int i; + + for (i = 0; i < data->div_count; i++) { + div = &data->divs[i]; + reg = base + div->reg; + parent = cells->hws[div->parent]; + + if (IS_ERR(parent)) { + /* Parent is in early clk provider. */ + parent_data.index = div->parent; + parent_data.hw = NULL; + } else { + /* Avoid clock lookup when we already have the hw reference. 
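+ * (With .hw set, the clk core uses the reference directly and skips + * any lookup; with .hw NULL, the parent is resolved later, which the + * early clk provider can satisfy.)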
*/ + parent_data.index = 0; + parent_data.hw = parent; + } + + hw = clk_hw_register_divider_table_parent_data(dev, div->name, + &parent_data, 0, reg, div->shift, div->width, + CLK_DIVIDER_EVEN_INTEGERS, NULL, NULL); + cells->hws[div->index] = hw; + if (IS_ERR(hw)) + dev_warn(dev, "failed registering %s: %pe\n", + div->name, hw); + } +} + +static void eqc_probe_init_fixed_factors(struct device *dev, + const struct eqc_match_data *data, + struct clk_hw_onecell_data *cells) +{ + const struct eqc_fixed_factor *ff; + struct clk_hw *hw, *parent_hw; + unsigned int i; + + for (i = 0; i < data->fixed_factor_count; i++) { + ff = &data->fixed_factors[i]; + parent_hw = cells->hws[ff->parent]; + + if (IS_ERR(parent_hw)) { + /* Parent is in early clk provider. */ + hw = clk_hw_register_fixed_factor_index(dev, ff->name, + ff->parent, 0, ff->mult, ff->div); + } else { + /* Avoid clock lookup when we already have the hw reference. */ + hw = clk_hw_register_fixed_factor_parent_hw(dev, ff->name, + parent_hw, 0, ff->mult, ff->div); + } + + cells->hws[ff->index] = hw; + if (IS_ERR(hw)) + dev_warn(dev, "failed registering %s: %pe\n", + ff->name, hw); + } +} + +static void eqc_auxdev_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + kfree(adev); +} + +static int eqc_auxdev_create(struct device *dev, void __iomem *base, + const char *name, u32 id) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + adev->name = name; + adev->dev.parent = dev; + adev->dev.platform_data = (void __force *)base; + adev->dev.release = eqc_auxdev_release; + adev->id = id; + + ret = auxiliary_device_init(adev); + if (ret) + return ret; + + ret = auxiliary_device_add(adev); + if (ret) + auxiliary_device_uninit(adev); + + return ret; +} + +static int eqc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct eqc_match_data *data; + struct clk_hw_onecell_data *cells; + unsigned int i, clk_count; + struct resource *res; + void __iomem *base; + int ret; + + data = device_get_match_data(dev); + if (!data) + return 0; /* No clocks nor auxdevs, we are done. */ + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + base = ioremap(res->start, resource_size(res)); + if (!base) + return -ENOMEM; + + /* Init optional reset auxiliary device. */ + if (data->reset_auxdev_name) { + ret = eqc_auxdev_create(dev, base, data->reset_auxdev_name, 0); + if (ret) + dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n", + KBUILD_MODNAME, data->reset_auxdev_name, ret); + } + + /* Init optional pinctrl auxiliary device. */ + if (data->pinctrl_auxdev_name) { + ret = eqc_auxdev_create(dev, base, data->pinctrl_auxdev_name, 0); + if (ret) + dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n", + KBUILD_MODNAME, data->pinctrl_auxdev_name, ret); + } + + if (data->pll_count + data->div_count + data->fixed_factor_count == 0) + return 0; /* Zero clocks, we are done. */ + + clk_count = data->pll_count + data->div_count + + data->fixed_factor_count + data->early_clk_count; + cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL); + if (!cells) + return -ENOMEM; + + cells->num = clk_count; + + /* Early PLLs are marked as errors: the early provider will get queried. 
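+ * (Clock lookup walks every provider registered for a node and keeps + * the first non-error result, so an ERR_PTR cell here makes consumers + * fall through to the early provider.)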
*/ + for (i = 0; i < clk_count; i++) + cells->hws[i] = ERR_PTR(-EINVAL); + + eqc_probe_init_plls(dev, data, base, cells); + + eqc_probe_init_divs(dev, data, base, cells); + + eqc_probe_init_fixed_factors(dev, data, cells); + + return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells); +} + +/* Required early for GIC timer (pll-cpu) and UARTs (pll-per). */ +static const struct eqc_pll eqc_eyeq5_early_plls[] = { + { .index = EQ5C_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C }, + { .index = EQ5C_PLL_PER, .name = "pll-per", .reg64 = 0x05C }, +}; + +static const struct eqc_pll eqc_eyeq5_plls[] = { + { .index = EQ5C_PLL_VMP, .name = "pll-vmp", .reg64 = 0x034 }, + { .index = EQ5C_PLL_PMA, .name = "pll-pma", .reg64 = 0x03C }, + { .index = EQ5C_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 }, + { .index = EQ5C_PLL_DDR0, .name = "pll-ddr0", .reg64 = 0x04C }, + { .index = EQ5C_PLL_PCI, .name = "pll-pci", .reg64 = 0x054 }, + { .index = EQ5C_PLL_PMAC, .name = "pll-pmac", .reg64 = 0x064 }, + { .index = EQ5C_PLL_MPC, .name = "pll-mpc", .reg64 = 0x06C }, + { .index = EQ5C_PLL_DDR1, .name = "pll-ddr1", .reg64 = 0x074 }, +}; + +enum { + /* + * EQ5C_PLL_CPU children. + * EQ5C_PER_OCC_PCI is the last clock exposed in dt-bindings. + */ + EQ5C_CPU_OCC = EQ5C_PER_OCC_PCI + 1, + EQ5C_CPU_SI_CSS0, + EQ5C_CPU_CPC, + EQ5C_CPU_CM, + EQ5C_CPU_MEM, + EQ5C_CPU_OCC_ISRAM, + EQ5C_CPU_ISRAM, + EQ5C_CPU_OCC_DBU, + EQ5C_CPU_SI_DBU_TP, + + /* + * EQ5C_PLL_VDI children. + */ + EQ5C_VDI_OCC_VDI, + EQ5C_VDI_VDI, + EQ5C_VDI_OCC_CAN_SER, + EQ5C_VDI_CAN_SER, + EQ5C_VDI_I2C_SER, + + /* + * EQ5C_PLL_PER children. + */ + EQ5C_PER_PERIPH, + EQ5C_PER_CAN, + EQ5C_PER_TIMER, + EQ5C_PER_CCF, + EQ5C_PER_OCC_MJPEG, + EQ5C_PER_HSM, + EQ5C_PER_MJPEG, + EQ5C_PER_FCMU_A, +}; + +static const struct eqc_fixed_factor eqc_eyeq5_early_fixed_factors[] = { + /* EQ5C_PLL_CPU children */ + { EQ5C_CPU_OCC, "occ-cpu", 1, 1, EQ5C_PLL_CPU }, + { EQ5C_CPU_SI_CSS0, "si-css0", 1, 1, EQ5C_CPU_OCC }, + { EQ5C_CPU_CORE0, "core0", 1, 1, EQ5C_CPU_SI_CSS0 }, + { EQ5C_CPU_CORE1, "core1", 1, 1, EQ5C_CPU_SI_CSS0 }, + { EQ5C_CPU_CORE2, "core2", 1, 1, EQ5C_CPU_SI_CSS0 }, + { EQ5C_CPU_CORE3, "core3", 1, 1, EQ5C_CPU_SI_CSS0 }, + + /* EQ5C_PLL_PER children */ + { EQ5C_PER_OCC, "occ-periph", 1, 16, EQ5C_PLL_PER }, + { EQ5C_PER_UART, "uart", 1, 1, EQ5C_PER_OCC }, +}; + +static const struct eqc_fixed_factor eqc_eyeq5_fixed_factors[] = { + /* EQ5C_PLL_CPU children */ + { EQ5C_CPU_CPC, "cpc", 1, 1, EQ5C_CPU_SI_CSS0 }, + { EQ5C_CPU_CM, "cm", 1, 1, EQ5C_CPU_SI_CSS0 }, + { EQ5C_CPU_MEM, "mem", 1, 1, EQ5C_CPU_SI_CSS0 }, + { EQ5C_CPU_OCC_ISRAM, "occ-isram", 1, 2, EQ5C_PLL_CPU }, + { EQ5C_CPU_ISRAM, "isram", 1, 1, EQ5C_CPU_OCC_ISRAM }, + { EQ5C_CPU_OCC_DBU, "occ-dbu", 1, 10, EQ5C_PLL_CPU }, + { EQ5C_CPU_SI_DBU_TP, "si-dbu-tp", 1, 1, EQ5C_CPU_OCC_DBU }, + + /* EQ5C_PLL_VDI children */ + { EQ5C_VDI_OCC_VDI, "occ-vdi", 1, 2, EQ5C_PLL_VDI }, + { EQ5C_VDI_VDI, "vdi", 1, 1, EQ5C_VDI_OCC_VDI }, + { EQ5C_VDI_OCC_CAN_SER, "occ-can-ser", 1, 16, EQ5C_PLL_VDI }, + { EQ5C_VDI_CAN_SER, "can-ser", 1, 1, EQ5C_VDI_OCC_CAN_SER }, + { EQ5C_VDI_I2C_SER, "i2c-ser", 1, 20, EQ5C_PLL_VDI }, + + /* EQ5C_PLL_PER children */ + { EQ5C_PER_PERIPH, "periph", 1, 1, EQ5C_PER_OCC }, + { EQ5C_PER_CAN, "can", 1, 1, EQ5C_PER_OCC }, + { EQ5C_PER_SPI, "spi", 1, 1, EQ5C_PER_OCC }, + { EQ5C_PER_I2C, "i2c", 1, 1, EQ5C_PER_OCC }, + { EQ5C_PER_TIMER, "timer", 1, 1, EQ5C_PER_OCC }, + { EQ5C_PER_GPIO, "gpio", 1, 1, EQ5C_PER_OCC }, + { EQ5C_PER_EMMC, "emmc-sys", 1, 10, EQ5C_PLL_PER }, + { EQ5C_PER_CCF, "ccf-ctrl", 1, 4, 
EQ5C_PLL_PER }, + { EQ5C_PER_OCC_MJPEG, "occ-mjpeg", 1, 2, EQ5C_PLL_PER }, + { EQ5C_PER_HSM, "hsm", 1, 1, EQ5C_PER_OCC_MJPEG }, + { EQ5C_PER_MJPEG, "mjpeg", 1, 1, EQ5C_PER_OCC_MJPEG }, + { EQ5C_PER_FCMU_A, "fcmu-a", 1, 20, EQ5C_PLL_PER }, + { EQ5C_PER_OCC_PCI, "occ-pci-sys", 1, 8, EQ5C_PLL_PER }, +}; + +static const struct eqc_div eqc_eyeq5_divs[] = { + { + .index = EQ5C_DIV_OSPI, + .name = "div-ospi", + .parent = EQ5C_PLL_PER, + .reg = 0x11C, + .shift = 0, + .width = 4, + }, +}; + +static const struct eqc_early_match_data eqc_eyeq5_early_match_data __initconst = { + .early_pll_count = ARRAY_SIZE(eqc_eyeq5_early_plls), + .early_plls = eqc_eyeq5_early_plls, + + .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_early_fixed_factors), + .early_fixed_factors = eqc_eyeq5_early_fixed_factors, + + .late_clk_count = ARRAY_SIZE(eqc_eyeq5_plls) + ARRAY_SIZE(eqc_eyeq5_divs) + + ARRAY_SIZE(eqc_eyeq5_fixed_factors), +}; + +static const struct eqc_match_data eqc_eyeq5_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq5_plls), + .plls = eqc_eyeq5_plls, + + .div_count = ARRAY_SIZE(eqc_eyeq5_divs), + .divs = eqc_eyeq5_divs, + + .fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_fixed_factors), + .fixed_factors = eqc_eyeq5_fixed_factors, + + .reset_auxdev_name = "reset", + .pinctrl_auxdev_name = "pinctrl", + + .early_clk_count = ARRAY_SIZE(eqc_eyeq5_early_plls) + + ARRAY_SIZE(eqc_eyeq5_early_fixed_factors), +}; + +static const struct eqc_pll eqc_eyeq6l_plls[] = { + { .index = EQ6LC_PLL_DDR, .name = "pll-ddr", .reg64 = 0x02C }, + { .index = EQ6LC_PLL_CPU, .name = "pll-cpu", .reg64 = 0x034 }, /* also acc */ + { .index = EQ6LC_PLL_PER, .name = "pll-per", .reg64 = 0x03C }, + { .index = EQ6LC_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 }, +}; + +static const struct eqc_match_data eqc_eyeq6l_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq6l_plls), + .plls = eqc_eyeq6l_plls, + + .reset_auxdev_name = "reset", +}; + +static const struct eqc_match_data eqc_eyeq6h_west_match_data = { + .reset_auxdev_name = "reset_west", +}; + +static const struct eqc_pll eqc_eyeq6h_east_plls[] = { + { .index = 0, .name = "pll-east", .reg64 = 0x074 }, +}; + +static const struct eqc_match_data eqc_eyeq6h_east_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq6h_east_plls), + .plls = eqc_eyeq6h_east_plls, + + .reset_auxdev_name = "reset_east", +}; + +static const struct eqc_pll eqc_eyeq6h_south_plls[] = { + { .index = EQ6HC_SOUTH_PLL_VDI, .name = "pll-vdi", .reg64 = 0x000 }, + { .index = EQ6HC_SOUTH_PLL_PCIE, .name = "pll-pcie", .reg64 = 0x008 }, + { .index = EQ6HC_SOUTH_PLL_PER, .name = "pll-per", .reg64 = 0x010 }, + { .index = EQ6HC_SOUTH_PLL_ISP, .name = "pll-isp", .reg64 = 0x018 }, +}; + +static const struct eqc_div eqc_eyeq6h_south_divs[] = { + { + .index = EQ6HC_SOUTH_DIV_EMMC, + .name = "div-emmc", + .parent = EQ6HC_SOUTH_PLL_PER, + .reg = 0x070, + .shift = 4, + .width = 4, + }, + { + .index = EQ6HC_SOUTH_DIV_OSPI_REF, + .name = "div-ospi-ref", + .parent = EQ6HC_SOUTH_PLL_PER, + .reg = 0x090, + .shift = 4, + .width = 4, + }, + { + .index = EQ6HC_SOUTH_DIV_OSPI_SYS, + .name = "div-ospi-sys", + .parent = EQ6HC_SOUTH_PLL_PER, + .reg = 0x090, + .shift = 8, + .width = 1, + }, + { + .index = EQ6HC_SOUTH_DIV_TSU, + .name = "div-tsu", + .parent = EQ6HC_SOUTH_PLL_PCIE, + .reg = 0x098, + .shift = 4, + .width = 8, + }, +}; + +static const struct eqc_match_data eqc_eyeq6h_south_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq6h_south_plls), + .plls = eqc_eyeq6h_south_plls, + + .div_count = ARRAY_SIZE(eqc_eyeq6h_south_divs), + .divs = 
eqc_eyeq6h_south_divs, +}; + +static const struct eqc_pll eqc_eyeq6h_ddr0_plls[] = { + { .index = 0, .name = "pll-ddr0", .reg64 = 0x074 }, +}; + +static const struct eqc_match_data eqc_eyeq6h_ddr0_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr0_plls), + .plls = eqc_eyeq6h_ddr0_plls, +}; + +static const struct eqc_pll eqc_eyeq6h_ddr1_plls[] = { + { .index = 0, .name = "pll-ddr1", .reg64 = 0x074 }, +}; + +static const struct eqc_match_data eqc_eyeq6h_ddr1_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr1_plls), + .plls = eqc_eyeq6h_ddr1_plls, +}; + +static const struct eqc_pll eqc_eyeq6h_acc_plls[] = { + { .index = EQ6HC_ACC_PLL_XNN, .name = "pll-xnn", .reg64 = 0x040 }, + { .index = EQ6HC_ACC_PLL_VMP, .name = "pll-vmp", .reg64 = 0x050 }, + { .index = EQ6HC_ACC_PLL_PMA, .name = "pll-pma", .reg64 = 0x05C }, + { .index = EQ6HC_ACC_PLL_MPC, .name = "pll-mpc", .reg64 = 0x068 }, + { .index = EQ6HC_ACC_PLL_NOC, .name = "pll-noc", .reg64 = 0x070 }, +}; + +static const struct eqc_match_data eqc_eyeq6h_acc_match_data = { + .pll_count = ARRAY_SIZE(eqc_eyeq6h_acc_plls), + .plls = eqc_eyeq6h_acc_plls, + + .reset_auxdev_name = "reset_acc", +}; + +static const struct of_device_id eqc_match_table[] = { + { .compatible = "mobileye,eyeq5-olb", .data = &eqc_eyeq5_match_data }, + { .compatible = "mobileye,eyeq6l-olb", .data = &eqc_eyeq6l_match_data }, + { .compatible = "mobileye,eyeq6h-west-olb", .data = &eqc_eyeq6h_west_match_data }, + { .compatible = "mobileye,eyeq6h-east-olb", .data = &eqc_eyeq6h_east_match_data }, + { .compatible = "mobileye,eyeq6h-south-olb", .data = &eqc_eyeq6h_south_match_data }, + { .compatible = "mobileye,eyeq6h-ddr0-olb", .data = &eqc_eyeq6h_ddr0_match_data }, + { .compatible = "mobileye,eyeq6h-ddr1-olb", .data = &eqc_eyeq6h_ddr1_match_data }, + { .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqc_eyeq6h_acc_match_data }, + {} +}; + +static struct platform_driver eqc_driver = { + .probe = eqc_probe, + .driver = { + .name = "clk-eyeq", + .of_match_table = eqc_match_table, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(eqc_driver); + +/* Required early for GIC timer. */ +static const struct eqc_pll eqc_eyeq6h_central_early_plls[] = { + { .index = EQ6HC_CENTRAL_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C }, +}; + +static const struct eqc_fixed_factor eqc_eyeq6h_central_early_fixed_factors[] = { + { EQ6HC_CENTRAL_CPU_OCC, "occ-cpu", 1, 1, EQ6HC_CENTRAL_PLL_CPU }, +}; + +static const struct eqc_early_match_data eqc_eyeq6h_central_early_match_data __initconst = { + .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_central_early_plls), + .early_plls = eqc_eyeq6h_central_early_plls, + + .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_central_early_fixed_factors), + .early_fixed_factors = eqc_eyeq6h_central_early_fixed_factors, +}; + +/* Required early for UART. 
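+ * (The console is brought up long before platform devices probe, so + * these clocks must be registered from the of_clk_init() path.)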
*/ +static const struct eqc_pll eqc_eyeq6h_west_early_plls[] = { + { .index = EQ6HC_WEST_PLL_PER, .name = "pll-west", .reg64 = 0x074 }, +}; + +static const struct eqc_fixed_factor eqc_eyeq6h_west_early_fixed_factors[] = { + { EQ6HC_WEST_PER_OCC, "west-per-occ", 1, 10, EQ6HC_WEST_PLL_PER }, + { EQ6HC_WEST_PER_UART, "west-per-uart", 1, 1, EQ6HC_WEST_PER_OCC }, +}; + +static const struct eqc_early_match_data eqc_eyeq6h_west_early_match_data __initconst = { + .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_west_early_plls), + .early_plls = eqc_eyeq6h_west_early_plls, + + .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_west_early_fixed_factors), + .early_fixed_factors = eqc_eyeq6h_west_early_fixed_factors, +}; + +static void __init eqc_early_init(struct device_node *np, + const struct eqc_early_match_data *early_data) +{ + struct clk_hw_onecell_data *cells; + unsigned int i, clk_count; + void __iomem *base; + int ret; + + clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count + + early_data->late_clk_count; + cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL); + if (!cells) { + ret = -ENOMEM; + goto err; + } + + cells->num = clk_count; + + /* + * Mark all clocks as deferred; some are registered here, the rest at + * platform device probe. + * + * Once the platform device is probed, its provider will take priority + * when looking up clocks. + */ + for (i = 0; i < clk_count; i++) + cells->hws[i] = ERR_PTR(-EPROBE_DEFER); + + /* Offsets (reg64) of early PLLs are relative to the OLB block. */ + base = of_iomap(np, 0); + if (!base) { + ret = -ENODEV; + goto err; + } + + for (i = 0; i < early_data->early_pll_count; i++) { + const struct eqc_pll *pll = &early_data->early_plls[i]; + unsigned long mult, div, acc; + struct clk_hw *hw; + u32 r0, r1; + u64 val; + + val = readq(base + pll->reg64); + r0 = val; + r1 = val >> 32; + + ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc); + if (ret) { + pr_err("failed parsing state of %s\n", pll->name); + goto err; + } + + hw = clk_hw_register_fixed_factor_with_accuracy_fwname(NULL, + np, pll->name, "ref", 0, mult, div, acc); + cells->hws[pll->index] = hw; + if (IS_ERR(hw)) { + pr_err("failed registering %s: %pe\n", pll->name, hw); + ret = PTR_ERR(hw); + goto err; + } + } + + for (i = 0; i < early_data->early_fixed_factor_count; i++) { + const struct eqc_fixed_factor *ff = &early_data->early_fixed_factors[i]; + struct clk_hw *parent_hw = cells->hws[ff->parent]; + struct clk_hw *hw; + + hw = clk_hw_register_fixed_factor_parent_hw(NULL, ff->name, + parent_hw, 0, ff->mult, ff->div); + cells->hws[ff->index] = hw; + if (IS_ERR(hw)) { + pr_err("failed registering %s: %pe\n", ff->name, hw); + ret = PTR_ERR(hw); + goto err; + } + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells); + if (ret) { + pr_err("failed registering clk provider: %d\n", ret); + goto err; + } + + return; + +err: + /* + * We are doomed. The system will not be able to boot. + * + * Let's still try to be good citizens by freeing resources and printing + * a last error message that might help debugging. 
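+ * + * (of_clk_del_provider() is a no-op if the provider was never added, + * so calling it unconditionally in this cleanup path is safe.)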
+ */ + + pr_err("failed clk init: %d\n", ret); + + if (cells) { + of_clk_del_provider(np); + + for (i = 0; i < early_data->early_pll_count; i++) { + const struct eqc_pll *pll = &early_data->early_plls[i]; + struct clk_hw *hw = cells->hws[pll->index]; + + if (!IS_ERR_OR_NULL(hw)) + clk_hw_unregister_fixed_factor(hw); + } + + kfree(cells); + } +} + +static void __init eqc_eyeq5_early_init(struct device_node *np) +{ + eqc_early_init(np, &eqc_eyeq5_early_match_data); +} +CLK_OF_DECLARE_DRIVER(eqc_eyeq5, "mobileye,eyeq5-olb", eqc_eyeq5_early_init); + +static void __init eqc_eyeq6h_central_early_init(struct device_node *np) +{ + eqc_early_init(np, &eqc_eyeq6h_central_early_match_data); +} +CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_central, "mobileye,eyeq6h-central-olb", + eqc_eyeq6h_central_early_init); + +static void __init eqc_eyeq6h_west_early_init(struct device_node *np) +{ + eqc_early_init(np, &eqc_eyeq6h_west_early_match_data); +} +CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_west, "mobileye,eyeq6h-west-olb", + eqc_eyeq6h_west_early_init); diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 8fba63fc70c5..e62ae8794d44 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -241,6 +241,17 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device * } EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_with_accuracy_fwname); +struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev, + const char *name, unsigned int index, unsigned long flags, + unsigned int mult, unsigned int div) +{ + const struct clk_parent_data pdata = { .index = index }; + + return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, &pdata, + flags, mult, div, 0, 0, false); +} +EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_index); + struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div) diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 5b114043771d..9099c57e2715 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -17,13 +17,15 @@ #include <linux/device.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> /** * DOC: basic gpio gated clock which can be enabled and disabled * with gpio output * Traits of this clock: - * prepare - clk_(un)prepare only ensures parent is (un)prepared - * enable - clk_enable and clk_disable are functional & control gpio + * prepare - clk_(un)prepare are functional and control a gpio that can sleep + * enable - clk_enable and clk_disable are functional & control + * non-sleeping gpio * rate - inherits rate from parent. No clk_set_rate support * parent - fixed parent. No clk_set_parent support */ @@ -199,7 +201,6 @@ static int gpio_clk_driver_probe(struct platform_device *pdev) struct gpio_desc *gpiod; struct clk_hw *hw; bool is_mux; - int ret; is_mux = of_device_is_compatible(node, "gpio-mux-clock"); @@ -211,17 +212,9 @@ static int gpio_clk_driver_probe(struct platform_device *pdev) gpio_name = is_mux ? 
"select" : "enable"; gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW); - if (IS_ERR(gpiod)) { - ret = PTR_ERR(gpiod); - if (ret == -EPROBE_DEFER) - pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n", - node, __func__); - else - pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n", - node, __func__, - gpio_name); - return ret; - } + if (IS_ERR(gpiod)) + return dev_err_probe(dev, PTR_ERR(gpiod), + "Can't get '%s' named GPIO property\n", gpio_name); if (is_mux) hw = clk_hw_register_gpio_mux(dev, gpiod); @@ -247,3 +240,187 @@ static struct platform_driver gpio_clk_driver = { }, }; builtin_platform_driver(gpio_clk_driver); + +/** + * DOC: gated fixed clock, controlled with a gpio output and a regulator + * Traits of this clock: + * prepare - clk_prepare and clk_unprepare are function & control regulator + * optionally a gpio that can sleep + * enable - clk_enable and clk_disable are functional & control gpio + * rate - rate is fixed and set on clock registration + * parent - fixed clock is a root clock and has no parent + */ + +/** + * struct clk_gated_fixed - Gateable fixed rate clock + * @clk_gpio: instance of clk_gpio for gate-gpio + * @supply: supply regulator + * @rate: fixed rate + */ +struct clk_gated_fixed { + struct clk_gpio clk_gpio; + struct regulator *supply; + unsigned long rate; +}; + +#define to_clk_gated_fixed(_clk_gpio) container_of(_clk_gpio, struct clk_gated_fixed, clk_gpio) + +static unsigned long clk_gated_fixed_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return to_clk_gated_fixed(to_clk_gpio(hw))->rate; +} + +static int clk_gated_fixed_prepare(struct clk_hw *hw) +{ + struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw)); + + if (!clk->supply) + return 0; + + return regulator_enable(clk->supply); +} + +static void clk_gated_fixed_unprepare(struct clk_hw *hw) +{ + struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw)); + + if (!clk->supply) + return; + + regulator_disable(clk->supply); +} + +static int clk_gated_fixed_is_prepared(struct clk_hw *hw) +{ + struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw)); + + if (!clk->supply) + return true; + + return regulator_is_enabled(clk->supply); +} + +/* + * Fixed gated clock with non-sleeping gpio. + * + * Prepare operation turns on the supply regulator + * and the enable operation switches the enable-gpio. + */ +static const struct clk_ops clk_gated_fixed_ops = { + .prepare = clk_gated_fixed_prepare, + .unprepare = clk_gated_fixed_unprepare, + .is_prepared = clk_gated_fixed_is_prepared, + .enable = clk_gpio_gate_enable, + .disable = clk_gpio_gate_disable, + .is_enabled = clk_gpio_gate_is_enabled, + .recalc_rate = clk_gated_fixed_recalc_rate, +}; + +static int clk_sleeping_gated_fixed_prepare(struct clk_hw *hw) +{ + int ret; + + ret = clk_gated_fixed_prepare(hw); + if (ret) + return ret; + + ret = clk_sleeping_gpio_gate_prepare(hw); + if (ret) + clk_gated_fixed_unprepare(hw); + + return ret; +} + +static void clk_sleeping_gated_fixed_unprepare(struct clk_hw *hw) +{ + clk_gated_fixed_unprepare(hw); + clk_sleeping_gpio_gate_unprepare(hw); +} + +/* + * Fixed gated clock with non-sleeping gpio. + * + * Enabling the supply regulator and switching the enable-gpio happens + * both in the prepare step. + * is_prepared only needs to check the gpio state, as toggling the + * gpio is the last step when preparing. 
+ */ +static const struct clk_ops clk_sleeping_gated_fixed_ops = { + .prepare = clk_sleeping_gated_fixed_prepare, + .unprepare = clk_sleeping_gated_fixed_unprepare, + .is_prepared = clk_sleeping_gpio_gate_is_prepared, + .recalc_rate = clk_gated_fixed_recalc_rate, +}; + +static int clk_gated_fixed_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct clk_gated_fixed *clk; + const struct clk_ops *ops; + const char *clk_name; + u32 rate; + int ret; + + clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL); + if (!clk) + return -ENOMEM; + + ret = device_property_read_u32(dev, "clock-frequency", &rate); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clock-frequency\n"); + clk->rate = rate; + + ret = device_property_read_string(dev, "clock-output-names", &clk_name); + if (ret) + clk_name = fwnode_get_name(dev->fwnode); + + clk->supply = devm_regulator_get_optional(dev, "vdd"); + if (IS_ERR(clk->supply)) { + if (PTR_ERR(clk->supply) != -ENODEV) + return dev_err_probe(dev, PTR_ERR(clk->supply), + "Failed to get regulator\n"); + clk->supply = NULL; + } + + clk->clk_gpio.gpiod = devm_gpiod_get_optional(dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(clk->clk_gpio.gpiod)) + return dev_err_probe(dev, PTR_ERR(clk->clk_gpio.gpiod), + "Failed to get gpio\n"); + + if (gpiod_cansleep(clk->clk_gpio.gpiod)) + ops = &clk_sleeping_gated_fixed_ops; + else + ops = &clk_gated_fixed_ops; + + clk->clk_gpio.hw.init = CLK_HW_INIT_NO_PARENT(clk_name, ops, 0); + + /* register the clock */ + ret = devm_clk_hw_register(dev, &clk->clk_gpio.hw); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register clock\n"); + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + &clk->clk_gpio.hw); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register clock provider\n"); + + return 0; +} + +static const struct of_device_id gated_fixed_clk_match_table[] = { + { .compatible = "gated-fixed-clock" }, + { /* sentinel */ } +}; + +static struct platform_driver gated_fixed_clk_driver = { + .probe = clk_gated_fixed_probe, + .driver = { + .name = "gated-fixed-clk", + .of_match_table = gated_fixed_clk_match_table, + }, +}; +builtin_platform_driver(gated_fixed_clk_driver); diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c index 870fd7df50c1..16e0405fe28b 100644 --- a/drivers/clk/clk-lan966x.c +++ b/drivers/clk/clk-lan966x.c @@ -24,13 +24,20 @@ #define DIV_MAX 255 -static const char *clk_names[N_CLOCKS] = { +static const char * const lan966x_clk_names[] = { "qspi0", "qspi1", "qspi2", "sdmmc0", "pi", "mcan0", "mcan1", "flexcom0", "flexcom1", "flexcom2", "flexcom3", "flexcom4", "timer1", "usb_refclk", }; +static const char * const lan969x_clk_names[] = { + "qspi0", "qspi2", "sdmmc0", "sdmmc1", + "mcan0", "mcan1", "flexcom0", + "flexcom1", "flexcom2", "flexcom3", + "timer1", "usb_refclk", +}; + struct lan966x_gck { struct clk_hw hw; void __iomem *reg; @@ -53,7 +60,7 @@ struct clk_gate_soc_desc { int bit_idx; }; -static const struct clk_gate_soc_desc clk_gate_desc[] = { +static const struct clk_gate_soc_desc lan966x_clk_gate_desc[] = { { "uhphs", 11 }, { "udphs", 10 }, { "mcramc", 9 }, @@ -61,6 +68,37 @@ static const struct clk_gate_soc_desc clk_gate_desc[] = { { } }; +static const struct clk_gate_soc_desc lan969x_clk_gate_desc[] = { + { "usb_drd", 10 }, + { "mcramc", 9 }, + { "hmatrix", 8 }, + { } +}; + +struct lan966x_match_data { + char *name; + const char * const *clk_name; + const struct clk_gate_soc_desc *clk_gate_desc; + u8 num_generic_clks; + u8 num_total_clks; 
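Seen from a consumer, the new gated-fixed-clock behaves like any other clk: clk_prepare() powers the "vdd" regulator (and, with a sleeping gate GPIO, raises the gate too), while clk_enable() toggles a non-sleeping gate GPIO. A hedged consumer sketch follows; the device, the NULL connection id, and the function name are assumptions, not part of the patch.

#include <linux/clk.h>

/* Sketch only: power up a gated-fixed-clock from a consumer driver. */
static int example_enable_refclk(struct device *dev)
{
	struct clk *refclk;

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	/* prepare enables the regulator, enable toggles the gate GPIO */
	return clk_prepare_enable(refclk);
}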
+}; + +static struct lan966x_match_data lan966x_desc = { + .name = "lan966x", + .clk_name = lan966x_clk_names, + .clk_gate_desc = lan966x_clk_gate_desc, + .num_total_clks = 18, + .num_generic_clks = 14, +}; + +static struct lan966x_match_data lan969x_desc = { + .name = "lan969x", + .clk_name = lan969x_clk_names, + .clk_gate_desc = lan969x_clk_gate_desc, + .num_total_clks = 15, + .num_generic_clks = 12, +}; + static DEFINE_SPINLOCK(clk_gate_lock); static void __iomem *base; @@ -186,24 +224,26 @@ static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i) }; static int lan966x_gate_clk_register(struct device *dev, + const struct lan966x_match_data *data, struct clk_hw_onecell_data *hw_data, void __iomem *gate_base) { - int i; + for (int i = data->num_generic_clks; i < data->num_total_clks; ++i) { + int idx = i - data->num_generic_clks; + const struct clk_gate_soc_desc *desc; - for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) { - int idx = i - GCK_GATE_UHPHS; + desc = &data->clk_gate_desc[idx]; hw_data->hws[i] = - devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name, - "lan966x", 0, gate_base, - clk_gate_desc[idx].bit_idx, + devm_clk_hw_register_gate(dev, desc->name, + data->name, 0, gate_base, + desc->bit_idx, 0, &clk_gate_lock); if (IS_ERR(hw_data->hws[i])) return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]), "failed to register %s clock\n", - clk_gate_desc[idx].name); + desc->name); } return 0; @@ -211,13 +251,18 @@ static int lan966x_gate_clk_register(struct device *dev, static int lan966x_clk_probe(struct platform_device *pdev) { + const struct lan966x_match_data *data; struct clk_hw_onecell_data *hw_data; struct device *dev = &pdev->dev; void __iomem *gate_base; struct resource *res; int i, ret; - hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS), + data = device_get_match_data(dev); + if (!data) + return -EINVAL; + + hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, data->num_total_clks), GFP_KERNEL); if (!hw_data) return -ENOMEM; @@ -228,10 +273,10 @@ static int lan966x_clk_probe(struct platform_device *pdev) init.ops = &lan966x_gck_ops; - hw_data->num = GCK_GATE_UHPHS; + hw_data->num = data->num_generic_clks; - for (i = 0; i < GCK_GATE_UHPHS; i++) { - init.name = clk_names[i]; + for (i = 0; i < data->num_generic_clks; i++) { + init.name = data->clk_name[i]; hw_data->hws[i] = lan966x_gck_clk_register(dev, i); if (IS_ERR(hw_data->hws[i])) { dev_err(dev, "failed to register %s clock\n", @@ -246,9 +291,9 @@ static int lan966x_clk_probe(struct platform_device *pdev) if (IS_ERR(gate_base)) return PTR_ERR(gate_base); - hw_data->num = N_CLOCKS; + hw_data->num = data->num_total_clks; - ret = lan966x_gate_clk_register(dev, hw_data, gate_base); + ret = lan966x_gate_clk_register(dev, data, hw_data, gate_base); if (ret) return ret; } @@ -257,7 +302,8 @@ static int lan966x_clk_probe(struct platform_device *pdev) } static const struct of_device_id lan966x_clk_dt_ids[] = { - { .compatible = "microchip,lan966x-gck", }, + { .compatible = "microchip,lan966x-gck", .data = &lan966x_desc }, + { .compatible = "microchip,lan9691-gck", .data = &lan969x_desc }, { } }; MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids); diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c index 820bb1e9e3b7..7082b4309c6f 100644 --- a/drivers/clk/clk-loongson2.c +++ b/drivers/clk/clk-loongson2.c @@ -29,8 +29,10 @@ enum loongson2_clk_type { struct loongson2_clk_provider { void __iomem *base; struct device *dev; - struct clk_hw_onecell_data clk_data; spinlock_t clk_lock; /* protect access to 
DIV registers */ + + /* Must be last --ends in a flexible-array member. */ + struct clk_hw_onecell_data clk_data; }; struct loongson2_clk_data { @@ -304,7 +306,7 @@ static int loongson2_clk_probe(struct platform_device *pdev) return PTR_ERR(clp->base); spin_lock_init(&clp->clk_lock); - clp->clk_data.num = clks_num + 1; + clp->clk_data.num = clks_num; clp->dev = dev; for (i = 0; i < clks_num; i++) { diff --git a/drivers/clk/clk-npcm8xx.c b/drivers/clk/clk-npcm8xx.c new file mode 100644 index 000000000000..2138c011411d --- /dev/null +++ b/drivers/clk/clk-npcm8xx.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Nuvoton NPCM8xx Clock Generator + * All the clocks are initialized by the bootloader, so this driver allows only + * reading of current settings directly from the hardware. + * + * Copyright (C) 2020 Nuvoton Technologies + * Author: Tomer Maimon <tomer.maimon@nuvoton.com> + */ + +#define pr_fmt(fmt) "npcm8xx_clk: " fmt + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/nuvoton,npcm845-clk.h> +#include <soc/nuvoton/clock-npcm8xx.h> + +/* npcm8xx clock registers*/ +#define NPCM8XX_CLKSEL 0x04 +#define NPCM8XX_CLKDIV1 0x08 +#define NPCM8XX_CLKDIV2 0x2C +#define NPCM8XX_CLKDIV3 0x58 +#define NPCM8XX_CLKDIV4 0x7C +#define NPCM8XX_PLLCON0 0x0C +#define NPCM8XX_PLLCON1 0x10 +#define NPCM8XX_PLLCON2 0x54 +#define NPCM8XX_PLLCONG 0x60 +#define NPCM8XX_THRTL_CNT 0xC0 + +#define PLLCON_LOKI BIT(31) +#define PLLCON_LOKS BIT(30) +#define PLLCON_FBDV GENMASK(27, 16) +#define PLLCON_OTDV2 GENMASK(15, 13) +#define PLLCON_PWDEN BIT(12) +#define PLLCON_OTDV1 GENMASK(10, 8) +#define PLLCON_INDV GENMASK(5, 0) + +static void __iomem *clk_base; + +struct npcm8xx_clk_pll { + void __iomem *pllcon; + unsigned int id; + const char *name; + unsigned long flags; + struct clk_hw hw; +}; + +#define to_npcm8xx_clk_pll(_hw) container_of(_hw, struct npcm8xx_clk_pll, hw) + +struct npcm8xx_clk_pll_data { + const char *name; + struct clk_parent_data parent; + unsigned int reg; + unsigned long flags; + struct clk_hw hw; +}; + +struct npcm8xx_clk_div_data { + u32 reg; + u8 shift; + u8 width; + const char *name; + const struct clk_hw *parent_hw; + unsigned long clk_divider_flags; + unsigned long flags; + int onecell_idx; + struct clk_hw hw; +}; + +struct npcm8xx_clk_mux_data { + u8 shift; + u32 mask; + const u32 *table; + const char *name; + const struct clk_parent_data *parent_data; + u8 num_parents; + unsigned long flags; + struct clk_hw hw; +}; + +static struct clk_hw hw_pll1_div2, hw_pll2_div2, hw_gfx_div2, hw_pre_clk; +static struct npcm8xx_clk_pll_data npcm8xx_pll_clks[] = { + { "pll0", { .index = 0 }, NPCM8XX_PLLCON0, 0 }, + { "pll1", { .index = 0 }, NPCM8XX_PLLCON1, 0 }, + { "pll2", { .index = 0 }, NPCM8XX_PLLCON2, 0 }, + { "pll_gfx", { .index = 0 }, NPCM8XX_PLLCONG, 0 }, +}; + +static const u32 cpuck_mux_table[] = { 0, 1, 2, 7 }; +static const struct clk_parent_data cpuck_mux_parents[] = { + { .hw = &npcm8xx_pll_clks[0].hw }, + { .hw = &npcm8xx_pll_clks[1].hw }, + { .index = 0 }, + { .hw = &npcm8xx_pll_clks[2].hw } +}; + +static const u32 pixcksel_mux_table[] = { 0, 2 }; +static const struct clk_parent_data pixcksel_mux_parents[] = { + { .hw = &npcm8xx_pll_clks[3].hw }, + { .index = 0 } +}; + +static const u32 default_mux_table[] = { 0, 1, 2, 3 }; +static const 
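The clk-loongson2 reorder above is more than cosmetic: struct clk_hw_onecell_data ends in a flexible array, so it must be the final member of its container or the hws[] entries would overwrite whatever followed it. A sketch of the single-allocation pattern this layout enables, with invented names:

#include <linux/clk-provider.h>
#include <linux/slab.h>

/* Sketch: the flexible-array member terminates the enclosing struct. */
struct example_provider {
	void __iomem *base;
	struct clk_hw_onecell_data clk_data;	/* must be last */
};

static struct example_provider *example_alloc(unsigned int num_clks)
{
	struct example_provider *p;

	/* one allocation covers the struct plus num_clks hws[] slots */
	p = kzalloc(struct_size(p, clk_data.hws, num_clks), GFP_KERNEL);
	if (p)
		p->clk_data.num = num_clks;

	return p;
}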
struct clk_parent_data default_mux_parents[] = { + { .hw = &npcm8xx_pll_clks[0].hw }, + { .hw = &npcm8xx_pll_clks[1].hw }, + { .index = 0 }, + { .hw = &hw_pll2_div2 } +}; + +static const u32 sucksel_mux_table[] = { 2, 3 }; +static const struct clk_parent_data sucksel_mux_parents[] = { + { .index = 0 }, + { .hw = &hw_pll2_div2 } +}; + +static const u32 mccksel_mux_table[] = { 0, 2 }; +static const struct clk_parent_data mccksel_mux_parents[] = { + { .hw = &hw_pll1_div2 }, + { .index = 0 } +}; + +static const u32 clkoutsel_mux_table[] = { 0, 1, 2, 3, 4 }; +static const struct clk_parent_data clkoutsel_mux_parents[] = { + { .hw = &npcm8xx_pll_clks[0].hw }, + { .hw = &npcm8xx_pll_clks[1].hw }, + { .index = 0 }, + { .hw = &hw_gfx_div2 }, + { .hw = &hw_pll2_div2 } +}; + +static const u32 gfxmsel_mux_table[] = { 2, 3 }; +static const struct clk_parent_data gfxmsel_mux_parents[] = { + { .index = 0 }, + { .hw = &npcm8xx_pll_clks[2].hw } +}; + +static const u32 dvcssel_mux_table[] = { 2, 3 }; +static const struct clk_parent_data dvcssel_mux_parents[] = { + { .index = 0 }, + { .hw = &npcm8xx_pll_clks[2].hw } +}; + +static const u32 default3_mux_table[] = { 0, 1, 2 }; +static const struct clk_parent_data default3_mux_parents[] = { + { .hw = &npcm8xx_pll_clks[0].hw }, + { .hw = &npcm8xx_pll_clks[1].hw }, + { .index = 0 } +}; + +static struct npcm8xx_clk_mux_data npcm8xx_muxes[] = { + { 0, 3, cpuck_mux_table, "cpu_mux", cpuck_mux_parents, + ARRAY_SIZE(cpuck_mux_parents), CLK_IS_CRITICAL }, + { 4, 2, pixcksel_mux_table, "gfx_pixel_mux", pixcksel_mux_parents, + ARRAY_SIZE(pixcksel_mux_parents), 0 }, + { 6, 2, default_mux_table, "sd_mux", default_mux_parents, + ARRAY_SIZE(default_mux_parents), 0 }, + { 8, 2, default_mux_table, "uart_mux", default_mux_parents, + ARRAY_SIZE(default_mux_parents), 0 }, + { 10, 2, sucksel_mux_table, "serial_usb_mux", sucksel_mux_parents, + ARRAY_SIZE(sucksel_mux_parents), 0 }, + { 12, 2, mccksel_mux_table, "mc_mux", mccksel_mux_parents, + ARRAY_SIZE(mccksel_mux_parents), 0 }, + { 14, 2, default_mux_table, "adc_mux", default_mux_parents, + ARRAY_SIZE(default_mux_parents), 0 }, + { 16, 2, default_mux_table, "gfx_mux", default_mux_parents, + ARRAY_SIZE(default_mux_parents), 0 }, + { 18, 3, clkoutsel_mux_table, "clkout_mux", clkoutsel_mux_parents, + ARRAY_SIZE(clkoutsel_mux_parents), 0 }, + { 21, 2, gfxmsel_mux_table, "gfxm_mux", gfxmsel_mux_parents, + ARRAY_SIZE(gfxmsel_mux_parents), 0 }, + { 23, 2, dvcssel_mux_table, "dvc_mux", dvcssel_mux_parents, + ARRAY_SIZE(dvcssel_mux_parents), 0 }, + { 25, 2, default3_mux_table, "rg_mux", default3_mux_parents, + ARRAY_SIZE(default3_mux_parents), 0 }, + { 27, 2, default3_mux_table, "rcp_mux", default3_mux_parents, + ARRAY_SIZE(default3_mux_parents), 0 }, +}; + +/* configurable pre dividers: */ +static struct npcm8xx_clk_div_data npcm8xx_pre_divs[] = { + { NPCM8XX_CLKDIV1, 21, 5, "pre_adc", &npcm8xx_muxes[6].hw, CLK_DIVIDER_READ_ONLY, 0, -1 }, + { NPCM8XX_CLKDIV1, 26, 2, "ahb", &hw_pre_clk, CLK_DIVIDER_READ_ONLY, CLK_IS_CRITICAL, NPCM8XX_CLK_AHB }, +}; + +/* configurable dividers: */ +static struct npcm8xx_clk_div_data npcm8xx_divs[] = { + { NPCM8XX_CLKDIV1, 28, 3, "adc", &npcm8xx_pre_divs[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_ADC }, + { NPCM8XX_CLKDIV1, 16, 5, "uart", &npcm8xx_muxes[3].hw, 0, 0, NPCM8XX_CLK_UART }, + { NPCM8XX_CLKDIV1, 11, 5, "mmc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_MMC }, + { NPCM8XX_CLKDIV1, 6, 5, "spi3", &npcm8xx_pre_divs[1].hw, 0, 0, NPCM8XX_CLK_SPI3 }, + { 
NPCM8XX_CLKDIV1, 2, 4, "pci", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_PCI }, + + { NPCM8XX_CLKDIV2, 30, 2, "apb4", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB4 }, + { NPCM8XX_CLKDIV2, 28, 2, "apb3", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB3 }, + { NPCM8XX_CLKDIV2, 26, 2, "apb2", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB2 }, + { NPCM8XX_CLKDIV2, 24, 2, "apb1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB1 }, + { NPCM8XX_CLKDIV2, 22, 2, "apb5", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB5 }, + { NPCM8XX_CLKDIV2, 16, 5, "clkout", &npcm8xx_muxes[8].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_CLKOUT }, + { NPCM8XX_CLKDIV2, 13, 3, "gfx", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_GFX }, + { NPCM8XX_CLKDIV2, 8, 5, "usb_bridge", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU }, + { NPCM8XX_CLKDIV2, 4, 4, "usb_host", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU48 }, + { NPCM8XX_CLKDIV2, 0, 4, "sdhc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SDHC }, + + { NPCM8XX_CLKDIV3, 16, 8, "spi1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI1 }, + { NPCM8XX_CLKDIV3, 11, 5, "uart2", &npcm8xx_muxes[3].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_UART2 }, + { NPCM8XX_CLKDIV3, 6, 5, "spi0", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI0 }, + { NPCM8XX_CLKDIV3, 1, 5, "spix", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPIX }, + + { NPCM8XX_CLKDIV4, 28, 4, "rg", &npcm8xx_muxes[11].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RG }, + { NPCM8XX_CLKDIV4, 12, 4, "rcp", &npcm8xx_muxes[12].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RCP }, + + { NPCM8XX_THRTL_CNT, 0, 2, "th", &npcm8xx_muxes[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_TH }, +}; + +static unsigned long npcm8xx_clk_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct npcm8xx_clk_pll *pll = to_npcm8xx_clk_pll(hw); + unsigned long fbdv, indv, otdv1, otdv2; + unsigned int val; + u64 ret; + + if (parent_rate == 0) { + pr_debug("%s: parent rate is zero\n", __func__); + return 0; + } + + val = readl_relaxed(pll->pllcon); + + indv = FIELD_GET(PLLCON_INDV, val); + fbdv = FIELD_GET(PLLCON_FBDV, val); + otdv1 = FIELD_GET(PLLCON_OTDV1, val); + otdv2 = FIELD_GET(PLLCON_OTDV2, val); + + ret = (u64)parent_rate * fbdv; + do_div(ret, indv * otdv1 * otdv2); + + return ret; +} + +static const struct clk_ops npcm8xx_clk_pll_ops = { + .recalc_rate = npcm8xx_clk_pll_recalc_rate, +}; + +static struct clk_hw * +npcm8xx_clk_register_pll(struct device *dev, void __iomem *pllcon, + const char *name, const struct clk_parent_data *parent, + unsigned long flags) +{ + struct npcm8xx_clk_pll *pll; + struct clk_init_data init = {}; + int ret; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &npcm8xx_clk_pll_ops; + init.parent_data = parent; + init.num_parents = 1; + init.flags = flags; + + pll->pllcon = pllcon; + pll->hw.init = &init; + + ret = devm_clk_hw_register(dev, &pll->hw); + if (ret) + return ERR_PTR(ret); + + return &pll->hw; +} + +static DEFINE_SPINLOCK(npcm8xx_clk_lock); + +static int npcm8xx_clk_probe(struct auxiliary_device *adev, + const struct 
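npcm8xx_clk_pll_recalc_rate() above computes rate = parent * FBDV / (INDV * OTDV1 * OTDV2), pulling each factor out of PLLCON with FIELD_GET(). A worked example with hypothetical field values, not taken from an NPCM8xx datasheet:

/*
 * Hypothetical values: parent = 25 MHz, INDV = 1, FBDV = 80,
 * OTDV1 = 2, OTDV2 = 1.
 *
 *   rate = 25,000,000 * 80 / (1 * 2 * 1) = 1,000,000,000 Hz = 1 GHz
 *
 * The product parent_rate * fbdv is held in a u64 and divided with
 * do_div() because the intermediate value can overflow 32 bits.
 */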
auxiliary_device_id *id) +{ + struct npcm_clock_adev *rdev = to_npcm_clock_adev(adev); + struct clk_hw_onecell_data *npcm8xx_clk_data; + struct device *dev = &adev->dev; + struct clk_hw *hw; + unsigned int i; + + npcm8xx_clk_data = devm_kzalloc(dev, struct_size(npcm8xx_clk_data, hws, + NPCM8XX_NUM_CLOCKS), + GFP_KERNEL); + if (!npcm8xx_clk_data) + return -ENOMEM; + + clk_base = rdev->base; + + npcm8xx_clk_data->num = NPCM8XX_NUM_CLOCKS; + + for (i = 0; i < NPCM8XX_NUM_CLOCKS; i++) + npcm8xx_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); + + /* Register plls */ + for (i = 0; i < ARRAY_SIZE(npcm8xx_pll_clks); i++) { + struct npcm8xx_clk_pll_data *pll_clk = &npcm8xx_pll_clks[i]; + + hw = npcm8xx_clk_register_pll(dev, clk_base + pll_clk->reg, + pll_clk->name, &pll_clk->parent, + pll_clk->flags); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll\n"); + pll_clk->hw = *hw; + } + + /* Register fixed dividers */ + hw = devm_clk_hw_register_fixed_factor(dev, "pll1_div2", "pll1", 0, 1, 2); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register fixed div\n"); + hw_pll1_div2 = *hw; + + hw = devm_clk_hw_register_fixed_factor(dev, "pll2_div2", "pll2", 0, 1, 2); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll2 div2\n"); + hw_pll2_div2 = *hw; + + hw = devm_clk_hw_register_fixed_factor(dev, "pll_gfx_div2", "pll_gfx", 0, 1, 2); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register gfx div2\n"); + hw_gfx_div2 = *hw; + + /* Register muxes */ + for (i = 0; i < ARRAY_SIZE(npcm8xx_muxes); i++) { + struct npcm8xx_clk_mux_data *mux_data = &npcm8xx_muxes[i]; + + hw = devm_clk_hw_register_mux_parent_data_table(dev, + mux_data->name, + mux_data->parent_data, + mux_data->num_parents, + mux_data->flags, + clk_base + NPCM8XX_CLKSEL, + mux_data->shift, + mux_data->mask, + 0, + mux_data->table, + &npcm8xx_clk_lock); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register mux\n"); + mux_data->hw = *hw; + } + + hw = devm_clk_hw_register_fixed_factor(dev, "pre_clk", "cpu_mux", 0, 1, 2); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre clk div2\n"); + hw_pre_clk = *hw; + + hw = devm_clk_hw_register_fixed_factor(dev, "axi", "th", 0, 1, 2); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register axi div2\n"); + npcm8xx_clk_data->hws[NPCM8XX_CLK_AXI] = hw; + + hw = devm_clk_hw_register_fixed_factor(dev, "atb", "axi", 0, 1, 2); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register atb div2\n"); + npcm8xx_clk_data->hws[NPCM8XX_CLK_ATB] = hw; + + /* Register pre dividers */ + for (i = 0; i < ARRAY_SIZE(npcm8xx_pre_divs); i++) { + struct npcm8xx_clk_div_data *div_data = &npcm8xx_pre_divs[i]; + + hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name, + div_data->parent_hw, + div_data->flags, + clk_base + div_data->reg, + div_data->shift, + div_data->width, + div_data->clk_divider_flags, + &npcm8xx_clk_lock); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre div\n"); + div_data->hw = *hw; + + if (div_data->onecell_idx >= 0) + npcm8xx_clk_data->hws[div_data->onecell_idx] = hw; + } + + /* Register dividers */ + for (i = 0; i < ARRAY_SIZE(npcm8xx_divs); i++) { + struct npcm8xx_clk_div_data *div_data = &npcm8xx_divs[i]; + + hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name, + div_data->parent_hw, + div_data->flags, + clk_base + div_data->reg, + div_data->shift, + div_data->width, + 
div_data->clk_divider_flags, + &npcm8xx_clk_lock); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), "Can't register div\n"); + + if (div_data->onecell_idx >= 0) + npcm8xx_clk_data->hws[div_data->onecell_idx] = hw; + } + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + npcm8xx_clk_data); +} + +static const struct auxiliary_device_id npcm8xx_clock_ids[] = { + { + .name = "reset_npcm.clk-npcm8xx", + }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, npcm8xx_clock_ids); + +static struct auxiliary_driver npcm8xx_clock_driver = { + .probe = npcm8xx_clk_probe, + .id_table = npcm8xx_clock_ids, +}; +module_auxiliary_driver(npcm8xx_clock_driver); + +MODULE_DESCRIPTION("Clock driver for Nuvoton NPCM8XX BMC SoC"); +MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 4dcde305944c..a560edeb4b55 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <dt-bindings/clock/fsl,qoriq-clockgen.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> @@ -1065,11 +1066,8 @@ static void __init _clockgen_init(struct device_node *np, bool legacy); static void __init legacy_init_clockgen(struct device_node *np) { if (!clockgen.node) { - struct device_node *parent_np; - - parent_np = of_get_parent(np); + struct device_node *parent_np __free(device_node) = of_get_parent(np); _clockgen_init(parent_np, true); - of_node_put(parent_np); } } diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c index 6ee148e5469d..1127c35ce57d 100644 --- a/drivers/clk/clk-si514.c +++ b/drivers/clk/clk-si514.c @@ -371,7 +371,7 @@ static int si514_probe(struct i2c_client *client) } static const struct i2c_device_id si514_id[] = { - { "si514", 0 }, + { "si514" }, { } }; MODULE_DEVICE_TABLE(i2c, si514_id); diff --git a/drivers/clk/clk-twl.c b/drivers/clk/clk-twl.c index eab9d3c8ed8a..20bc3bf8fd62 100644 --- a/drivers/clk/clk-twl.c +++ b/drivers/clk/clk-twl.c @@ -11,13 +11,29 @@ #include <linux/platform_device.h> #include <linux/slab.h> -#define VREG_STATE 2 +#define VREG_STATE 2 +#define VREG_GRP 0 #define TWL6030_CFG_STATE_OFF 0x00 #define TWL6030_CFG_STATE_ON 0x01 #define TWL6030_CFG_STATE_MASK 0x03 +#define TWL6030_CFG_STATE_GRP_SHIFT 5 +#define TWL6030_CFG_STATE_APP_SHIFT 2 +#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT) +#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\ + TWL6030_CFG_STATE_APP_SHIFT) +#define P1_GRP BIT(0) /* processor power group */ +#define P2_GRP BIT(1) +#define P3_GRP BIT(2) +#define ALL_GRP (P1_GRP | P2_GRP | P3_GRP) + +enum twl_type { + TWL_TYPE_6030, + TWL_TYPE_6032, +}; struct twl_clock_info { struct device *dev; + enum twl_type type; u8 base; struct clk_hw hw; }; @@ -56,14 +72,21 @@ static unsigned long twl_clks_recalc_rate(struct clk_hw *hw, static int twl6032_clks_prepare(struct clk_hw *hw) { struct twl_clock_info *cinfo = to_twl_clks_info(hw); - int ret; - ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE, - TWL6030_CFG_STATE_ON); - if (ret < 0) - dev_err(cinfo->dev, "clk prepare failed\n"); + if (cinfo->type == TWL_TYPE_6030) { + int grp; + + grp = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_GRP); + if (grp < 0) + return grp; - return ret; + return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE, + grp << TWL6030_CFG_STATE_GRP_SHIFT | + TWL6030_CFG_STATE_ON); + } + + return 
twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE, + TWL6030_CFG_STATE_ON); } static void twl6032_clks_unprepare(struct clk_hw *hw) @@ -71,32 +94,21 @@ static void twl6032_clks_unprepare(struct clk_hw *hw) struct twl_clock_info *cinfo = to_twl_clks_info(hw); int ret; - ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE, - TWL6030_CFG_STATE_OFF); + if (cinfo->type == TWL_TYPE_6030) + ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE, + ALL_GRP << TWL6030_CFG_STATE_GRP_SHIFT | + TWL6030_CFG_STATE_OFF); + else + ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE, + TWL6030_CFG_STATE_OFF); + if (ret < 0) dev_err(cinfo->dev, "clk unprepare failed\n"); } -static int twl6032_clks_is_prepared(struct clk_hw *hw) -{ - struct twl_clock_info *cinfo = to_twl_clks_info(hw); - int val; - - val = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE); - if (val < 0) { - dev_err(cinfo->dev, "clk read failed\n"); - return val; - } - - val &= TWL6030_CFG_STATE_MASK; - - return val == TWL6030_CFG_STATE_ON; -} - static const struct clk_ops twl6032_clks_ops = { .prepare = twl6032_clks_prepare, .unprepare = twl6032_clks_unprepare, - .is_prepared = twl6032_clks_is_prepared, .recalc_rate = twl_clks_recalc_rate, }; @@ -155,6 +167,7 @@ static int twl_clks_probe(struct platform_device *pdev) for (i = 0; i < count; i++) { cinfo[i].base = hw_data[i].base; cinfo[i].dev = &pdev->dev; + cinfo[i].type = platform_get_device_id(pdev)->driver_data; cinfo[i].hw.init = &hw_data[i].init; ret = devm_clk_hw_register(&pdev->dev, &cinfo[i].hw); if (ret) { @@ -176,7 +189,11 @@ static int twl_clks_probe(struct platform_device *pdev) static const struct platform_device_id twl_clks_id[] = { { + .name = "twl6030-clk", + .driver_data = TWL_TYPE_6030, + }, { .name = "twl6032-clk", + .driver_data = TWL_TYPE_6032, }, { /* sentinel */ } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d02451f951cf..bdc6e5b90da5 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -608,12 +608,6 @@ bool clk_hw_is_prepared(const struct clk_hw *hw) } EXPORT_SYMBOL_GPL(clk_hw_is_prepared); -bool clk_hw_rate_is_protected(const struct clk_hw *hw) -{ - return clk_core_rate_is_protected(hw->core); -} -EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); - bool clk_hw_is_enabled(const struct clk_hw *hw) { return clk_core_is_enabled(hw->core); @@ -2536,7 +2530,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core, rate = clk_core_req_round_rate_nolock(core, req_rate); /* bail early if nothing to do */ - if (rate == clk_core_get_rate_nolock(core)) + if (rate == clk_core_get_rate_recalc(core)) return 0; /* fail on a direct rate set of a protected provider */ diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c index 52fd25594c96..68a28e70bb61 100644 --- a/drivers/clk/clk_kunit_helpers.c +++ b/drivers/clk/clk_kunit_helpers.c @@ -203,5 +203,35 @@ int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struc } EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit); +KUNIT_DEFINE_ACTION_WRAPPER(of_clk_del_provider_wrapper, + of_clk_del_provider, struct device_node *); + +/** + * of_clk_add_hw_provider_kunit() - Test managed of_clk_add_hw_provider() + * @test: The test context + * @np: Device node pointer associated with clock provider + * @get: Callback for decoding clk_hw + * @data: Context pointer for @get callback. + * + * Just like of_clk_add_hw_provider(), except the clk_hw provider is managed by + * the test case and is automatically unregistered after the test case + * concludes. 
+ * + * Return: 0 on success or a negative errno value on failure. + */ +int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data), + void *data) +{ + int ret; + + ret = of_clk_add_hw_provider(np, get, data); + if (ret) + return ret; + + return kunit_add_action_or_reset(test, of_clk_del_provider_wrapper, np); +} +EXPORT_SYMBOL_GPL(of_clk_add_hw_provider_kunit); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers"); diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c index aa3ddcfc00eb..f08feeaa3750 100644 --- a/drivers/clk/clk_test.c +++ b/drivers/clk/clk_test.c @@ -4,6 +4,7 @@ */ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/clk/clk-conf.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -15,6 +16,7 @@ #include <kunit/platform_device.h> #include <kunit/test.h> +#include "kunit_clk_assigned_rates.h" #include "clk_parent_data_test.h" static const struct clk_ops empty_clk_ops = { }; @@ -3075,7 +3077,326 @@ static struct kunit_suite clk_register_clk_parent_data_device_suite = { .test_cases = clk_register_clk_parent_data_device_test_cases, }; +struct clk_assigned_rates_context { + struct clk_dummy_context clk0; + struct clk_dummy_context clk1; +}; + +/* + * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test + * @desc: Test description + * @overlay_begin: Pointer to start of DT overlay to apply for test + * @overlay_end: Pointer to end of DT overlay to apply for test + * @rate0: Initial rate of first clk + * @rate1: Initial rate of second clk + * @consumer_test: true if a consumer is being tested + */ +struct clk_assigned_rates_test_param { + const char *desc; + u8 *overlay_begin; + u8 *overlay_end; + unsigned long rate0; + unsigned long rate1; + bool consumer_test; +}; + +#define TEST_PARAM_OVERLAY(overlay_name) \ + .overlay_begin = of_overlay_begin(overlay_name), \ + .overlay_end = of_overlay_end(overlay_name) + +static void +clk_assigned_rates_register_clk(struct kunit *test, + struct clk_dummy_context *ctx, + struct device_node *np, const char *name, + unsigned long rate) +{ + struct clk_init_data init = { }; + + init.name = name; + init.ops = &clk_dummy_rate_ops; + ctx->hw.init = &init; + ctx->rate = rate; + + KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw)); + KUNIT_ASSERT_EQ(test, ctx->rate, rate); +} + +/* + * Does most of the work of the test: + * + * 1. Apply the overlay to test + * 2. Register the clk or clks to test + * 3. Register the clk provider + * 4. Apply clk defaults to the consumer device if this is a consumer test + * + * The tests will set different test_param values to test different scenarios + * and validate that in their test functions. 
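A hedged sketch of how a test case might call the of_clk_add_hw_provider_kunit() helper documented above; the compatible string, the priv setup, and the reliance on <kunit/clk.h> for the declaration are assumptions.

#include <kunit/clk.h>
#include <kunit/test.h>
#include <linux/clk-provider.h>
#include <linux/of.h>

/* Sketch: the provider is deleted automatically when the test ends. */
static void example_provider_test(struct kunit *test)
{
	struct clk_hw *hw = test->priv;	/* assumed registered in suite init */
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "test,example-provider");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);

	KUNIT_ASSERT_EQ(test, 0,
			of_clk_add_hw_provider_kunit(test, np,
						     of_clk_hw_simple_get, hw));
	/* node reference handling elided for brevity */
}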
+ */ +static int clk_assigned_rates_test_init(struct kunit *test) +{ + struct device_node *np, *consumer; + struct clk_hw_onecell_data *data; + struct clk_assigned_rates_context *ctx; + u32 clk_cells; + const struct clk_assigned_rates_test_param *test_param; + + test_param = test->param_value; + + KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test, + test_param->overlay_begin, + test_param->overlay_end)); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL)); + test->priv = ctx; + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, + np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates")); + of_node_put_kunit(test, np); + + KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells)); + /* Only support #clock-cells = <0> or <1> */ + KUNIT_ASSERT_LT(test, clk_cells, 2); + + clk_assigned_rates_register_clk(test, &ctx->clk0, np, + "test_assigned_rate0", test_param->rate0); + if (clk_cells == 0) { + KUNIT_ASSERT_EQ(test, 0, + of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get, + &ctx->clk0.hw)); + } else if (clk_cells == 1) { + clk_assigned_rates_register_clk(test, &ctx->clk1, np, + "test_assigned_rate1", test_param->rate1); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, + data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL)); + data->num = 2; + data->hws[0] = &ctx->clk0.hw; + data->hws[1] = &ctx->clk1.hw; + + KUNIT_ASSERT_EQ(test, 0, + of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data)); + } + + /* Consumers are optional */ + if (test_param->consumer_test) { + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, + consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer")); + of_node_put_kunit(test, consumer); + + KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false)); + } + + return 0; +} + +static void clk_assigned_rates_assigns_one(struct kunit *test) +{ + struct clk_assigned_rates_context *ctx = test->priv; + + KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE); +} + +static void clk_assigned_rates_assigns_multiple(struct kunit *test) +{ + struct clk_assigned_rates_context *ctx = test->priv; + + KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE); + KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE); +} + +static void clk_assigned_rates_skips(struct kunit *test) +{ + struct clk_assigned_rates_context *ctx = test->priv; + const struct clk_assigned_rates_test_param *test_param = test->param_value; + + KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE); + KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0); +} + +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer); + +/* Test cases that assign one rate */ +static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = { + { + /* + * Test that a single cell assigned-clock-rates property + * assigns the rate when the property is in the provider. + */ + .desc = "provider assigns", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one), + }, + { + /* + * Test that a single cell assigned-clock-rates property + * assigns the rate when the property is in the consumer. 
+ */ + .desc = "consumer assigns", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer), + .consumer_test = true, + }, + { + /* + * Test that a single cell assigned-clock-rates-u64 property + * assigns the rate when the property is in the provider. + */ + .desc = "provider assigns u64", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one), + }, + { + /* + * Test that a single cell assigned-clock-rates-u64 property + * assigns the rate when the property is in the consumer. + */ + .desc = "consumer assigns u64", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer), + .consumer_test = true, + }, +}; +KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one, + clk_assigned_rates_assigns_one_test_params, desc) + +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer); + +/* Test cases that assign multiple rates */ +static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = { + { + /* + * Test that a multiple cell assigned-clock-rates property + * assigns the rates when the property is in the provider. + */ + .desc = "provider assigns", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple), + }, + { + /* + * Test that a multiple cell assigned-clock-rates property + * assigns the rates when the property is in the consumer. + */ + .desc = "consumer assigns", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer), + .consumer_test = true, + }, + { + /* + * Test that a single cell assigned-clock-rates-u64 property + * assigns the rate when the property is in the provider. + */ + .desc = "provider assigns u64", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple), + }, + { + /* + * Test that a multiple cell assigned-clock-rates-u64 property + * assigns the rates when the property is in the consumer. + */ + .desc = "consumer assigns u64", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer), + .consumer_test = true, + }, +}; +KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple, + clk_assigned_rates_assigns_multiple_test_params, + desc) + +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null); +OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer); + +/* Test cases that skip changing the rate due to malformed DT */ +static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = { + { + /* + * Test that an assigned-clock-rates property without an assigned-clocks + * property fails when the property is in the provider. + */ + .desc = "provider missing assigned-clocks", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without), + .rate0 = 3000, + }, + { + /* + * Test that an assigned-clock-rates property without an assigned-clocks + * property fails when the property is in the consumer. + */ + .desc = "consumer missing assigned-clocks", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer), + .rate0 = 3000, + .consumer_test = true, + }, + { + /* + * Test that an assigned-clock-rates property of zero doesn't + * set a rate when the property is in the provider. 
+ */ + .desc = "provider assigned-clock-rates of zero", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero), + .rate0 = 3000, + }, + { + /* + * Test that an assigned-clock-rates property of zero doesn't + * set a rate when the property is in the consumer. + */ + .desc = "consumer assigned-clock-rates of zero", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer), + .rate0 = 3000, + .consumer_test = true, + }, + { + /* + * Test that an assigned-clocks property with a null phandle + * doesn't set a rate when the property is in the provider. + */ + .desc = "provider assigned-clocks null phandle", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null), + .rate0 = 3000, + }, + { + /* + * Test that an assigned-clocks property with a null phandle + * doesn't set a rate when the property is in the consumer. + */ + .desc = "provider assigned-clocks null phandle", + TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer), + .rate0 = 3000, + .consumer_test = true, + }, +}; +KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips, + clk_assigned_rates_skips_test_params, + desc) + +static struct kunit_case clk_assigned_rates_test_cases[] = { + KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one, + clk_assigned_rates_assigns_one_gen_params), + KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple, + clk_assigned_rates_assigns_multiple_gen_params), + KUNIT_CASE_PARAM(clk_assigned_rates_skips, + clk_assigned_rates_skips_gen_params), + {} +}; + +/* + * Test suite for assigned-clock-rates{-u64} DT property. + */ +static struct kunit_suite clk_assigned_rates_suite = { + .name = "clk_assigned_rates", + .test_cases = clk_assigned_rates_test_cases, + .init = clk_assigned_rates_test_init, +}; + kunit_test_suites( + &clk_assigned_rates_suite, &clk_leaf_mux_set_rate_parent_test_suite, &clk_test_suite, &clk_multiple_parents_mux_test_suite, diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c index 591e0364ee5c..85771afd4698 100644 --- a/drivers/clk/imx/clk-fracn-gppll.c +++ b/drivers/clk/imx/clk-fracn-gppll.c @@ -254,9 +254,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate, pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv | FIELD_PREP(PLL_MFI_MASK, rate->mfi); writel_relaxed(pll_div, pll->base + PLL_DIV); + readl(pll->base + PLL_DIV); if (pll->flags & CLK_FRACN_GPPLL_FRACN) { writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR); writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR); + readl(pll->base + PLL_NUMERATOR); } /* Wait for 5us according to fracn mode pll doc */ @@ -265,6 +267,7 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate, /* Enable Powerup */ tmp |= POWERUP_MASK; writel_relaxed(tmp, pll->base + PLL_CTRL); + readl(pll->base + PLL_CTRL); /* Wait Lock */ ret = clk_fracn_gppll_wait_lock(pll); @@ -302,14 +305,15 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw) val |= POWERUP_MASK; writel_relaxed(val, pll->base + PLL_CTRL); - - val |= CLKMUX_EN; - writel_relaxed(val, pll->base + PLL_CTRL); + readl(pll->base + PLL_CTRL); ret = clk_fracn_gppll_wait_lock(pll); if (ret) return ret; + val |= CLKMUX_EN; + writel_relaxed(val, pll->base + PLL_CTRL); + val &= ~CLKMUX_BYPASS; writel_relaxed(val, pll->base + PLL_CTRL); diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c index 6c351050b82a..c169fe53a35f 100644 --- a/drivers/clk/imx/clk-imx8-acm.c +++ b/drivers/clk/imx/clk-imx8-acm.c @@ -294,9 +294,9 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev, 
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); - if (IS_ERR(dev_pm->pd_dev_link[i])) { + if (!dev_pm->pd_dev_link[i]) { dev_pm_domain_detach(dev_pm->pd_dev[i], false); - ret = PTR_ERR(dev_pm->pd_dev_link[i]); + ret = -EINVAL; goto detach_pm; } } diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c index c6a9bc8ecc1f..58a516dd385b 100644 --- a/drivers/clk/imx/clk-imx93.c +++ b/drivers/clk/imx/clk-imx93.c @@ -15,6 +15,11 @@ #include "clk.h" +#define IMX93_CLK_END 207 + +#define PLAT_IMX93 BIT(0) +#define PLAT_IMX91 BIT(1) + enum clk_sel { LOW_SPEED_IO_SEL, NON_IO_SEL, @@ -53,6 +58,7 @@ static const struct imx93_clk_root { u32 off; enum clk_sel sel; unsigned long flags; + unsigned long plat; } root_array[] = { /* a55/m33/bus critical clk for system run */ { IMX93_CLK_A55_PERIPH, "a55_periph_root", 0x0000, FAST_SEL, CLK_IS_CRITICAL }, @@ -63,7 +69,7 @@ static const struct imx93_clk_root { { IMX93_CLK_BUS_AON, "bus_aon_root", 0x0300, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL }, { IMX93_CLK_WAKEUP_AXI, "wakeup_axi_root", 0x0380, FAST_SEL, CLK_IS_CRITICAL }, { IMX93_CLK_SWO_TRACE, "swo_trace_root", 0x0400, LOW_SPEED_IO_SEL, }, - { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, }, + { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, }, { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, }, { IMX93_CLK_LPTMR1, "lptmr1_root", 0x0700, LOW_SPEED_IO_SEL, }, @@ -120,15 +126,15 @@ static const struct imx93_clk_root { { IMX93_CLK_HSIO_ACSCAN_80M, "hsio_acscan_80m_root", 0x1f80, LOW_SPEED_IO_SEL, }, { IMX93_CLK_HSIO_ACSCAN_480M, "hsio_acscan_480m_root", 0x2000, MISC_SEL, }, { IMX93_CLK_NIC_AXI, "nic_axi_root", 0x2080, FAST_SEL, CLK_IS_CRITICAL, }, - { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, }, - { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, }, + { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, 0, PLAT_IMX93, }, { IMX93_CLK_MEDIA_AXI, "media_axi_root", 0x2280, FAST_SEL, }, { IMX93_CLK_MEDIA_APB, "media_apb_root", 0x2300, LOW_SPEED_IO_SEL, }, - { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, }, + { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, 0, PLAT_IMX93, }, { IMX93_CLK_MEDIA_DISP_PIX, "media_disp_pix_root", 0x2400, VIDEO_SEL, }, { IMX93_CLK_CAM_PIX, "cam_pix_root", 0x2480, VIDEO_SEL, }, - { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, }, - { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, }, + { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, 0, PLAT_IMX93, }, { IMX93_CLK_ADC, "adc_root", 0x2700, LOW_SPEED_IO_SEL, }, { IMX93_CLK_PDM, "pdm_root", 0x2780, AUDIO_SEL, }, { IMX93_CLK_TSTMR1, "tstmr1_root", 0x2800, LOW_SPEED_IO_SEL, }, @@ -137,13 +143,16 @@ static const struct imx93_clk_root { { IMX93_CLK_MQS2, "mqs2_root", 0x2980, AUDIO_SEL, }, { IMX93_CLK_AUDIO_XCVR, "audio_xcvr_root", 0x2a00, NON_IO_SEL, }, { IMX93_CLK_SPDIF, "spdif_root", 0x2a80, AUDIO_SEL, }, - { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, }, - { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, }, - { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, }, - { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, }, - { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, 
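For context on the clk-imx8-acm fix above: device_link_add() reports failure by returning NULL rather than an ERR_PTR, so the old IS_ERR() check could never fire and a failed link was silently accepted. The corrected pattern, sketched with invented names:

#include <linux/device.h>

/* Sketch: device_link_add() signals failure with NULL, never ERR_PTR. */
static int example_link_domains(struct device *consumer,
				struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)	/* not IS_ERR(link) */
		return -EINVAL;

	return 0;
}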
LOW_SPEED_IO_SEL, }, - { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, }, - { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, }, + { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, + { IMX91_CLK_ENET1_QOS_TSN, "enet1_qos_tsn_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX91, }, + { IMX91_CLK_ENET_TIMER, "enet_timer_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX91, }, + { IMX91_CLK_ENET2_REGULAR, "enet2_regular_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX91, }, + { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, + { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, }, { IMX93_CLK_USB_PHY_BURUNIN, "usb_phy_root", 0x2e80, LOW_SPEED_IO_SEL, }, { IMX93_CLK_PAL_CAME_SCAN, "pal_came_scan_root", 0x2f00, MISC_SEL, } }; @@ -155,6 +164,7 @@ static const struct imx93_clk_ccgr { u32 off; unsigned long flags; u32 *shared_count; + unsigned long plat; } ccgr_array[] = { { IMX93_CLK_A55_GATE, "a55_alt", "a55_alt_root", 0x8000, }, /* M33 critical clk for system run */ @@ -244,8 +254,10 @@ static const struct imx93_clk_ccgr { { IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, }, { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, }, { IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, }, - { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, }, - { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, }, + { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX93, }, + { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX93, }, + { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, }, + { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, }, /* Critical because clk accessed during CPU idle */ { IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, CLK_IS_CRITICAL}, { IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, }, @@ -265,6 +277,7 @@ static int imx93_clocks_probe(struct platform_device *pdev) const struct imx93_clk_ccgr *ccgr; void __iomem *base, *anatop_base; int i, ret; + const unsigned long plat = (unsigned long)device_get_match_data(&pdev->dev); clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX93_CLK_END), GFP_KERNEL); @@ -314,17 +327,20 @@ static int imx93_clocks_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(root_array); i++) { root = &root_array[i]; - clks[root->clk] = imx93_clk_composite_flags(root->name, - parent_names[root->sel], - 4, base + root->off, 3, - root->flags); + if (!root->plat || root->plat & plat) + clks[root->clk] = imx93_clk_composite_flags(root->name, + parent_names[root->sel], + 4, base + root->off, 3, + root->flags); } for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) { ccgr = &ccgr_array[i]; - clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name, - ccgr->flags, base + ccgr->off, 0, 1, 1, 3, - ccgr->shared_count); + if (!ccgr->plat || ccgr->plat & plat) + clks[ccgr->clk] = imx93_clk_gate(NULL, + ccgr->name, ccgr->parent_name, + ccgr->flags, base + ccgr->off, 0, 1, 1, 3, + ccgr->shared_count); } 
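The imx93/imx91 table sharing above works by tagging each root and gate entry with a platform bitmask and comparing it against per-compatible match data; entries whose plat field is 0 are registered on every platform. The pattern, distilled into a hedged sketch with illustrative names:

#include <linux/bits.h>
#include <linux/property.h>

#define PLAT_A	BIT(0)
#define PLAT_B	BIT(1)

struct example_clk_desc {
	const char *name;
	unsigned long plat;	/* 0: present on every platform */
};

static bool example_clk_applies(const struct example_clk_desc *desc,
				struct device *dev)
{
	unsigned long plat = (unsigned long)device_get_match_data(dev);

	return !desc->plat || (desc->plat & plat);
}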
clks[IMX93_CLK_A55_SEL] = imx_clk_hw_mux2("a55_sel", base + 0x4820, 0, 1, a55_core_sels, @@ -354,7 +370,8 @@ unregister_hws: } static const struct of_device_id imx93_clk_of_match[] = { - { .compatible = "fsl,imx93-ccm" }, + { .compatible = "fsl,imx93-ccm", .data = (void *)PLAT_IMX93 }, + { .compatible = "fsl,imx91-ccm", .data = (void *)PLAT_IMX91 }, { /* Sentinel */ }, }; MODULE_DEVICE_TABLE(of, imx93_clk_of_match); diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c index 19a62da74be4..25974947ad0c 100644 --- a/drivers/clk/imx/clk-imx95-blk-ctl.c +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -277,6 +277,25 @@ static const struct imx95_blk_ctl_dev_data netcmix_dev_data = { .clk_reg_offset = 0, }; +static const struct imx95_blk_ctl_clk_dev_data hsio_blk_ctl_clk_dev_data[] = { + [0] = { + .name = "hsio_blk_ctl_clk", + .parent_names = (const char *[]){ "hsio_pll", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 6, + .bit_width = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + } +}; + +static const struct imx95_blk_ctl_dev_data hsio_blk_ctl_dev_data = { + .num_clks = 1, + .clk_dev_data = hsio_blk_ctl_clk_dev_data, + .clk_reg_offset = 0, +}; + static int imx95_bc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -447,6 +466,7 @@ static const struct of_device_id imx95_bc_of_match[] = { { .compatible = "nxp,imx95-display-master-csr", }, { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data }, { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data }, + { .compatible = "nxp,imx95-hsio-blk-ctl", .data = &hsio_blk_ctl_dev_data }, { .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data }, { .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data}, { /* Sentinel */ }, diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c index dd5abd09f3e2..6376557a3c3d 100644 --- a/drivers/clk/imx/clk-lpcg-scu.c +++ b/drivers/clk/imx/clk-lpcg-scu.c @@ -6,10 +6,12 @@ #include <linux/bits.h> #include <linux/clk-provider.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/units.h> #include "clk-scu.h" @@ -41,6 +43,29 @@ struct clk_lpcg_scu { #define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw) +/* e10858 -LPCG clock gating register synchronization errata */ +static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val) +{ + writel(val, reg); + + if (rate >= 24 * HZ_PER_MHZ || rate == 0) { + /* + * The time taken to access the LPCG registers from the AP core + * through the interconnect is longer than the minimum delay + * of 4 clock cycles required by the errata. + * Adding a readl will provide sufficient delay to prevent + * back-to-back writes. + */ + readl(reg); + } else { + /* + * For clocks running below 24MHz, wait a minimum of + * 4 clock cycles. 
+ */ + ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate))); + } +} + static int clk_lpcg_scu_enable(struct clk_hw *hw) { struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw); @@ -57,7 +82,8 @@ static int clk_lpcg_scu_enable(struct clk_hw *hw) val |= CLK_GATE_SCU_LPCG_HW_SEL; reg |= val << clk->bit_idx; - writel(reg, clk->reg); + + lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg); spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags); @@ -74,7 +100,7 @@ static void clk_lpcg_scu_disable(struct clk_hw *hw) reg = readl_relaxed(clk->reg); reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx); - writel(reg, clk->reg); + lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg); spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags); } @@ -135,6 +161,9 @@ static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev) { struct clk_lpcg_scu *clk = dev_get_drvdata(dev); + if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg"))) + return 0; + clk->state = readl_relaxed(clk->reg); dev_dbg(dev, "save lpcg state 0x%x\n", clk->state); @@ -145,13 +174,11 @@ static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev) { struct clk_lpcg_scu *clk = dev_get_drvdata(dev); - /* - * FIXME: Sometimes writes don't work unless the CPU issues - * them twice - */ + if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg"))) + return 0; writel(clk->state, clk->reg); - writel(clk->state, clk->reg); + lpcg_e10858_writel(0, clk->reg, clk->state); dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state); return 0; diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index b1dd0c08e091..b27186aaf2a1 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c @@ -596,7 +596,7 @@ static int __maybe_unused imx_clk_scu_suspend(struct device *dev) clk->rate = clk_scu_recalc_rate(&clk->hw, 0); else clk->rate = clk_hw_get_rate(&clk->hw); - clk->is_enabled = clk_hw_is_enabled(&clk->hw); + clk->is_enabled = clk_hw_is_prepared(&clk->hw); if (clk->parent) dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent), diff --git a/drivers/clk/kunit_clk_assigned_rates.h b/drivers/clk/kunit_clk_assigned_rates.h new file mode 100644 index 000000000000..df2d84dcaa93 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _KUNIT_CLK_ASSIGNED_RATES_H +#define _KUNIT_CLK_ASSIGNED_RATES_H + +#define ASSIGNED_RATES_0_RATE 1600000 +#define ASSIGNED_RATES_1_RATE 9700000 + +#endif diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso new file mode 100644 index 000000000000..e600736e70f5 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <1>; + assigned-clocks = <&clk 0>, + <&clk 1>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>, + <ASSIGNED_RATES_1_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso new file mode 100644 index 000000000000..260aba458daf --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = 
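To make the e10858 delay above concrete: 1000 * HZ_PER_MHZ is 10^9, so DIV_ROUND_UP(10^9, rate) is one clock period in nanoseconds and the helper waits at least four periods. A worked example with a hypothetical rate:

/*
 * For an 8 MHz LPCG clock (hypothetical): one period is
 * 10^9 / 8,000,000 = 125 ns, so the helper issues
 * ndelay(4 * 125) = ndelay(500). At 24 MHz and above, four periods
 * take at most ~167 ns, which the posted-write flush (readl) already
 * covers, so no ndelay is needed on that path.
 */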
<1>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + assigned-clocks = <&clk 0>, + <&clk 1>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>, + <ASSIGNED_RATES_1_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_null.dtso b/drivers/clk/kunit_clk_assigned_rates_null.dtso new file mode 100644 index 000000000000..0b27b38a9130 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_null.dtso @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + assigned-clocks = <0>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso new file mode 100644 index 000000000000..99fb332ae83d --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + assigned-clocks = <0>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_one.dtso b/drivers/clk/kunit_clk_assigned_rates_one.dtso new file mode 100644 index 000000000000..dd95ec9b1cf9 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_one.dtso @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + assigned-clocks = <&clk>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso new file mode 100644 index 000000000000..a41dca806318 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + assigned-clocks = <&clk>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso new file mode 100644 index 000000000000..389b4e2eb7f7 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <1>; + assigned-clocks = <&clk 0>, + <&clk 1>; + assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>, + /bits/ 64 <ASSIGNED_RATES_1_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso new file mode 100644 index 000000000000..3e117fd59b7d --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible 
= "test,clk-assigned-rates"; + #clock-cells = <1>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + assigned-clocks = <&clk 0>, + <&clk 1>; + assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>, + /bits/ 64 <ASSIGNED_RATES_1_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso new file mode 100644 index 000000000000..87041264e8f5 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + assigned-clocks = <&clk>; + assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso new file mode 100644 index 000000000000..3259c003aec0 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + assigned-clocks = <&clk>; + assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_without.dtso b/drivers/clk/kunit_clk_assigned_rates_without.dtso new file mode 100644 index 000000000000..22d333495cf2 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_without.dtso @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso new file mode 100644 index 000000000000..75ac09140f83 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "kunit_clk_assigned_rates.h" + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + assigned-clock-rates = <ASSIGNED_RATES_0_RATE>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_zero.dtso b/drivers/clk/kunit_clk_assigned_rates_zero.dtso new file mode 100644 index 000000000000..08e042c2eafe --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_zero.dtso @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + assigned-clocks = <&clk>; + assigned-clock-rates = <0>; + }; +}; diff --git a/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso new file mode 100644 index 000000000000..1d964672e855 --- /dev/null +++ b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&{/} { + clk: kunit-clock { + compatible = "test,clk-assigned-rates"; + #clock-cells = <0>; + }; + + kunit-clock-consumer { + compatible = "test,clk-consumer"; + 
assigned-clocks = <&clk>; + assigned-clock-rates = <0>; + }; +}; diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index 70a005e7e1b1..5f8e6d68fa14 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -124,6 +124,43 @@ config COMMON_CLK_MT2712_VENCSYS help This driver supports MediaTek MT2712 vencsys clocks. +config COMMON_CLK_MT6735 + tristate "Main clock drivers for MediaTek MT6735" + depends on ARCH_MEDIATEK || COMPILE_TEST + select COMMON_CLK_MEDIATEK + help + This enables drivers for clocks and resets provided + by apmixedsys, topckgen, infracfg and pericfg on the + MediaTek MT6735 SoC. + +config COMMON_CLK_MT6735_IMGSYS + tristate "Clock driver for MediaTek MT6735 imgsys" + depends on COMMON_CLK_MT6735 + help + This enables a driver for clocks provided by imgsys + on the MediaTek MT6735 SoC. + +config COMMON_CLK_MT6735_MFGCFG + tristate "Clock driver for MediaTek MT6735 mfgcfg" + depends on COMMON_CLK_MT6735 + help + This enables a driver for clocks and resets provided + by mfgcfg on the MediaTek MT6735 SoC. + +config COMMON_CLK_MT6735_VDECSYS + tristate "Clock driver for MediaTek MT6735 vdecsys" + depends on COMMON_CLK_MT6735 + help + This enables a driver for clocks and resets provided + by vdecsys on the MediaTek MT6735 SoC. + +config COMMON_CLK_MT6735_VENCSYS + tristate "Clock driver for MediaTek MT6735 vencsys" + depends on COMMON_CLK_MT6735 + help + This enables a driver for clocks provided by vencsys + on the MediaTek MT6735 SoC. + config COMMON_CLK_MT6765 bool "Clock driver for MediaTek MT6765" depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST @@ -887,13 +924,6 @@ config COMMON_CLK_MT8195_APUSYS help This driver supports MediaTek MT8195 AI Processor Unit System clocks. -config COMMON_CLK_MT8195_AUDSYS - tristate "Clock driver for MediaTek MT8195 audsys" - depends on COMMON_CLK_MT8195 - default COMMON_CLK_MT8195 - help - This driver supports MediaTek MT8195 audsys clocks. - config COMMON_CLK_MT8195_IMP_IIC_WRAP tristate "Clock driver for MediaTek MT8195 imp_iic_wrap" depends on COMMON_CLK_MT8195 @@ -908,14 +938,6 @@ config COMMON_CLK_MT8195_MFGCFG help This driver supports MediaTek MT8195 mfgcfg clocks. -config COMMON_CLK_MT8195_MSDC - tristate "Clock driver for MediaTek MT8195 msdc" - depends on COMMON_CLK_MT8195 - default COMMON_CLK_MT8195 - help - This driver supports MediaTek MT8195 MMC and SD Controller's - msdc and msdc_top clocks. 
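
A note on the tristate options added above: each MT6735 subsystem clock driver can be built as a loadable module and autoloads through its OF match table. A minimal sketch of that pattern follows, with hypothetical names ("example,clk-subsys", example_clk_probe) standing in for the real drivers that appear later in this series:

/*
 * Sketch of the tristate platform-driver pattern the MT6735 options
 * enable; all identifiers here are illustrative placeholders.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct of_device_id example_clk_of_match[] = {
	{ .compatible = "example,clk-subsys" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_clk_of_match);

static int example_clk_probe(struct platform_device *pdev)
{
	/* register clocks with the common clock framework here */
	return 0;
}

static struct platform_driver example_clk_driver = {
	.probe = example_clk_probe,
	.driver = {
		.name = "example-clk",
		.of_match_table = example_clk_of_match,
	},
};
module_platform_driver(example_clk_driver);

MODULE_DESCRIPTION("Illustrative modular clock driver skeleton");
MODULE_LICENSE("GPL");
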
- config COMMON_CLK_MT8195_SCP_ADSP tristate "Clock driver for MediaTek MT8195 scp_adsp" depends on COMMON_CLK_MT8195 diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index eeccfa039896..6efec95406bd 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -2,6 +2,11 @@ obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o obj-$(CONFIG_COMMON_CLK_MEDIATEK_FHCTL) += clk-fhctl.o clk-pllfh.o +obj-$(CONFIG_COMMON_CLK_MT6735) += clk-mt6735-apmixedsys.o clk-mt6735-infracfg.o clk-mt6735-pericfg.o clk-mt6735-topckgen.o +obj-$(CONFIG_COMMON_CLK_MT6735_IMGSYS) += clk-mt6735-imgsys.o +obj-$(CONFIG_COMMON_CLK_MT6735_MFGCFG) += clk-mt6735-mfgcfg.o +obj-$(CONFIG_COMMON_CLK_MT6735_VDECSYS) += clk-mt6735-vdecsys.o +obj-$(CONFIG_COMMON_CLK_MT6735_VENCSYS) += clk-mt6735-vencsys.o obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o diff --git a/drivers/clk/mediatek/clk-mt6735-apmixedsys.c b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c new file mode 100644 index 000000000000..e0949911e8f7 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-pll.h" + +#include <dt-bindings/clock/mediatek,mt6735-apmixedsys.h> + +#define AP_PLL_CON_5 0x014 +#define ARMPLL_CON0 0x200 +#define ARMPLL_CON1 0x204 +#define ARMPLL_PWR_CON0 0x20c +#define MAINPLL_CON0 0x210 +#define MAINPLL_CON1 0x214 +#define MAINPLL_PWR_CON0 0x21c +#define UNIVPLL_CON0 0x220 +#define UNIVPLL_CON1 0x224 +#define UNIVPLL_PWR_CON0 0x22c +#define MMPLL_CON0 0x230 +#define MMPLL_CON1 0x234 +#define MMPLL_PWR_CON0 0x23c +#define MSDCPLL_CON0 0x240 +#define MSDCPLL_CON1 0x244 +#define MSDCPLL_PWR_CON0 0x24c +#define VENCPLL_CON0 0x250 +#define VENCPLL_CON1 0x254 +#define VENCPLL_PWR_CON0 0x25c +#define TVDPLL_CON0 0x260 +#define TVDPLL_CON1 0x264 +#define TVDPLL_PWR_CON0 0x26c +#define APLL1_CON0 0x270 +#define APLL1_CON1 0x274 +#define APLL1_CON2 0x278 +#define APLL1_PWR_CON0 0x280 +#define APLL2_CON0 0x284 +#define APLL2_CON1 0x288 +#define APLL2_CON2 0x28c +#define APLL2_PWR_CON0 0x294 + +#define CON0_RST_BAR BIT(24) + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _rst_bar_mask, \ + _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcwbits, _flags) { \ + .id = _id, \ + .name = _name, \ + .parent_name = "clk26m", \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .rst_bar_mask = _rst_bar_mask, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .tuner_en_reg = _tuner_en_reg, \ + .tuner_en_bit = _tuner_en_bit, \ + .pcw_reg = _pcw_reg, \ + .pcw_chg_reg = _pcw_reg, \ + .pcwbits = _pcwbits, \ + .flags = _flags, \ + } + +static const struct mtk_pll_data apmixedsys_plls[] = { + PLL(CLK_APMIXED_ARMPLL, "armpll", ARMPLL_CON0, ARMPLL_PWR_CON0, 0x00000001, 0, ARMPLL_CON1, 24, 0, 0, 0, ARMPLL_CON1, 21, PLL_AO), + PLL(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, MAINPLL_PWR_CON0, 0xf0000101, CON0_RST_BAR, MAINPLL_CON1, 24, 0, 0, 0, MAINPLL_CON1, 21, HAVE_RST_BAR), + PLL(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, UNIVPLL_PWR_CON0, 0xfc000001, CON0_RST_BAR, UNIVPLL_CON1, 24, 0, 0, 0, UNIVPLL_CON1, 21, 
HAVE_RST_BAR), + PLL(CLK_APMIXED_MMPLL, "mmpll", MMPLL_CON0, MMPLL_PWR_CON0, 0x00000001, 0, MMPLL_CON1, 24, 0, 0, 0, MMPLL_CON1, 21, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, MSDCPLL_PWR_CON0, 0x00000001, 0, MSDCPLL_CON1, 24, 0, 0, 0, MSDCPLL_CON1, 21, 0), + PLL(CLK_APMIXED_VENCPLL, "vencpll", VENCPLL_CON0, VENCPLL_PWR_CON0, 0x00000001, CON0_RST_BAR, VENCPLL_CON1, 24, 0, 0, 0, VENCPLL_CON1, 21, HAVE_RST_BAR), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", TVDPLL_CON0, TVDPLL_PWR_CON0, 0x00000001, 0, TVDPLL_CON1, 24, 0, 0, 0, TVDPLL_CON1, 21, 0), + PLL(CLK_APMIXED_APLL1, "apll1", APLL1_CON0, APLL1_PWR_CON0, 0x00000001, 0, APLL1_CON0, 4, APLL1_CON2, AP_PLL_CON_5, 0, APLL1_CON1, 31, 0), + PLL(CLK_APMIXED_APLL2, "apll2", APLL2_CON0, APLL2_PWR_CON0, 0x00000001, 0, APLL2_CON0, 4, APLL2_CON2, AP_PLL_CON_5, 1, APLL2_CON1, 31, 0) +}; + +static int clk_mt6735_apmixed_probe(struct platform_device *pdev) +{ + void __iomem *base; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct clk_hw_onecell_data *clk_data; + int ret; + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + clk_data = mtk_devm_alloc_clk_data(&pdev->dev, ARRAY_SIZE(apmixedsys_plls)); + if (!clk_data) + return -ENOMEM; + platform_set_drvdata(pdev, clk_data); + + ret = mtk_clk_register_plls(pdev->dev.of_node, apmixedsys_plls, + ARRAY_SIZE(apmixedsys_plls), clk_data); + if (ret) { + dev_err(&pdev->dev, "Failed to register PLLs: %d\n", ret); + return ret; + } + + ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get, + clk_data); + if (ret) + dev_err(&pdev->dev, + "Failed to register clock provider: %d\n", ret); + + return ret; +} + +static void clk_mt6735_apmixed_remove(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); + + mtk_clk_unregister_plls(apmixedsys_plls, ARRAY_SIZE(apmixedsys_plls), clk_data); +} + +static const struct of_device_id of_match_mt6735_apmixedsys[] = { + { .compatible = "mediatek,mt6735-apmixedsys" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_match_mt6735_apmixedsys); + +static struct platform_driver clk_mt6735_apmixedsys = { + .probe = clk_mt6735_apmixed_probe, + .remove = clk_mt6735_apmixed_remove, + .driver = { + .name = "clk-mt6735-apmixedsys", + .of_match_table = of_match_mt6735_apmixedsys, + }, +}; +module_platform_driver(clk_mt6735_apmixedsys); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("MediaTek MT6735 apmixedsys clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-imgsys.c b/drivers/clk/mediatek/clk-mt6735-imgsys.c new file mode 100644 index 000000000000..c564f8f72432 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-imgsys.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +#include <dt-bindings/clock/mediatek,mt6735-imgsys.h> + +#define IMG_CG_CON 0x00 +#define IMG_CG_SET 0x04 +#define IMG_CG_CLR 0x08 + +static struct mtk_gate_regs imgsys_cg_regs = { + .set_ofs = IMG_CG_SET, + .clr_ofs = IMG_CG_CLR, + .sta_ofs = IMG_CG_CON, +}; + +static const struct mtk_gate imgsys_gates[] = { + GATE_MTK(CLK_IMG_SMI_LARB2, "smi_larb2", "mm_sel", &imgsys_cg_regs, 0, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_IMG_CAM_SMI, "cam_smi", "mm_sel", &imgsys_cg_regs, 5, &mtk_clk_gate_ops_setclr), + 
GATE_MTK(CLK_IMG_CAM_CAM, "cam_cam", "mm_sel", &imgsys_cg_regs, 6, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_IMG_SEN_TG, "sen_tg", "mm_sel", &imgsys_cg_regs, 7, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_IMG_SEN_CAM, "sen_cam", "mm_sel", &imgsys_cg_regs, 8, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_IMG_CAM_SV, "cam_sv", "mm_sel", &imgsys_cg_regs, 9, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_IMG_SUFOD, "sufod", "mm_sel", &imgsys_cg_regs, 10, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_IMG_FD, "fd", "mm_sel", &imgsys_cg_regs, 11, &mtk_clk_gate_ops_setclr), +}; + +static const struct mtk_clk_desc imgsys_clks = { + .clks = imgsys_gates, + .num_clks = ARRAY_SIZE(imgsys_gates), +}; + +static const struct of_device_id of_match_mt6735_imgsys[] = { + { .compatible = "mediatek,mt6735-imgsys", .data = &imgsys_clks }, + { /* sentinel */ } +}; + +static struct platform_driver clk_mt6735_imgsys = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-imgsys", + .of_match_table = of_match_mt6735_imgsys, + }, +}; +module_platform_driver(clk_mt6735_imgsys); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("MediaTek MT6735 imgsys clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-infracfg.c b/drivers/clk/mediatek/clk-mt6735-infracfg.c new file mode 100644 index 000000000000..c1171f903cfa --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-infracfg.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +#include <dt-bindings/clock/mediatek,mt6735-infracfg.h> +#include <dt-bindings/reset/mediatek,mt6735-infracfg.h> + +#define INFRA_RST0 0x30 +#define INFRA_GLOBALCON_PDN0 0x40 +#define INFRA_PDN1 0x44 +#define INFRA_PDN_STA 0x48 + +#define RST_NR_PER_BANK 32 + +static struct mtk_gate_regs infra_cg_regs = { + .set_ofs = INFRA_GLOBALCON_PDN0, + .clr_ofs = INFRA_PDN1, + .sta_ofs = INFRA_PDN_STA, +}; + +static const struct mtk_gate infracfg_gates[] = { + GATE_MTK(CLK_INFRA_DBG, "dbg", "axi_sel", &infra_cg_regs, 0, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_GCE, "gce", "axi_sel", &infra_cg_regs, 1, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_TRBG, "trbg", "axi_sel", &infra_cg_regs, 2, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_CPUM, "cpum", "axi_sel", &infra_cg_regs, 3, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_DEVAPC, "devapc", "axi_sel", &infra_cg_regs, 4, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_AUDIO, "audio", "aud_intbus_sel", &infra_cg_regs, 5, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_GCPU, "gcpu", "axi_sel", &infra_cg_regs, 6, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_L2C_SRAM, "l2csram", "axi_sel", &infra_cg_regs, 7, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_M4U, "m4u", "axi_sel", &infra_cg_regs, 8, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_CLDMA, "cldma", "axi_sel", &infra_cg_regs, 12, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_CONNMCU_BUS, "connmcu_bus", "axi_sel", &infra_cg_regs, 15, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_KP, "kp", "axi_sel", &infra_cg_regs, 16, &mtk_clk_gate_ops_setclr), + GATE_MTK_FLAGS(CLK_INFRA_APXGPT, "apxgpt", "axi_sel", &infra_cg_regs, 18, &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL), + GATE_MTK(CLK_INFRA_SEJ, "sej", "axi_sel", &infra_cg_regs, 19, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_CCIF0_AP, 
"ccif0ap", "axi_sel", &infra_cg_regs, 20, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_CCIF1_AP, "ccif1ap", "axi_sel", &infra_cg_regs, 21, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_PMIC_SPI, "pmicspi", "pmicspi_sel", &infra_cg_regs, 22, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_INFRA_PMIC_WRAP, "pmicwrap", "axi_sel", &infra_cg_regs, 23, &mtk_clk_gate_ops_setclr) +}; + +static u16 infracfg_rst_bank_ofs[] = { INFRA_RST0 }; + +static u16 infracfg_rst_idx_map[] = { + [MT6735_INFRA_RST0_EMI_REG] = 0 * RST_NR_PER_BANK + 0, + [MT6735_INFRA_RST0_DRAMC0_AO] = 0 * RST_NR_PER_BANK + 1, + [MT6735_INFRA_RST0_AP_CIRQ_EINT] = 0 * RST_NR_PER_BANK + 3, + [MT6735_INFRA_RST0_APXGPT] = 0 * RST_NR_PER_BANK + 4, + [MT6735_INFRA_RST0_SCPSYS] = 0 * RST_NR_PER_BANK + 5, + [MT6735_INFRA_RST0_KP] = 0 * RST_NR_PER_BANK + 6, + [MT6735_INFRA_RST0_PMIC_WRAP] = 0 * RST_NR_PER_BANK + 7, + [MT6735_INFRA_RST0_CLDMA_AO_TOP] = 0 * RST_NR_PER_BANK + 8, + [MT6735_INFRA_RST0_USBSIF_TOP] = 0 * RST_NR_PER_BANK + 9, + [MT6735_INFRA_RST0_EMI] = 0 * RST_NR_PER_BANK + 16, + [MT6735_INFRA_RST0_CCIF] = 0 * RST_NR_PER_BANK + 17, + [MT6735_INFRA_RST0_DRAMC0] = 0 * RST_NR_PER_BANK + 18, + [MT6735_INFRA_RST0_EMI_AO_REG] = 0 * RST_NR_PER_BANK + 19, + [MT6735_INFRA_RST0_CCIF_AO] = 0 * RST_NR_PER_BANK + 20, + [MT6735_INFRA_RST0_TRNG] = 0 * RST_NR_PER_BANK + 21, + [MT6735_INFRA_RST0_SYS_CIRQ] = 0 * RST_NR_PER_BANK + 22, + [MT6735_INFRA_RST0_GCE] = 0 * RST_NR_PER_BANK + 23, + [MT6735_INFRA_RST0_M4U] = 0 * RST_NR_PER_BANK + 24, + [MT6735_INFRA_RST0_CCIF1] = 0 * RST_NR_PER_BANK + 25, + [MT6735_INFRA_RST0_CLDMA_TOP_PD] = 0 * RST_NR_PER_BANK + 26 +}; + +static const struct mtk_clk_rst_desc infracfg_resets = { + .version = MTK_RST_SIMPLE, + .rst_bank_ofs = infracfg_rst_bank_ofs, + .rst_bank_nr = ARRAY_SIZE(infracfg_rst_bank_ofs), + .rst_idx_map = infracfg_rst_idx_map, + .rst_idx_map_nr = ARRAY_SIZE(infracfg_rst_idx_map) +}; + +static const struct mtk_clk_desc infracfg_clks = { + .clks = infracfg_gates, + .num_clks = ARRAY_SIZE(infracfg_gates), + + .rst_desc = &infracfg_resets +}; + +static const struct of_device_id of_match_mt6735_infracfg[] = { + { .compatible = "mediatek,mt6735-infracfg", .data = &infracfg_clks }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_match_mt6735_infracfg); + +static struct platform_driver clk_mt6735_infracfg = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-infracfg", + .of_match_table = of_match_mt6735_infracfg, + }, +}; +module_platform_driver(clk_mt6735_infracfg); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("MediaTek MT6735 infracfg clock and reset driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-mfgcfg.c b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c new file mode 100644 index 000000000000..1f5aedddf209 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +#include <dt-bindings/clock/mediatek,mt6735-mfgcfg.h> + +#define MFG_CG_CON 0x00 +#define MFG_CG_SET 0x04 +#define MFG_CG_CLR 0x08 +#define MFG_RESET 0x0c + +static struct mtk_gate_regs mfgcfg_cg_regs = { + .set_ofs = MFG_CG_SET, + .clr_ofs = MFG_CG_CLR, + .sta_ofs = MFG_CG_CON, +}; + +static const struct mtk_gate mfgcfg_gates[] = { + GATE_MTK(CLK_MFG_BG3D, "bg3d", "mfg_sel", 
&mfgcfg_cg_regs, 0, &mtk_clk_gate_ops_setclr), +}; + +static u16 mfgcfg_rst_ofs[] = { MFG_RESET }; + +static const struct mtk_clk_rst_desc mfgcfg_resets = { + .version = MTK_RST_SIMPLE, + .rst_bank_ofs = mfgcfg_rst_ofs, + .rst_bank_nr = ARRAY_SIZE(mfgcfg_rst_ofs) +}; + +static const struct mtk_clk_desc mfgcfg_clks = { + .clks = mfgcfg_gates, + .num_clks = ARRAY_SIZE(mfgcfg_gates), + + .rst_desc = &mfgcfg_resets +}; + +static const struct of_device_id of_match_mt6735_mfgcfg[] = { + { .compatible = "mediatek,mt6735-mfgcfg", .data = &mfgcfg_clks }, + { /* sentinel */ } +}; + +static struct platform_driver clk_mt6735_mfgcfg = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-mfgcfg", + .of_match_table = of_match_mt6735_mfgcfg, + }, +}; +module_platform_driver(clk_mt6735_mfgcfg); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("Mediatek MT6735 mfgcfg clock and reset driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-pericfg.c b/drivers/clk/mediatek/clk-mt6735-pericfg.c new file mode 100644 index 000000000000..cbdf6d25c1b2 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-pericfg.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +#include <dt-bindings/clock/mediatek,mt6735-pericfg.h> +#include <dt-bindings/reset/mediatek,mt6735-pericfg.h> + +#define PERI_GLOBALCON_RST0 0x00 +#define PERI_GLOBALCON_RST1 0x04 +#define PERI_GLOBALCON_PDN0_SET 0x08 +#define PERI_GLOBALCON_PDN0_CLR 0x10 +#define PERI_GLOBALCON_PDN0_STA 0x18 + +#define RST_NR_PER_BANK 32 + +static struct mtk_gate_regs peri_cg_regs = { + .set_ofs = PERI_GLOBALCON_PDN0_SET, + .clr_ofs = PERI_GLOBALCON_PDN0_CLR, + .sta_ofs = PERI_GLOBALCON_PDN0_STA, +}; + +static const struct mtk_gate pericfg_gates[] = { + GATE_MTK(CLK_PERI_DISP_PWM, "disp_pwm", "disppwm_sel", &peri_cg_regs, 0, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_THERM, "therm", "axi_sel", &peri_cg_regs, 1, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM1, "pwm1", "axi_sel", &peri_cg_regs, 2, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM2, "pwm2", "axi_sel", &peri_cg_regs, 3, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM3, "pwm3", "axi_sel", &peri_cg_regs, 4, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM4, "pwm4", "axi_sel", &peri_cg_regs, 5, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM5, "pwm5", "axi_sel", &peri_cg_regs, 6, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM6, "pwm6", "axi_sel", &peri_cg_regs, 7, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM7, "pwm7", "axi_sel", &peri_cg_regs, 8, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_PWM, "pwm", "axi_sel", &peri_cg_regs, 9, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_USB0, "usb0", "usb20_sel", &peri_cg_regs, 10, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_IRDA, "irda", "irda_sel", &peri_cg_regs, 11, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_APDMA, "apdma", "axi_sel", &peri_cg_regs, 12, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_MSDC30_0, "msdc30_0", "msdc30_0_sel", &peri_cg_regs, 13, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_MSDC30_1, "msdc30_1", "msdc30_1_sel", &peri_cg_regs, 14, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_MSDC30_2, "msdc30_2", "msdc30_2_sel", &peri_cg_regs, 15, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_MSDC30_3, "msdc30_3", 
"msdc30_3_sel", &peri_cg_regs, 16, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_UART0, "uart0", "uart_sel", &peri_cg_regs, 17, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_UART1, "uart1", "uart_sel", &peri_cg_regs, 18, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_UART2, "uart2", "uart_sel", &peri_cg_regs, 19, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_UART3, "uart3", "uart_sel", &peri_cg_regs, 20, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_UART4, "uart4", "uart_sel", &peri_cg_regs, 21, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_BTIF, "btif", "axi_sel", &peri_cg_regs, 22, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_I2C0, "i2c0", "axi_sel", &peri_cg_regs, 23, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_I2C1, "i2c1", "axi_sel", &peri_cg_regs, 24, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_I2C2, "i2c2", "axi_sel", &peri_cg_regs, 25, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_I2C3, "i2c3", "axi_sel", &peri_cg_regs, 26, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_AUXADC, "auxadc", "axi_sel", &peri_cg_regs, 27, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_SPI0, "spi0", "spi_sel", &peri_cg_regs, 28, &mtk_clk_gate_ops_setclr), + GATE_MTK(CLK_PERI_IRTX, "irtx", "irtx_sel", &peri_cg_regs, 29, &mtk_clk_gate_ops_setclr) +}; + +static u16 pericfg_rst_bank_ofs[] = { PERI_GLOBALCON_RST0, PERI_GLOBALCON_RST1 }; + +static u16 pericfg_rst_idx_map[] = { + [MT6735_PERI_RST0_UART0] = 0 * RST_NR_PER_BANK + 0, + [MT6735_PERI_RST0_UART1] = 0 * RST_NR_PER_BANK + 1, + [MT6735_PERI_RST0_UART2] = 0 * RST_NR_PER_BANK + 2, + [MT6735_PERI_RST0_UART3] = 0 * RST_NR_PER_BANK + 3, + [MT6735_PERI_RST0_UART4] = 0 * RST_NR_PER_BANK + 4, + [MT6735_PERI_RST0_BTIF] = 0 * RST_NR_PER_BANK + 6, + [MT6735_PERI_RST0_DISP_PWM_PERI] = 0 * RST_NR_PER_BANK + 7, + [MT6735_PERI_RST0_PWM] = 0 * RST_NR_PER_BANK + 8, + [MT6735_PERI_RST0_AUXADC] = 0 * RST_NR_PER_BANK + 10, + [MT6735_PERI_RST0_DMA] = 0 * RST_NR_PER_BANK + 11, + [MT6735_PERI_RST0_IRDA] = 0 * RST_NR_PER_BANK + 12, + [MT6735_PERI_RST0_IRTX] = 0 * RST_NR_PER_BANK + 13, + [MT6735_PERI_RST0_THERM] = 0 * RST_NR_PER_BANK + 16, + [MT6735_PERI_RST0_MSDC2] = 0 * RST_NR_PER_BANK + 17, + [MT6735_PERI_RST0_MSDC3] = 0 * RST_NR_PER_BANK + 18, + [MT6735_PERI_RST0_MSDC0] = 0 * RST_NR_PER_BANK + 19, + [MT6735_PERI_RST0_MSDC1] = 0 * RST_NR_PER_BANK + 20, + [MT6735_PERI_RST0_I2C0] = 0 * RST_NR_PER_BANK + 22, + [MT6735_PERI_RST0_I2C1] = 0 * RST_NR_PER_BANK + 23, + [MT6735_PERI_RST0_I2C2] = 0 * RST_NR_PER_BANK + 24, + [MT6735_PERI_RST0_I2C3] = 0 * RST_NR_PER_BANK + 25, + [MT6735_PERI_RST0_USB] = 0 * RST_NR_PER_BANK + 28, + + [MT6735_PERI_RST1_SPI0] = 1 * RST_NR_PER_BANK + 1, +}; + +static const struct mtk_clk_rst_desc pericfg_resets = { + .version = MTK_RST_SIMPLE, + .rst_bank_ofs = pericfg_rst_bank_ofs, + .rst_bank_nr = ARRAY_SIZE(pericfg_rst_bank_ofs), + .rst_idx_map = pericfg_rst_idx_map, + .rst_idx_map_nr = ARRAY_SIZE(pericfg_rst_idx_map) +}; + +static const struct mtk_clk_desc pericfg_clks = { + .clks = pericfg_gates, + .num_clks = ARRAY_SIZE(pericfg_gates), + + .rst_desc = &pericfg_resets +}; + +static const struct of_device_id of_match_mt6735_pericfg[] = { + { .compatible = "mediatek,mt6735-pericfg", .data = &pericfg_clks }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_match_mt6735_pericfg); + +static struct platform_driver clk_mt6735_pericfg = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-pericfg", + .of_match_table = of_match_mt6735_pericfg, + }, +}; 
+module_platform_driver(clk_mt6735_pericfg); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("MediaTek MT6735 pericfg clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-topckgen.c b/drivers/clk/mediatek/clk-mt6735-topckgen.c new file mode 100644 index 000000000000..2589ebfe2271 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-topckgen.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-mux.h" + +#include <dt-bindings/clock/mediatek,mt6735-topckgen.h> + +#define CLK_CFG_0 0x40 +#define CLK_CFG_0_SET 0x44 +#define CLK_CFG_0_CLR 0x48 +#define CLK_CFG_1 0x50 +#define CLK_CFG_1_SET 0x54 +#define CLK_CFG_1_CLR 0x58 +#define CLK_CFG_2 0x60 +#define CLK_CFG_2_SET 0x64 +#define CLK_CFG_2_CLR 0x68 +#define CLK_CFG_3 0x70 +#define CLK_CFG_3_SET 0x74 +#define CLK_CFG_3_CLR 0x78 +#define CLK_CFG_4 0x80 +#define CLK_CFG_4_SET 0x84 +#define CLK_CFG_4_CLR 0x88 +#define CLK_CFG_5 0x90 +#define CLK_CFG_5_SET 0x94 +#define CLK_CFG_5_CLR 0x98 +#define CLK_CFG_6 0xa0 +#define CLK_CFG_6_SET 0xa4 +#define CLK_CFG_6_CLR 0xa8 +#define CLK_CFG_7 0xb0 +#define CLK_CFG_7_SET 0xb4 +#define CLK_CFG_7_CLR 0xb8 + +static DEFINE_SPINLOCK(mt6735_topckgen_lock); + +/* Some clocks with unknown details are modeled as fixed clocks */ +static const struct mtk_fixed_clk topckgen_fixed_clks[] = { + /* + * This clock is available as a parent option for multiple + * muxes and seems like an alternative name for clk26m at first, + * but it appears alongside it in several muxes which should + * mean it is a separate clock. + */ + FIXED_CLK(CLK_TOP_AD_SYS_26M_CK, "ad_sys_26m_ck", "clk26m", 26 * MHZ), + /* + * This clock is the parent of DMPLL divisors. It might be MEMPLL + * or its parent, as DMPLL appears to be an alternative name for + * MEMPLL. + */ + FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", NULL, 0), + /* + * DMPLL clock (dmpll_ck), controlled by DDRPHY. + */ + FIXED_CLK(CLK_TOP_DMPLL, "dmpll", "clkph_mck_o", 0), + /* + * MIPI DPI clock. Parent option for dpi0_sel. Unknown parent. + */ + FIXED_CLK(CLK_TOP_DPI_CK, "dpi_ck", NULL, 0), + /* + * This clock is a child of WHPLL which is controlled by + * the modem. 
+ */ + FIXED_CLK(CLK_TOP_WHPLL_AUDIO_CK, "whpll_audio_ck", NULL, 0) +}; + +static const struct mtk_fixed_factor topckgen_factors[] = { + FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3), + FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5), + FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 4), + FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 8), + FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 16), + FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 4), + FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 4), + FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26), + FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 4), + FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 8), + FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 4), + FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 8), + FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 4), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2), + FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4), + FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8), + FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16), + FACTOR(CLK_TOP_VENCPLL_D3, "vencpll_d3", "vencpll", 1, 3), + FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2), + FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4), + FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2), + FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4), + FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8), + FACTOR(CLK_TOP_AD_SYS_26M_D2, "ad_sys_26m_d2", "clk26m", 1, 2) +}; + +static const char * const axi_sel_parents[] = { + "clk26m", + "syspll1_d2", + "syspll_d5", + "syspll1_d4", + "univpll_d5", + "univpll2_d2", + "dmpll", + "dmpll_d2" +}; + +static const char * const mem_sel_parents[] = { + "clk26m", + "dmpll" +}; + +static const char * const ddrphycfg_parents[] = { + "clk26m", + "syspll1_d8" +}; + +static const char * const mm_sel_parents[] = { + "clk26m", + "vencpll", + "syspll1_d2", + "syspll_d5", + "syspll1_d4", + "univpll_d5", + "univpll2_d2", + "dmpll" +}; + +static const char * const pwm_sel_parents[] = { + "clk26m", + "univpll2_d4", + "univpll3_d2", + "univpll1_d4" +}; + +static const char * const vdec_sel_parents[] = { + "clk26m", + "syspll1_d2", + "syspll_d5", + "syspll1_d4", + "univpll_d5", + "syspll_d2", + "syspll2_d2", + "msdcpll_d2" +}; + +static const char * const mfg_sel_parents[] = { + "clk26m", + "mmpll", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "syspll_d3", + "syspll1_d2", + "syspll_d5", + "univpll_d3", + "univpll1_d2" +}; + +static const char * const camtg_sel_parents[] = { + "clk26m", + "univpll_d26", + "univpll2_d2", + "syspll3_d2", + "syspll3_d4", + "msdcpll_d4" +}; + +static const 
char * const uart_sel_parents[] = { + "clk26m", + "univpll2_d8" +}; + +static const char * const spi_sel_parents[] = { + "clk26m", + "syspll3_d2", + "msdcpll_d8", + "syspll2_d4", + "syspll4_d2", + "univpll2_d4", + "univpll1_d8" +}; + +static const char * const usb20_sel_parents[] = { + "clk26m", + "univpll1_d8", + "univpll3_d4" +}; + +static const char * const msdc50_0_sel_parents[] = { + "clk26m", + "syspll1_d2", + "syspll2_d2", + "syspll4_d2", + "univpll_d5", + "univpll1_d4" +}; + +static const char * const msdc30_0_sel_parents[] = { + "clk26m", + "msdcpll", + "msdcpll_d2", + "msdcpll_d4", + "syspll2_d2", + "syspll1_d4", + "univpll1_d4", + "univpll_d3", + "univpll_d26", + "syspll2_d4", + "univpll_d2" +}; + +static const char * const msdc30_1_2_sel_parents[] = { + "clk26m", + "univpll2_d2", + "msdcpll_d4", + "syspll2_d2", + "syspll1_d4", + "univpll1_d4", + "univpll_d26", + "syspll2_d4" +}; + +static const char * const msdc30_3_sel_parents[] = { + "clk26m", + "univpll2_d2", + "msdcpll_d4", + "syspll2_d2", + "syspll1_d4", + "univpll1_d4", + "univpll_d26", + "msdcpll_d16", + "syspll2_d4" +}; + +static const char * const audio_sel_parents[] = { + "clk26m", + "syspll3_d4", + "syspll4_d4", + "syspll1_d16" +}; + +static const char * const aud_intbus_sel_parents[] = { + "clk26m", + "syspll1_d4", + "syspll4_d2", + "dmpll_d4" +}; + +static const char * const pmicspi_sel_parents[] = { + "clk26m", + "syspll1_d8", + "syspll3_d4", + "syspll1_d16", + "univpll3_d4", + "univpll_d26", + "dmpll_d4", + "dmpll_d8" +}; + +static const char * const scp_sel_parents[] = { + "clk26m", + "syspll1_d8", + "dmpll_d2", + "dmpll_d4" +}; + +static const char * const atb_sel_parents[] = { + "clk26m", + "syspll1_d2", + "syspll_d5", + "dmpll" +}; + +static const char * const dpi0_sel_parents[] = { + "clk26m", + "tvdpll", + "tvdpll_d2", + "tvdpll_d4", + "dpi_ck" +}; + +static const char * const scam_sel_parents[] = { + "clk26m", + "syspll3_d2", + "univpll2_d4", + "vencpll_d3" +}; + +static const char * const mfg13m_sel_parents[] = { + "clk26m", + "ad_sys_26m_d2" +}; + +static const char * const aud_1_2_sel_parents[] = { + "clk26m", + "apll1" +}; + +static const char * const irda_sel_parents[] = { + "clk26m", + "univpll2_d4" +}; + +static const char * const irtx_sel_parents[] = { + "clk26m", + "ad_sys_26m_ck" +}; + +static const char * const disppwm_sel_parents[] = { + "clk26m", + "univpll2_d4", + "syspll4_d2_d8", + "ad_sys_26m_ck" +}; + +static const struct mtk_mux topckgen_muxes[] = { + MUX_CLR_SET_UPD(CLK_TOP_AXI_SEL, "axi_sel", axi_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 0, 3, 0, 0), + MUX_CLR_SET_UPD(CLK_TOP_MEM_SEL, "mem_sel", mem_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 8, 1, 0, 0), + MUX_CLR_SET_UPD(CLK_TOP_DDRPHY_SEL, "ddrphycfg_sel", ddrphycfg_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 1, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 3, 15, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 16, 4, 23, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 24, 3, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", 
uart_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 0, 1, 7, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 8, 3, 15, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_USB20_SEL, "usb20_sel", usb20_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 2, 23, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 3, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 0, 4, 7, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 8, 3, 15, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 16, 3, 23, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 24, 4, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 0, 2, 7, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDINTBUS_SEL, "aud_intbus_sel", aud_intbus_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 8, 2, 15, 0, 0), + MUX_CLR_SET_UPD(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 16, 3, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 24, 2, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 3, 15, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SCAM_SEL, "scam_sel", scam_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 16, 2, 23, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG13M_SEL, "mfg13m_sel", mfg13m_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD1_SEL, "aud_1_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 1, 7, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD2_SEL, "aud_2_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 1, 15, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_IRDA_SEL, "irda_sel", irda_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 1, 23, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_IRTX_SEL, "irtx_sel", irtx_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 24, 1, 31, 0, 0), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DISPPWM_SEL, "disppwm_sel", disppwm_sel_parents, CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 0, 2, 7, 0, 0), +}; + +static const struct mtk_clk_desc topckgen_desc = { + .fixed_clks = topckgen_fixed_clks, + .num_fixed_clks = ARRAY_SIZE(topckgen_fixed_clks), + .factor_clks = topckgen_factors, + .num_factor_clks = ARRAY_SIZE(topckgen_factors), + .mux_clks = topckgen_muxes, + .num_mux_clks = ARRAY_SIZE(topckgen_muxes), + .clk_lock = &mt6735_topckgen_lock, +}; + +static const struct of_device_id of_match_mt6735_topckgen[] = { + { .compatible = "mediatek,mt6735-topckgen", .data = &topckgen_desc}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_match_mt6735_topckgen); + +static struct platform_driver clk_mt6735_topckgen = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-topckgen", + .of_match_table = of_match_mt6735_topckgen, + }, +}; 
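
All of the topckgen muxes above use paired SET/CLR registers (CLK_CFG_n_SET and CLK_CFG_n_CLR), which is what the MUX_CLR_SET_UPD and MUX_GATE_CLR_SET_UPD macros encode: writing a 1 to a CLR-register bit zeroes that bit in CLK_CFG_n, and writing a 1 to a SET-register bit sets it, so the selector field can be reprogrammed without a read-modify-write on live hardware. A simplified sketch of the idiom, assuming plain writel-accessible registers (clr, set, shift and width stand in for the macro arguments):

#include <linux/bits.h>
#include <linux/io.h>

/*
 * Update a mux selector field through paired SET/CLR registers:
 * first clear the whole field, then write the new parent index.
 */
static void clr_set_update(void __iomem *clr, void __iomem *set,
			   u32 shift, u32 width, u32 val)
{
	u32 mask = GENMASK(width - 1, 0);

	writel(mask << shift, clr);		/* zero the selector field */
	writel((val & mask) << shift, set);	/* program the new index */
}
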
+module_platform_driver(clk_mt6735_topckgen); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("MediaTek MT6735 topckgen clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-vdecsys.c b/drivers/clk/mediatek/clk-mt6735-vdecsys.c new file mode 100644 index 000000000000..8817085fc1db --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-vdecsys.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +#include <dt-bindings/clock/mediatek,mt6735-vdecsys.h> +#include <dt-bindings/reset/mediatek,mt6735-vdecsys.h> + +#define VDEC_CKEN_SET 0x00 +#define VDEC_CKEN_CLR 0x04 +#define SMI_LARB1_CKEN_SET 0x08 +#define SMI_LARB1_CKEN_CLR 0x0c +#define VDEC_RESETB_CON 0x10 +#define SMI_LARB1_RESETB_CON 0x14 + +#define RST_NR_PER_BANK 32 + +static struct mtk_gate_regs vdec_cg_regs = { + .set_ofs = VDEC_CKEN_SET, + .clr_ofs = VDEC_CKEN_CLR, + .sta_ofs = VDEC_CKEN_SET, +}; + +static struct mtk_gate_regs smi_larb1_cg_regs = { + .set_ofs = SMI_LARB1_CKEN_SET, + .clr_ofs = SMI_LARB1_CKEN_CLR, + .sta_ofs = SMI_LARB1_CKEN_SET, +}; + +static const struct mtk_gate vdecsys_gates[] = { + GATE_MTK(CLK_VDEC_VDEC, "vdec", "vdec_sel", &vdec_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv), + GATE_MTK(CLK_VDEC_SMI_LARB1, "smi_larb1", "vdec_sel", &smi_larb1_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv), +}; + +static u16 vdecsys_rst_bank_ofs[] = { VDEC_RESETB_CON, SMI_LARB1_RESETB_CON }; + +static u16 vdecsys_rst_idx_map[] = { + [MT6735_VDEC_RST0_VDEC] = 0 * RST_NR_PER_BANK + 0, + [MT6735_VDEC_RST1_SMI_LARB1] = 1 * RST_NR_PER_BANK + 0, +}; + +static const struct mtk_clk_rst_desc vdecsys_resets = { + .version = MTK_RST_SIMPLE, + .rst_bank_ofs = vdecsys_rst_bank_ofs, + .rst_bank_nr = ARRAY_SIZE(vdecsys_rst_bank_ofs), + .rst_idx_map = vdecsys_rst_idx_map, + .rst_idx_map_nr = ARRAY_SIZE(vdecsys_rst_idx_map) +}; + +static const struct mtk_clk_desc vdecsys_clks = { + .clks = vdecsys_gates, + .num_clks = ARRAY_SIZE(vdecsys_gates), + .rst_desc = &vdecsys_resets +}; + +static const struct of_device_id of_match_mt6735_vdecsys[] = { + { .compatible = "mediatek,mt6735-vdecsys", .data = &vdecsys_clks }, + { /* sentinel */ } +}; + +static struct platform_driver clk_mt6735_vdecsys = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-vdecsys", + .of_match_table = of_match_mt6735_vdecsys, + }, +}; +module_platform_driver(clk_mt6735_vdecsys); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("MediaTek MT6735 vdecsys clock and reset driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt6735-vencsys.c b/drivers/clk/mediatek/clk-mt6735-vencsys.c new file mode 100644 index 000000000000..8dec7f98492a --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6735-vencsys.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +#include <dt-bindings/clock/mediatek,mt6735-vencsys.h> + +#define VENC_CG_CON 0x00 +#define VENC_CG_SET 0x04 +#define VENC_CG_CLR 0x08 + +static struct mtk_gate_regs venc_cg_regs = { + .set_ofs = VENC_CG_SET, + .clr_ofs = VENC_CG_CLR, + .sta_ofs = VENC_CG_CON, +}; + +static const struct 
mtk_gate vencsys_gates[] = { + GATE_MTK(CLK_VENC_SMI_LARB3, "smi_larb3", "mm_sel", &venc_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv), + GATE_MTK(CLK_VENC_VENC, "venc", "mm_sel", &venc_cg_regs, 4, &mtk_clk_gate_ops_setclr_inv), + GATE_MTK(CLK_VENC_JPGENC, "jpgenc", "mm_sel", &venc_cg_regs, 8, &mtk_clk_gate_ops_setclr_inv), + GATE_MTK(CLK_VENC_JPGDEC, "jpgdec", "mm_sel", &venc_cg_regs, 12, &mtk_clk_gate_ops_setclr_inv), +}; + +static const struct mtk_clk_desc vencsys_clks = { + .clks = vencsys_gates, + .num_clks = ARRAY_SIZE(vencsys_gates), +}; + +static const struct of_device_id of_match_mt6735_vencsys[] = { + { .compatible = "mediatek,mt6735-vencsys", .data = &vencsys_clks }, + { /* sentinel */ } +}; + +static struct platform_driver clk_mt6735_vencsys = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt6735-vencsys", + .of_match_table = of_match_mt6735_vencsys, + }, +}; +module_platform_driver(clk_mt6735_vencsys); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("Mediatek MT6735 vencsys clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c index c4baf4076ed6..6b07abe9a8f5 100644 --- a/drivers/clk/mediatek/clk-mt8188-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c @@ -342,11 +342,14 @@ static const char * const dsp7_parents[] = { "univpll_d3" }; +/* + * MFG can be also parented to "univpll_d6" and "univpll_d7": + * these have been removed from the parents list to let us + * achieve GPU DVFS without any special clock handlers. + */ static const char * const mfg_core_tmp_parents[] = { "clk26m", - "mainpll_d5_d2", - "univpll_d6", - "univpll_d7" + "mainpll_d5_d2" }; static const char * const camtg_parents[] = { diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index 78f648c9c97d..febb5d7348ff 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -106,6 +106,7 @@ config COMMON_CLK_AXG_AUDIO select COMMON_CLK_MESON_SCLK_DIV select COMMON_CLK_MESON_CLKC_UTILS select REGMAP_MMIO + depends on RESET_MESON_AUX help Support for the audio clock controller on AmLogic A113D devices, aka axg, Say Y if you want audio subsystem to work. diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index beda86349389..7714bde5ffc0 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -15,6 +15,8 @@ #include <linux/reset-controller.h> #include <linux/slab.h> +#include <soc/amlogic/reset-meson-aux.h> + #include "meson-clkc-utils.h" #include "axg-audio.h" #include "clk-regmap.h" @@ -1678,84 +1680,6 @@ static struct clk_regmap *const sm1_clk_regmaps[] = { &sm1_earcrx_dmac_clk, }; -struct axg_audio_reset_data { - struct reset_controller_dev rstc; - struct regmap *map; - unsigned int offset; -}; - -static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst, - unsigned long id, - unsigned int *reg, - unsigned int *bit) -{ - unsigned int stride = regmap_get_reg_stride(rst->map); - - *reg = (id / (stride * BITS_PER_BYTE)) * stride; - *reg += rst->offset; - *bit = id % (stride * BITS_PER_BYTE); -} - -static int axg_audio_reset_update(struct reset_controller_dev *rcdev, - unsigned long id, bool assert) -{ - struct axg_audio_reset_data *rst = - container_of(rcdev, struct axg_audio_reset_data, rstc); - unsigned int offset, bit; - - axg_audio_reset_reg_and_bit(rst, id, &offset, &bit); - - regmap_update_bits(rst->map, offset, BIT(bit), - assert ? 
BIT(bit) : 0); - - return 0; -} - -static int axg_audio_reset_status(struct reset_controller_dev *rcdev, - unsigned long id) -{ - struct axg_audio_reset_data *rst = - container_of(rcdev, struct axg_audio_reset_data, rstc); - unsigned int val, offset, bit; - - axg_audio_reset_reg_and_bit(rst, id, &offset, &bit); - - regmap_read(rst->map, offset, &val); - - return !!(val & BIT(bit)); -} - -static int axg_audio_reset_assert(struct reset_controller_dev *rcdev, - unsigned long id) -{ - return axg_audio_reset_update(rcdev, id, true); -} - -static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev, - unsigned long id) -{ - return axg_audio_reset_update(rcdev, id, false); -} - -static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev, - unsigned long id) -{ - int ret; - - ret = axg_audio_reset_assert(rcdev, id); - if (ret) - return ret; - - return axg_audio_reset_deassert(rcdev, id); -} - -static const struct reset_control_ops axg_audio_rstc_ops = { - .assert = axg_audio_reset_assert, - .deassert = axg_audio_reset_deassert, - .reset = axg_audio_reset_toggle, - .status = axg_audio_reset_status, -}; - static struct regmap_config axg_audio_regmap_cfg = { .reg_bits = 32, .val_bits = 32, @@ -1766,16 +1690,14 @@ struct audioclk_data { struct clk_regmap *const *regmap_clks; unsigned int regmap_clk_num; struct meson_clk_hw_data hw_clks; - unsigned int reset_offset; - unsigned int reset_num; unsigned int max_register; + const char *rst_drvname; }; static int axg_audio_clkc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct audioclk_data *data; - struct axg_audio_reset_data *rst; struct regmap *map; void __iomem *regs; struct clk_hw *hw; @@ -1834,22 +1756,11 @@ static int axg_audio_clkc_probe(struct platform_device *pdev) if (ret) return ret; - /* Stop here if there is no reset */ - if (!data->reset_num) - return 0; - - rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL); - if (!rst) - return -ENOMEM; - - rst->map = map; - rst->offset = data->reset_offset; - rst->rstc.nr_resets = data->reset_num; - rst->rstc.ops = &axg_audio_rstc_ops; - rst->rstc.of_node = dev->of_node; - rst->rstc.owner = THIS_MODULE; + /* Register auxiliary reset driver when applicable */ + if (data->rst_drvname) + ret = devm_meson_rst_aux_register(dev, map, data->rst_drvname); - return devm_reset_controller_register(dev, &rst->rstc); + return ret; } static const struct audioclk_data axg_audioclk_data = { @@ -1869,9 +1780,8 @@ static const struct audioclk_data g12a_audioclk_data = { .hws = g12a_audio_hw_clks, .num = ARRAY_SIZE(g12a_audio_hw_clks), }, - .reset_offset = AUDIO_SW_RESET, - .reset_num = 26, .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL, + .rst_drvname = "rst-g12a", }; static const struct audioclk_data sm1_audioclk_data = { @@ -1881,9 +1791,8 @@ static const struct audioclk_data sm1_audioclk_data = { .hws = sm1_audio_hw_clks, .num = ARRAY_SIZE(sm1_audio_hw_clks), }, - .reset_offset = AUDIO_SM1_SW_RESET0, - .reset_num = 39, .max_register = AUDIO_EARCRX_DMAC_CLK_CTRL, + .rst_drvname = "rst-sm1", }; static const struct of_device_id clkc_match_table[] = { diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 757c7a28c53d..1b08daf579b2 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -23,8 +23,6 @@ #include <dt-bindings/clock/axg-clkc.h> -static DEFINE_SPINLOCK(meson_clk_lock); - static struct clk_regmap axg_fixed_pll_dco = { .data = &(struct meson_clk_pll_data){ .en = { @@ -506,7 +504,6 @@ static struct clk_regmap axg_mpll0_div = { .shift = 0, 
.width = 1, }, - .lock = &meson_clk_lock, .flags = CLK_MESON_MPLL_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ @@ -557,7 +554,6 @@ static struct clk_regmap axg_mpll1_div = { .shift = 1, .width = 1, }, - .lock = &meson_clk_lock, .flags = CLK_MESON_MPLL_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ @@ -613,7 +609,6 @@ static struct clk_regmap axg_mpll2_div = { .shift = 2, .width = 1, }, - .lock = &meson_clk_lock, .flags = CLK_MESON_MPLL_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ @@ -664,7 +659,6 @@ static struct clk_regmap axg_mpll3_div = { .shift = 3, .width = 1, }, - .lock = &meson_clk_lock, .flags = CLK_MESON_MPLL_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c index 32bd2ed9d304..35fda31a19e2 100644 --- a/drivers/clk/meson/c3-pll.c +++ b/drivers/clk/meson/c3-pll.c @@ -361,6 +361,7 @@ static struct clk_regmap hifi_pll_dco = { .range = &c3_gp0_pll_mult_range, .init_regs = c3_hifi_init_regs, .init_count = ARRAY_SIZE(c3_hifi_init_regs), + .frac_max = 100000, }, .hw.init = &(struct clk_init_data) { .name = "hifi_pll_dco", diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index f639d56f0fd3..aa9abd06ae65 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -112,26 +112,15 @@ static int mpll_set_rate(struct clk_hw *hw, struct clk_regmap *clk = to_clk_regmap(hw); struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk); unsigned int sdm, n2; - unsigned long flags = 0; params_from_rate(rate, parent_rate, &sdm, &n2, mpll->flags); - if (mpll->lock) - spin_lock_irqsave(mpll->lock, flags); - else - __acquire(mpll->lock); - /* Set the fractional part */ meson_parm_write(clk->map, &mpll->sdm, sdm); /* Set the integer divider part */ meson_parm_write(clk->map, &mpll->n2, n2); - if (mpll->lock) - spin_unlock_irqrestore(mpll->lock, flags); - else - __release(mpll->lock); - return 0; } diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h index a991d568c43a..4ffd3aeef799 100644 --- a/drivers/clk/meson/clk-mpll.h +++ b/drivers/clk/meson/clk-mpll.h @@ -20,7 +20,6 @@ struct meson_clk_mpll_data { struct parm misc; const struct reg_sequence *init_regs; unsigned int init_count; - spinlock_t *lock; u8 flags; }; diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index bc570a2ff3a3..89f0f04a16ab 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -57,12 +57,13 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate, struct meson_clk_pll_data *pll) { u64 rate = (u64)parent_rate * m; + unsigned int frac_max = pll->frac_max ? pll->frac_max : + (1 << pll->frac.width); if (frac && MESON_PARM_APPLICABLE(&pll->frac)) { u64 frac_rate = (u64)parent_rate * frac; - rate += DIV_ROUND_UP_ULL(frac_rate, - (1 << pll->frac.width)); + rate += DIV_ROUND_UP_ULL(frac_rate, frac_max); } return DIV_ROUND_UP_ULL(rate, n); @@ -100,7 +101,8 @@ static unsigned int __pll_params_with_frac(unsigned long rate, unsigned int n, struct meson_clk_pll_data *pll) { - unsigned int frac_max = (1 << pll->frac.width); + unsigned int frac_max = pll->frac_max ? 
pll->frac_max : + (1 << pll->frac.width); u64 val = (u64)rate * n; /* Bail out if we are already over the requested rate */ diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h index 7b6b87274073..949157fb7bf5 100644 --- a/drivers/clk/meson/clk-pll.h +++ b/drivers/clk/meson/clk-pll.h @@ -43,6 +43,7 @@ struct meson_clk_pll_data { unsigned int init_count; const struct pll_params_table *table; const struct pll_mult_range *range; + unsigned int frac_max; u8 flags; }; diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 02dda57105b1..d3539fe9f7af 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -28,8 +28,6 @@ #include <dt-bindings/clock/g12a-clkc.h> -static DEFINE_SPINLOCK(meson_clk_lock); - static struct clk_regmap g12a_fixed_pll_dco = { .data = &(struct meson_clk_pll_data){ .en = { @@ -2225,7 +2223,6 @@ static struct clk_regmap g12a_mpll0_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = g12a_mpll0_init_regs, .init_count = ARRAY_SIZE(g12a_mpll0_init_regs), }, @@ -2279,7 +2276,6 @@ static struct clk_regmap g12a_mpll1_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = g12a_mpll1_init_regs, .init_count = ARRAY_SIZE(g12a_mpll1_init_regs), }, @@ -2333,7 +2329,6 @@ static struct clk_regmap g12a_mpll2_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = g12a_mpll2_init_regs, .init_count = ARRAY_SIZE(g12a_mpll2_init_regs), }, @@ -2387,7 +2382,6 @@ static struct clk_regmap g12a_mpll3_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = g12a_mpll3_init_regs, .init_count = ARRAY_SIZE(g12a_mpll3_init_regs), }, diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index f071faad1ebb..262c318edbd5 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -19,8 +19,6 @@ #include <dt-bindings/clock/gxbb-clkc.h> -static DEFINE_SPINLOCK(meson_clk_lock); - static const struct pll_params_table gxbb_gp0_pll_params_table[] = { PLL_PARAMS(32, 1), PLL_PARAMS(33, 1), @@ -731,7 +729,6 @@ static struct clk_regmap gxbb_mpll0_div = { .shift = 16, .width = 9, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = "mpll0_div", @@ -760,7 +757,6 @@ static struct clk_regmap gxl_mpll0_div = { .shift = 16, .width = 9, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = "mpll0_div", @@ -812,7 +808,6 @@ static struct clk_regmap gxbb_mpll1_div = { .shift = 16, .width = 9, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = "mpll1_div", @@ -855,7 +850,6 @@ static struct clk_regmap gxbb_mpll2_div = { .shift = 16, .width = 9, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = "mpll2_div", diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index b7417ac262d3..e4b474c5f86c 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -25,8 +25,6 @@ #include <dt-bindings/clock/meson8b-clkc.h> #include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h> -static DEFINE_SPINLOCK(meson_clk_lock); - struct meson8b_clk_reset { struct reset_controller_dev reset; struct regmap *regmap; @@ -492,7 +490,6 @@ static struct clk_regmap meson8b_mpll0_div = { .shift = 25, .width = 1, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = "mpll0_div", @@ -537,7 +534,6 @@ static struct clk_regmap meson8b_mpll1_div = { .shift = 16, .width = 9, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = 
"mpll1_div", @@ -582,7 +578,6 @@ static struct clk_regmap meson8b_mpll2_div = { .shift = 16, .width = 9, }, - .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ .name = "mpll2_div", @@ -3702,7 +3697,6 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, container_of(rcdev, struct meson8b_clk_reset, reset); const struct meson8b_clk_reset_line *reset; unsigned int value = 0; - unsigned long flags; if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) return -EINVAL; @@ -3712,13 +3706,9 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, if (assert != reset->active_low) value = BIT(reset->bit_idx); - spin_lock_irqsave(&meson_clk_lock, flags); - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, BIT(reset->bit_idx), value); - spin_unlock_irqrestore(&meson_clk_lock, flags); - return 0; } diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c index b0258933fb9d..d8e621e79428 100644 --- a/drivers/clk/meson/s4-pll.c +++ b/drivers/clk/meson/s4-pll.c @@ -17,8 +17,6 @@ #include "meson-clkc-utils.h" #include <dt-bindings/clock/amlogic,s4-pll-clkc.h> -static DEFINE_SPINLOCK(meson_clk_lock); - /* * These clock are a fixed value (fixed_pll is 2GHz) that is initialized by ROMcode. * The chip was changed fixed pll for security reasons. Fixed PLL registers are not writable @@ -329,7 +327,6 @@ static struct clk_regmap s4_gp0_pll = { * Internal hifi pll emulation configuration parameters */ static const struct reg_sequence s4_hifi_init_regs[] = { - { .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x00010e56 }, { .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00000000 }, { .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 }, { .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 }, @@ -354,6 +351,11 @@ static struct clk_regmap s4_hifi_pll_dco = { .shift = 10, .width = 5, }, + .frac = { + .reg_off = ANACTRL_HIFIPLL_CTRL1, + .shift = 0, + .width = 17, + }, .l = { .reg_off = ANACTRL_HIFIPLL_CTRL0, .shift = 31, @@ -367,6 +369,7 @@ static struct clk_regmap s4_hifi_pll_dco = { .range = &s4_gp0_pll_mult_range, .init_regs = s4_hifi_init_regs, .init_count = ARRAY_SIZE(s4_hifi_init_regs), + .frac_max = 100000, .flags = CLK_MESON_PLL_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ @@ -542,7 +545,6 @@ static struct clk_regmap s4_mpll0_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = s4_mpll0_init_regs, .init_count = ARRAY_SIZE(s4_mpll0_init_regs), }, @@ -596,7 +598,6 @@ static struct clk_regmap s4_mpll1_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = s4_mpll1_init_regs, .init_count = ARRAY_SIZE(s4_mpll1_init_regs), }, @@ -650,7 +651,6 @@ static struct clk_regmap s4_mpll2_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = s4_mpll2_init_regs, .init_count = ARRAY_SIZE(s4_mpll2_init_regs), }, @@ -704,7 +704,6 @@ static struct clk_regmap s4_mpll3_div = { .shift = 29, .width = 1, }, - .lock = &meson_clk_lock, .init_regs = s4_mpll3_init_regs, .init_count = ARRAY_SIZE(s4_mpll3_init_regs), }, diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile index 441bf83080a1..062cd87fa8dd 100644 --- a/drivers/clk/mmp/Makefile +++ b/drivers/clk/mmp/Makefile @@ -11,4 +11,4 @@ obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o -obj-y += clk-of-pxa1928.o +obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o clk-pxa1908-apbc.o clk-pxa1908-apbcp.o clk-pxa1908-apmu.o clk-pxa1908-mpmu.o 
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c index 1b90867b60c4..6556f6ada2e8 100644 --- a/drivers/clk/mmp/clk-frac.c +++ b/drivers/clk/mmp/clk-frac.c @@ -26,14 +26,15 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate, { struct mmp_clk_factor *factor = to_clk_factor(hw); u64 rate = 0, prev_rate; + struct u32_fract *d; int i; for (i = 0; i < factor->ftbl_cnt; i++) { - prev_rate = rate; - rate = *prate; - rate *= factor->ftbl[i].den; - do_div(rate, factor->ftbl[i].num * factor->masks->factor); + d = &factor->ftbl[i]; + prev_rate = rate; + rate = (u64)(*prate) * d->denominator; + do_div(rate, d->numerator * factor->masks->factor); if (rate > drate) break; } @@ -52,23 +53,22 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, { struct mmp_clk_factor *factor = to_clk_factor(hw); struct mmp_clk_factor_masks *masks = factor->masks; - unsigned int val, num, den; + struct u32_fract d; + unsigned int val; u64 rate; val = readl_relaxed(factor->base); /* calculate numerator */ - num = (val >> masks->num_shift) & masks->num_mask; + d.numerator = (val >> masks->num_shift) & masks->num_mask; /* calculate denominator */ - den = (val >> masks->den_shift) & masks->den_mask; - - if (!den) + d.denominator = (val >> masks->den_shift) & masks->den_mask; + if (!d.denominator) return 0; - rate = parent_rate; - rate *= den; - do_div(rate, num * factor->masks->factor); + rate = (u64)parent_rate * d.denominator; + do_div(rate, d.numerator * factor->masks->factor); return rate; } @@ -82,18 +82,18 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate, int i; unsigned long val; unsigned long flags = 0; + struct u32_fract *d; u64 rate = 0; for (i = 0; i < factor->ftbl_cnt; i++) { - rate = prate; - rate *= factor->ftbl[i].den; - do_div(rate, factor->ftbl[i].num * factor->masks->factor); + d = &factor->ftbl[i]; + rate = (u64)prate * d->denominator; + do_div(rate, d->numerator * factor->masks->factor); if (rate > drate) break; } - if (i > 0) - i--; + d = i ? 
&factor->ftbl[i - 1] : &factor->ftbl[0]; if (factor->lock) spin_lock_irqsave(factor->lock, flags); @@ -101,10 +101,10 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate, val = readl_relaxed(factor->base); val &= ~(masks->num_mask << masks->num_shift); - val |= (factor->ftbl[i].num & masks->num_mask) << masks->num_shift; + val |= (d->numerator & masks->num_mask) << masks->num_shift; val &= ~(masks->den_mask << masks->den_shift); - val |= (factor->ftbl[i].den & masks->den_mask) << masks->den_shift; + val |= (d->denominator & masks->den_mask) << masks->den_shift; writel_relaxed(val, factor->base); @@ -118,7 +118,8 @@ static int clk_factor_init(struct clk_hw *hw) { struct mmp_clk_factor *factor = to_clk_factor(hw); struct mmp_clk_factor_masks *masks = factor->masks; - u32 val, num, den; + struct u32_fract d; + u32 val; int i; unsigned long flags = 0; @@ -128,23 +129,22 @@ static int clk_factor_init(struct clk_hw *hw) val = readl(factor->base); /* calculate numerator */ - num = (val >> masks->num_shift) & masks->num_mask; + d.numerator = (val >> masks->num_shift) & masks->num_mask; /* calculate denominator */ - den = (val >> masks->den_shift) & masks->den_mask; + d.denominator = (val >> masks->den_shift) & masks->den_mask; for (i = 0; i < factor->ftbl_cnt; i++) - if (den == factor->ftbl[i].den && num == factor->ftbl[i].num) + if (d.denominator == factor->ftbl[i].denominator && + d.numerator == factor->ftbl[i].numerator) break; if (i >= factor->ftbl_cnt) { val &= ~(masks->num_mask << masks->num_shift); - val |= (factor->ftbl[0].num & masks->num_mask) << - masks->num_shift; + val |= (factor->ftbl[0].numerator & masks->num_mask) << masks->num_shift; val &= ~(masks->den_mask << masks->den_shift); - val |= (factor->ftbl[0].den & masks->den_mask) << - masks->den_shift; + val |= (factor->ftbl[0].denominator & masks->den_mask) << masks->den_shift; } if (!(val & masks->enable_mask) || i >= factor->ftbl_cnt) { @@ -168,8 +168,7 @@ static const struct clk_ops clk_factor_ops = { struct clk *mmp_clk_register_factor(const char *name, const char *parent_name, unsigned long flags, void __iomem *base, struct mmp_clk_factor_masks *masks, - struct mmp_clk_factor_tbl *ftbl, - unsigned int ftbl_cnt, spinlock_t *lock) + struct u32_fract *ftbl, unsigned int ftbl_cnt, spinlock_t *lock) { struct mmp_clk_factor *factor; struct clk_init_data init; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index eaad36ee323d..a4f15cee630e 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -143,9 +143,9 @@ static struct mmp_clk_factor_masks uart_factor_masks = { .den_shift = 0, }; -static struct mmp_clk_factor_tbl uart_factor_tbl[] = { - {.num = 8125, .den = 1536}, /*14.745MHZ */ - {.num = 3521, .den = 689}, /*19.23MHZ */ +static struct u32_fract uart_factor_tbl[] = { + { .numerator = 8125, .denominator = 1536 }, /* 14.745MHZ */ + { .numerator = 3521, .denominator = 689 }, /* 19.23MHZ */ }; static struct mmp_clk_factor_masks i2s_factor_masks = { @@ -157,16 +157,16 @@ static struct mmp_clk_factor_masks i2s_factor_masks = { .enable_mask = 0xd0000000, }; -static struct mmp_clk_factor_tbl i2s_factor_tbl[] = { - {.num = 24868, .den = 511}, /* 2.0480 MHz */ - {.num = 28003, .den = 793}, /* 2.8224 MHz */ - {.num = 24941, .den = 1025}, /* 4.0960 MHz */ - {.num = 28003, .den = 1586}, /* 5.6448 MHz */ - {.num = 31158, .den = 2561}, /* 8.1920 MHz */ - {.num = 16288, .den = 1845}, /* 11.2896 MHz */ - {.num = 20772, .den = 2561}, /* 12.2880 MHz */ - {.num = 8144, .den 
= 1845}, /* 22.5792 MHz */ - {.num = 10386, .den = 2561}, /* 24.5760 MHz */ +static struct u32_fract i2s_factor_tbl[] = { + { .numerator = 24868, .denominator = 511 }, /* 2.0480 MHz */ + { .numerator = 28003, .denominator = 793 }, /* 2.8224 MHz */ + { .numerator = 24941, .denominator = 1025 }, /* 4.0960 MHz */ + { .numerator = 28003, .denominator = 1586 }, /* 5.6448 MHz */ + { .numerator = 31158, .denominator = 2561 }, /* 8.1920 MHz */ + { .numerator = 16288, .denominator = 1845 }, /* 11.2896 MHz */ + { .numerator = 20772, .denominator = 2561 }, /* 12.2880 MHz */ + { .numerator = 8144, .denominator = 1845 }, /* 22.5792 MHz */ + { .numerator = 10386, .denominator = 2561 }, /* 24.5760 MHz */ }; static DEFINE_SPINLOCK(acgr_lock); diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c index c5a7ba1deaa3..5f250427e60d 100644 --- a/drivers/clk/mmp/clk-of-pxa168.c +++ b/drivers/clk/mmp/clk-of-pxa168.c @@ -106,8 +106,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = { .den_shift = 0, }; -static struct mmp_clk_factor_tbl uart_factor_tbl[] = { - {.num = 8125, .den = 1536}, /*14.745MHZ */ +static struct u32_fract uart_factor_tbl[] = { + { .numerator = 8125, .denominator = 1536 }, /* 14.745MHZ */ }; static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit) diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c index 9def4b5f10e9..ebb6e278eda3 100644 --- a/drivers/clk/mmp/clk-of-pxa1928.c +++ b/drivers/clk/mmp/clk-of-pxa1928.c @@ -61,9 +61,9 @@ static struct mmp_clk_factor_masks uart_factor_masks = { .den_shift = 0, }; -static struct mmp_clk_factor_tbl uart_factor_tbl[] = { - {.num = 832, .den = 234}, /*58.5MHZ */ - {.num = 1, .den = 1}, /*26MHZ */ +static struct u32_fract uart_factor_tbl[] = { + { .numerator = 832, .denominator = 234 }, /* 58.5MHZ */ + { .numerator = 1, .denominator = 1 }, /* 26MHZ */ }; static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit) diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c index 7a38c424782e..fe65e7bdb411 100644 --- a/drivers/clk/mmp/clk-of-pxa910.c +++ b/drivers/clk/mmp/clk-of-pxa910.c @@ -86,8 +86,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = { .den_shift = 0, }; -static struct mmp_clk_factor_tbl uart_factor_tbl[] = { - {.num = 8125, .den = 1536}, /*14.745MHZ */ +static struct u32_fract uart_factor_tbl[] = { + { .numerator = 8125, .denominator = 1536 }, /* 14.745MHZ */ }; static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit) diff --git a/drivers/clk/mmp/clk-pxa1908-apbc.c b/drivers/clk/mmp/clk-pxa1908-apbc.c new file mode 100644 index 000000000000..b93d08466198 --- /dev/null +++ b/drivers/clk/mmp/clk-pxa1908-apbc.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <dt-bindings/clock/marvell,pxa1908.h> + +#include "clk.h" + +#define APBC_UART0 0x0 +#define APBC_UART1 0x4 +#define APBC_GPIO 0x8 +#define APBC_PWM0 0xc +#define APBC_PWM1 0x10 +#define APBC_PWM2 0x14 +#define APBC_PWM3 0x18 +#define APBC_SSP0 0x1c +#define APBC_SSP1 0x20 +#define APBC_IPC_RST 0x24 +#define APBC_RTC 0x28 +#define APBC_TWSI0 0x2c +#define APBC_KPC 0x30 +#define APBC_SWJTAG 0x40 +#define APBC_SSP2 0x4c +#define APBC_TWSI1 0x60 +#define APBC_THERMAL 0x6c +#define APBC_TWSI3 0x70 + +#define APBC_NR_CLKS 19 + +struct pxa1908_clk_unit { + struct mmp_clk_unit unit; + void __iomem *base; +}; + +static DEFINE_SPINLOCK(pwm0_lock); 
+static DEFINE_SPINLOCK(pwm2_lock); + +static DEFINE_SPINLOCK(uart0_lock); +static DEFINE_SPINLOCK(uart1_lock); + +static const char * const uart_parent_names[] = {"pll1_117", "uart_pll"}; +static const char * const ssp_parent_names[] = {"pll1_d16", "pll1_d48", "pll1_d24", "pll1_d12"}; + +static struct mmp_param_gate_clk apbc_gate_clks[] = { + {PXA1908_CLK_TWSI0, "twsi0_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_TWSI1, "twsi1_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_TWSI3, "twsi3_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 3, 0, MMP_CLK_GATE_NEED_DELAY, NULL}, + {PXA1908_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0, MMP_CLK_GATE_NEED_DELAY, NULL}, + {PXA1908_CLK_PWM0, "pwm0_clk", "pwm01_apb_share", CLK_SET_RATE_PARENT, APBC_PWM0, 0x2, 2, 0, 0, &pwm0_lock}, + {PXA1908_CLK_PWM1, "pwm1_clk", "pwm01_apb_share", CLK_SET_RATE_PARENT, APBC_PWM1, 0x6, 2, 0, 0, NULL}, + {PXA1908_CLK_PWM2, "pwm2_clk", "pwm23_apb_share", CLK_SET_RATE_PARENT, APBC_PWM2, 0x2, 2, 0, 0, NULL}, + {PXA1908_CLK_PWM3, "pwm3_clk", "pwm23_apb_share", CLK_SET_RATE_PARENT, APBC_PWM3, 0x6, 2, 0, 0, NULL}, + {PXA1908_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 3, 0, 0, &uart0_lock}, + {PXA1908_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 3, 0, 0, &uart1_lock}, + {PXA1908_CLK_THERMAL, "thermal_clk", NULL, 0, APBC_THERMAL, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_IPC_RST, "ipc_clk", NULL, 0, APBC_IPC_RST, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_SSP0, "ssp0_clk", "ssp0_mux", 0, APBC_SSP0, 0x7, 3, 0, 0, NULL}, + {PXA1908_CLK_SSP2, "ssp2_clk", "ssp2_mux", 0, APBC_SSP2, 0x7, 3, 0, 0, NULL}, +}; + +static struct mmp_param_mux_clk apbc_mux_clks[] = { + {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock}, + {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock}, + {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 0, APBC_SSP0, 4, 3, 0, NULL}, + {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 0, APBC_SSP2, 4, 3, 0, NULL}, +}; + +static void pxa1908_apb_periph_clk_init(struct pxa1908_clk_unit *pxa_unit) +{ + struct mmp_clk_unit *unit = &pxa_unit->unit; + struct clk *clk; + + mmp_clk_register_gate(NULL, "pwm01_apb_share", "pll1_d48", + CLK_SET_RATE_PARENT, + pxa_unit->base + APBC_PWM0, + 0x5, 1, 0, 0, &pwm0_lock); + mmp_clk_register_gate(NULL, "pwm23_apb_share", "pll1_d48", + CLK_SET_RATE_PARENT, + pxa_unit->base + APBC_PWM2, + 0x5, 1, 0, 0, &pwm2_lock); + clk = mmp_clk_register_apbc("swjtag", NULL, + pxa_unit->base + APBC_SWJTAG, 10, 0, NULL); + mmp_clk_add(unit, PXA1908_CLK_SWJTAG, clk); + mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->base, + ARRAY_SIZE(apbc_mux_clks)); + mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->base, + ARRAY_SIZE(apbc_gate_clks)); +} + +static int pxa1908_apbc_probe(struct platform_device *pdev) +{ + struct pxa1908_clk_unit *pxa_unit; + + pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL); + if (IS_ERR(pxa_unit)) + return PTR_ERR(pxa_unit); + + pxa_unit->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pxa_unit->base)) + return 
PTR_ERR(pxa_unit->base); + + mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBC_NR_CLKS); + + pxa1908_apb_periph_clk_init(pxa_unit); + + return 0; +} + +static const struct of_device_id pxa1908_apbc_match_table[] = { + { .compatible = "marvell,pxa1908-apbc" }, + { } +}; +MODULE_DEVICE_TABLE(of, pxa1908_apbc_match_table); + +static struct platform_driver pxa1908_apbc_driver = { + .probe = pxa1908_apbc_probe, + .driver = { + .name = "pxa1908-apbc", + .of_match_table = pxa1908_apbc_match_table + } +}; +module_platform_driver(pxa1908_apbc_driver); + +MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>"); +MODULE_DESCRIPTION("Marvell PXA1908 APBC Clock Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mmp/clk-pxa1908-apbcp.c b/drivers/clk/mmp/clk-pxa1908-apbcp.c new file mode 100644 index 000000000000..08f3845cbb1b --- /dev/null +++ b/drivers/clk/mmp/clk-pxa1908-apbcp.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <dt-bindings/clock/marvell,pxa1908.h> + +#include "clk.h" + +#define APBCP_UART2 0x1c +#define APBCP_TWSI2 0x28 +#define APBCP_AICER 0x38 + +#define APBCP_NR_CLKS 4 + +struct pxa1908_clk_unit { + struct mmp_clk_unit unit; + void __iomem *base; +}; + +static DEFINE_SPINLOCK(uart2_lock); + +static const char * const uart_parent_names[] = {"pll1_117", "uart_pll"}; + +static struct mmp_param_gate_clk apbcp_gate_clks[] = { + {PXA1908_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock}, + {PXA1908_CLK_TWSI2, "twsi2_clk", "pll1_32", CLK_SET_RATE_PARENT, APBCP_TWSI2, 0x7, 0x3, 0x0, 0, NULL}, + {PXA1908_CLK_AICER, "ripc_clk", NULL, 0, APBCP_AICER, 0x7, 0x2, 0x0, 0, NULL}, +}; + +static struct mmp_param_mux_clk apbcp_mux_clks[] = { + {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock}, +}; + +static void pxa1908_apb_p_periph_clk_init(struct pxa1908_clk_unit *pxa_unit) +{ + struct mmp_clk_unit *unit = &pxa_unit->unit; + + mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->base, + ARRAY_SIZE(apbcp_mux_clks)); + mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->base, + ARRAY_SIZE(apbcp_gate_clks)); +} + +static int pxa1908_apbcp_probe(struct platform_device *pdev) +{ + struct pxa1908_clk_unit *pxa_unit; + + pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL); + if (IS_ERR(pxa_unit)) + return PTR_ERR(pxa_unit); + + pxa_unit->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pxa_unit->base)) + return PTR_ERR(pxa_unit->base); + + mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBCP_NR_CLKS); + + pxa1908_apb_p_periph_clk_init(pxa_unit); + + return 0; +} + +static const struct of_device_id pxa1908_apbcp_match_table[] = { + { .compatible = "marvell,pxa1908-apbcp" }, + { } +}; +MODULE_DEVICE_TABLE(of, pxa1908_apbcp_match_table); + +static struct platform_driver pxa1908_apbcp_driver = { + .probe = pxa1908_apbcp_probe, + .driver = { + .name = "pxa1908-apbcp", + .of_match_table = pxa1908_apbcp_match_table + } +}; +module_platform_driver(pxa1908_apbcp_driver); + +MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>"); +MODULE_DESCRIPTION("Marvell PXA1908 APBCP Clock Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c new file mode 100644 index 000000000000..8cfb1258202f --- /dev/null +++ 
b/drivers/clk/mmp/clk-pxa1908-apmu.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <dt-bindings/clock/marvell,pxa1908.h> + +#include "clk.h" + +#define APMU_CLK_GATE_CTRL 0x40 +#define APMU_CCIC1 0x24 +#define APMU_ISP 0x38 +#define APMU_DSI1 0x44 +#define APMU_DISP1 0x4c +#define APMU_CCIC0 0x50 +#define APMU_SDH0 0x54 +#define APMU_SDH1 0x58 +#define APMU_USB 0x5c +#define APMU_NF 0x60 +#define APMU_VPU 0xa4 +#define APMU_GC 0xcc +#define APMU_SDH2 0xe0 +#define APMU_GC2D 0xf4 +#define APMU_TRACE 0x108 +#define APMU_DVC_DFC_DEBUG 0x140 + +#define APMU_NR_CLKS 17 + +struct pxa1908_clk_unit { + struct mmp_clk_unit unit; + void __iomem *base; +}; + +static DEFINE_SPINLOCK(pll1_lock); +static struct mmp_param_general_gate_clk pll1_gate_clks[] = { + {PXA1908_CLK_PLL1_D2_GATE, "pll1_d2_gate", "pll1_d2", 0, APMU_CLK_GATE_CTRL, 29, 0, &pll1_lock}, + {PXA1908_CLK_PLL1_416_GATE, "pll1_416_gate", "pll1_416", 0, APMU_CLK_GATE_CTRL, 27, 0, &pll1_lock}, + {PXA1908_CLK_PLL1_624_GATE, "pll1_624_gate", "pll1_624", 0, APMU_CLK_GATE_CTRL, 26, 0, &pll1_lock}, + {PXA1908_CLK_PLL1_832_GATE, "pll1_832_gate", "pll1_832", 0, APMU_CLK_GATE_CTRL, 30, 0, &pll1_lock}, + {PXA1908_CLK_PLL1_1248_GATE, "pll1_1248_gate", "pll1_1248", 0, APMU_CLK_GATE_CTRL, 28, 0, &pll1_lock}, +}; + +static DEFINE_SPINLOCK(sdh0_lock); +static DEFINE_SPINLOCK(sdh1_lock); +static DEFINE_SPINLOCK(sdh2_lock); + +static const char * const sdh_parent_names[] = {"pll1_416", "pll1_624"}; + +static struct mmp_clk_mix_config sdh_mix_config = { + .reg_info = DEFINE_MIX_REG_INFO(3, 8, 2, 6, 11), +}; + +static struct mmp_param_gate_clk apmu_gate_clks[] = { + {PXA1908_CLK_USB, "usb_clk", NULL, 0, APMU_USB, 0x9, 0x9, 0x1, 0, NULL}, + {PXA1908_CLK_SDH0, "sdh0_clk", "sdh0_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH0, 0x12, 0x12, 0x0, 0, &sdh0_lock}, + {PXA1908_CLK_SDH1, "sdh1_clk", "sdh1_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH1, 0x12, 0x12, 0x0, 0, &sdh1_lock}, + {PXA1908_CLK_SDH2, "sdh2_clk", "sdh2_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH2, 0x12, 0x12, 0x0, 0, &sdh2_lock} +}; + +static void pxa1908_axi_periph_clk_init(struct pxa1908_clk_unit *pxa_unit) +{ + struct mmp_clk_unit *unit = &pxa_unit->unit; + + mmp_register_general_gate_clks(unit, pll1_gate_clks, + pxa_unit->base, ARRAY_SIZE(pll1_gate_clks)); + + sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH0; + mmp_clk_register_mix(NULL, "sdh0_mix_clk", sdh_parent_names, + ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, + &sdh_mix_config, &sdh0_lock); + sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH1; + mmp_clk_register_mix(NULL, "sdh1_mix_clk", sdh_parent_names, + ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, + &sdh_mix_config, &sdh1_lock); + sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH2; + mmp_clk_register_mix(NULL, "sdh2_mix_clk", sdh_parent_names, + ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, + &sdh_mix_config, &sdh2_lock); + + mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->base, + ARRAY_SIZE(apmu_gate_clks)); +} + +static int pxa1908_apmu_probe(struct platform_device *pdev) +{ + struct pxa1908_clk_unit *pxa_unit; + + pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL); + if (IS_ERR(pxa_unit)) + return PTR_ERR(pxa_unit); + + pxa_unit->base = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(pxa_unit->base)) + return PTR_ERR(pxa_unit->base); + + mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APMU_NR_CLKS); + + pxa1908_axi_periph_clk_init(pxa_unit); + + return 0; +} + +static const struct of_device_id pxa1908_apmu_match_table[] = { + { .compatible = "marvell,pxa1908-apmu" }, + { } +}; +MODULE_DEVICE_TABLE(of, pxa1908_apmu_match_table); + +static struct platform_driver pxa1908_apmu_driver = { + .probe = pxa1908_apmu_probe, + .driver = { + .name = "pxa1908-apmu", + .of_match_table = pxa1908_apmu_match_table + } +}; +module_platform_driver(pxa1908_apmu_driver); + +MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>"); +MODULE_DESCRIPTION("Marvell PXA1908 APMU Clock Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mmp/clk-pxa1908-mpmu.c b/drivers/clk/mmp/clk-pxa1908-mpmu.c new file mode 100644 index 000000000000..e3337bacaadd --- /dev/null +++ b/drivers/clk/mmp/clk-pxa1908-mpmu.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/bits.h> +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/units.h> + +#include <dt-bindings/clock/marvell,pxa1908.h> + +#include "clk.h" + +#define MPMU_UART_PLL 0x14 + +#define MPMU_NR_CLKS 39 + +struct pxa1908_clk_unit { + struct mmp_clk_unit unit; + void __iomem *base; +}; + +static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = { + {PXA1908_CLK_CLK32, "clk32", NULL, 0, 32768}, + {PXA1908_CLK_VCTCXO, "vctcxo", NULL, 0, 26 * HZ_PER_MHZ}, + {PXA1908_CLK_PLL1_624, "pll1_624", NULL, 0, 624 * HZ_PER_MHZ}, + {PXA1908_CLK_PLL1_416, "pll1_416", NULL, 0, 416 * HZ_PER_MHZ}, + {PXA1908_CLK_PLL1_499, "pll1_499", NULL, 0, 499 * HZ_PER_MHZ}, + {PXA1908_CLK_PLL1_832, "pll1_832", NULL, 0, 832 * HZ_PER_MHZ}, + {PXA1908_CLK_PLL1_1248, "pll1_1248", NULL, 0, 1248 * HZ_PER_MHZ}, +}; + +static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { + {PXA1908_CLK_PLL1_D2, "pll1_d2", "pll1_624", 1, 2, 0}, + {PXA1908_CLK_PLL1_D4, "pll1_d4", "pll1_d2", 1, 2, 0}, + {PXA1908_CLK_PLL1_D6, "pll1_d6", "pll1_d2", 1, 3, 0}, + {PXA1908_CLK_PLL1_D8, "pll1_d8", "pll1_d4", 1, 2, 0}, + {PXA1908_CLK_PLL1_D12, "pll1_d12", "pll1_d6", 1, 2, 0}, + {PXA1908_CLK_PLL1_D13, "pll1_d13", "pll1_624", 1, 13, 0}, + {PXA1908_CLK_PLL1_D16, "pll1_d16", "pll1_d8", 1, 2, 0}, + {PXA1908_CLK_PLL1_D24, "pll1_d24", "pll1_d12", 1, 2, 0}, + {PXA1908_CLK_PLL1_D48, "pll1_d48", "pll1_d24", 1, 2, 0}, + {PXA1908_CLK_PLL1_D96, "pll1_d96", "pll1_d48", 1, 2, 0}, + {PXA1908_CLK_PLL1_32, "pll1_32", "pll1_d13", 2, 3, 0}, + {PXA1908_CLK_PLL1_208, "pll1_208", "pll1_d2", 2, 3, 0}, + {PXA1908_CLK_PLL1_117, "pll1_117", "pll1_624", 3, 16, 0}, +}; + +static struct u32_fract uart_factor_tbl[] = { + {.numerator = 8125, .denominator = 1536}, /* 14.745MHz */ +}; + +static struct mmp_clk_factor_masks uart_factor_masks = { + .factor = 2, + .num_mask = GENMASK(12, 0), + .den_mask = GENMASK(12, 0), + .num_shift = 16, + .den_shift = 0, +}; + +static void pxa1908_pll_init(struct pxa1908_clk_unit *pxa_unit) +{ + struct mmp_clk_unit *unit = &pxa_unit->unit; + + mmp_register_fixed_rate_clks(unit, fixed_rate_clks, + ARRAY_SIZE(fixed_rate_clks)); + + mmp_register_fixed_factor_clks(unit, fixed_factor_clks, + ARRAY_SIZE(fixed_factor_clks)); + + mmp_clk_register_factor("uart_pll", "pll1_d4", + CLK_SET_RATE_PARENT, + pxa_unit->base + MPMU_UART_PLL, + &uart_factor_masks, uart_factor_tbl, + ARRAY_SIZE(uart_factor_tbl), NULL); +} + +static int pxa1908_mpmu_probe(struct platform_device *pdev) +{ + struct pxa1908_clk_unit *pxa_unit; + + 
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL); + if (IS_ERR(pxa_unit)) + return PTR_ERR(pxa_unit); + + pxa_unit->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pxa_unit->base)) + return PTR_ERR(pxa_unit->base); + + mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, MPMU_NR_CLKS); + + pxa1908_pll_init(pxa_unit); + + return 0; +} + +static const struct of_device_id pxa1908_mpmu_match_table[] = { + { .compatible = "marvell,pxa1908-mpmu" }, + { } +}; +MODULE_DEVICE_TABLE(of, pxa1908_mpmu_match_table); + +static struct platform_driver pxa1908_mpmu_driver = { + .probe = pxa1908_mpmu_probe, + .driver = { + .name = "pxa1908-mpmu", + .of_match_table = pxa1908_mpmu_match_table + } +}; +module_platform_driver(pxa1908_mpmu_driver); + +MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>"); +MODULE_DESCRIPTION("Marvell PXA1908 MPMU Clock Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h index 55ac05379781..c83cec169ddc 100644 --- a/drivers/clk/mmp/clk.h +++ b/drivers/clk/mmp/clk.h @@ -3,6 +3,7 @@ #define __MACH_MMP_CLK_H #include <linux/clk-provider.h> +#include <linux/math.h> #include <linux/pm_domain.h> #include <linux/clkdev.h> @@ -20,16 +21,11 @@ struct mmp_clk_factor_masks { unsigned int enable_mask; }; -struct mmp_clk_factor_tbl { - unsigned int num; - unsigned int den; -}; - struct mmp_clk_factor { struct clk_hw hw; void __iomem *base; struct mmp_clk_factor_masks *masks; - struct mmp_clk_factor_tbl *ftbl; + struct u32_fract *ftbl; unsigned int ftbl_cnt; spinlock_t *lock; }; @@ -37,7 +33,7 @@ struct mmp_clk_factor { extern struct clk *mmp_clk_register_factor(const char *name, const char *parent_name, unsigned long flags, void __iomem *base, struct mmp_clk_factor_masks *masks, - struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt, + struct u32_fract *ftbl, unsigned int ftbl_cnt, spinlock_t *lock); /* Clock type "mix" */ diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index a3e2a09e2105..ef89d686cbc4 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -213,6 +213,14 @@ config IPQ_GCC_5332 Say Y if you want to use peripheral devices such as UART, SPI, i2c, USB, SD/eMMC, etc. +config IPQ_GCC_5424 + tristate "IPQ5424 Global Clock Controller" + depends on ARM64 || COMPILE_TEST + help + Support for the global clock controller on ipq5424 devices. + Say Y if you want to use peripheral devices such as UART, SPI, + i2c, USB, SD/eMMC, etc. + config IPQ_GCC_6018 tristate "IPQ6018 Global Clock Controller" help @@ -467,6 +475,26 @@ config QCS_GCC_404 Say Y if you want to use multimedia devices or peripheral devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc. +config SA_CAMCC_8775P + tristate "SA8775P Camera Clock Controller" + depends on ARM64 || COMPILE_TEST + select SA_GCC_8775P + help + Support for the camera clock controller on Qualcomm Technologies, Inc + SA8775P devices. + Say Y if you want to support camera devices and functionality such as + capturing pictures. + +config QCS_GCC_8300 + tristate "QCS8300 Global Clock Controller" + depends on ARM64 || COMPILE_TEST + select QCOM_GDSC + help + Support for the global clock controller on Qualcomm Technologies, Inc + QCS8300 devices. + Say Y if you want to use peripheral devices such as UART, + SPI, I2C, USB, SD/UFS, PCIe etc. 
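
The mmp_clk_factor_tbl to u32_fract conversion that ends above is mechanical (num/den become numerator/denominator) and the rate formula is unchanged: rate = parent * denominator / (numerator * factor). As a quick standalone check that the tables still read correctly, the UART entry { 8125, 1536 } with factor = 2 and the 156 MHz pll1_d4 parent wired up in the new PXA1908 MPMU driver gives the classic UART reference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mirrors clk_factor_recalc_rate(): parent * den / (num * factor) */
	uint64_t parent = 156000000;	/* pll1_d4 = 624 MHz / 4 */
	uint64_t numerator = 8125, denominator = 1536, factor = 2;

	printf("%llu\n",
	       (unsigned long long)(parent * denominator / (numerator * factor)));
	/* prints 14745600 -- the 14.7456 MHz rate the table comment
	 * rounds to "14.745MHZ" */
	return 0;
}
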
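One thing to watch in the four pxa1908 probe functions added above: devm_kzalloc() returns NULL on failure, never an ERR_PTR, so the IS_ERR(pxa_unit) test can never fire and a failed allocation would be dereferenced a few lines later. IS_ERR() is only the right check for devm_platform_ioremap_resource(), which does return error pointers. A corrected sketch of the probe shape (shown for the APBC variant; the other three are structurally identical):

static int pxa1908_apbc_probe(struct platform_device *pdev)
{
	struct pxa1908_clk_unit *pxa_unit;

	pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
	if (!pxa_unit)			/* kzalloc-style APIs return NULL */
		return -ENOMEM;

	pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pxa_unit->base))	/* this one really is an ERR_PTR */
		return PTR_ERR(pxa_unit->base);

	mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBC_NR_CLKS);

	pxa1908_apb_periph_clk_init(pxa_unit);

	return 0;
}
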
+ config SC_CAMCC_7180 tristate "SC7180 Camera Clock Controller" depends on ARM64 || COMPILE_TEST @@ -497,6 +525,16 @@ config SC_CAMCC_8280XP Say Y if you want to support camera devices and functionality such as capturing pictures. +config SA_DISPCC_8775P + tristate "SA8775P Display Clock Controller" + depends on ARM64 || COMPILE_TEST + select SA_GCC_8775P + help + Support for the two display clock controllers on Qualcomm + Technologies, Inc. SA8775P devices. + Say Y if you want to support display devices and functionality such as + splash screen. + config SC_DISPCC_7180 tristate "SC7180 Display Clock Controller" depends on ARM64 || COMPILE_TEST @@ -545,6 +583,24 @@ config SA_GPUCC_8775P Say Y if you want to support graphics controller devices and functionality such as 3D graphics. +config SAR_GCC_2130P + tristate "SAR2130P Global Clock Controller" + select QCOM_GDSC + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on SAR2130P devices. + Say Y if you want to use peripheral devices such as UART, SPI, + I2C, USB, SDCC, etc. + +config SAR_GPUCC_2130P + tristate "SAR2130P Graphics clock controller" + select QCOM_GDSC + select SAR_GCC_2130P + help + Support for the graphics clock controller on SAR2130P devices. + Say Y if you want to support graphics controller devices and + functionality such as 3D graphics. + config SC_GCC_7180 tristate "SC7180 Global Clock Controller" select QCOM_GDSC @@ -857,7 +913,7 @@ config SM_CAMCC_8450 depends on ARM64 || COMPILE_TEST select SM_GCC_8450 help - Support for the camera clock controller on SM8450 devices. + Support for the camera clock controller on SM8450 or SM8475 devices. Say Y if you want to support camera devices and camera functionality. config SM_CAMCC_8550 @@ -952,17 +1008,17 @@ config SM_DISPCC_8450 depends on SM_GCC_8450 help Support for the display clock controller on Qualcomm Technologies, Inc - SM8450 devices. + SM8450 or SM8475 devices. Say Y if you want to support display devices and functionality such as splash screen. config SM_DISPCC_8550 tristate "SM8550 Display Clock Controller" depends on ARM64 || COMPILE_TEST - depends on SM_GCC_8550 || SM_GCC_8650 + depends on SM_GCC_8550 || SM_GCC_8650 || SAR_GCC_2130P help Support for the display clock controller on Qualcomm Technologies, Inc - SM8550 or SM8650 devices. + SAR2130P, SM8550 or SM8650 devices. Say Y if you want to support display devices and functionality such as splash screen. @@ -987,6 +1043,7 @@ config SM_GCC_6115 config SM_GCC_6125 tristate "SM6125 Global Clock Controller" depends on ARM64 || COMPILE_TEST + select QCOM_GDSC help Support for the global clock controller on SM6125 devices. Say Y if you want to use peripheral devices such as UART, @@ -1050,7 +1107,8 @@ config SM_GCC_8450 depends on ARM64 || COMPILE_TEST select QCOM_GDSC help - Support for the global clock controller on SM8450 devices. + Support for the global clock controller on SM8450 or SM8475 + devices. Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, SD/UFS, PCIe etc. @@ -1149,7 +1207,8 @@ config SM_GPUCC_8450 depends on ARM64 || COMPILE_TEST select SM_GCC_8450 help - Support for the graphics clock controller on SM8450 devices. + Support for the graphics clock controller on SM8450 or SM8475 + devices. Say Y if you want to support graphics controller devices and functionality such as 3D graphics. @@ -1187,6 +1246,17 @@ config SM_TCSRCC_8650 Support for the TCSR clock controller on SM8650 devices. Say Y if you want to use peripheral devices such as SD/UFS. 
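
For the SA8775P camera clock controller whose source follows below, each PLL rate is set by the l/alpha pair in its alpha_pll_config. Assuming the Lucid EVO PLLs use the usual 16-bit fractional field, rate = parent * (l + alpha / 2^16) off the 19.2 MHz TCXO; a standalone check of the values used in this file:

#include <stdint.h>
#include <stdio.h>

/* assumed form: rate = parent * (l + alpha / 2^16) */
static uint64_t evo_pll_rate(uint64_t parent, uint32_t l, uint32_t alpha)
{
	return parent * l + ((parent * alpha) >> 16);
}

int main(void)
{
	/* cam_cc_pll0: l = 0x3e (62), alpha = 0x8000 (0.5) -> 1200000000;
	 * ftbl_cam_cc_camnoc_axi_clk_src then divides by 3 -> 400 MHz */
	printf("%llu\n", (unsigned long long)evo_pll_rate(19200000, 0x3e, 0x8000));
	/* cam_cc_pll2: l = 0x32 (50), alpha = 0 -> 960000000, inside the
	 * 864..1056 MHz rivian_evo_vco range (assuming the same l scaling) */
	printf("%llu\n", (unsigned long long)evo_pll_rate(19200000, 0x32, 0));
	return 0;
}
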
+config SA_VIDEOCC_8775P + tristate "SA8775P Video Clock Controller" + depends on ARM64 || COMPILE_TEST + select SA_GCC_8775P + select QCOM_GDSC + help + Support for the video clock controller on Qualcomm Technologies, Inc. + SA8775P devices. + Say Y if you want to support video devices and functionality such as + video encode/decode. + config SM_VIDEOCC_7150 tristate "SM7150 Video Clock Controller" depends on ARM64 || COMPILE_TEST @@ -1230,11 +1300,11 @@ config SM_VIDEOCC_8350 config SM_VIDEOCC_8550 tristate "SM8550 Video Clock Controller" depends on ARM64 || COMPILE_TEST - select SM_GCC_8550 + depends on SM_GCC_8550 || SM_GCC_8650 select QCOM_GDSC help Support for the video clock controller on Qualcomm Technologies, Inc. - SM8550 devices. + SM8550 or SM8650 devices. Say Y if you want to support video devices and functionality such as video encode/decode. @@ -1283,7 +1353,7 @@ config SM_VIDEOCC_8450 select QCOM_GDSC help Support for the video clock controller on Qualcomm Technologies, Inc. - SM8450 devices. + SM8450 or SM8475 devices. Say Y if you want to support video devices and functionality such as video encode/decode. endif diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 2b378667a63f..b09dbdc210eb 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o +obj-$(CONFIG_IPQ_GCC_5424) += gcc-ipq5424.o obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o @@ -70,6 +71,7 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o +obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o @@ -80,8 +82,13 @@ obj-$(CONFIG_SC_CAMCC_8280XP) += camcc-sc8280xp.o obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o obj-$(CONFIG_SC_DISPCC_8280XP) += dispcc-sc8280xp.o +obj-$(CONFIG_SA_CAMCC_8775P) += camcc-sa8775p.o +obj-$(CONFIG_SA_DISPCC_8775P) += dispcc0-sa8775p.o dispcc1-sa8775p.o obj-$(CONFIG_SA_GCC_8775P) += gcc-sa8775p.o obj-$(CONFIG_SA_GPUCC_8775P) += gpucc-sa8775p.o +obj-$(CONFIG_SA_VIDEOCC_8775P) += videocc-sa8775p.o +obj-$(CONFIG_SAR_GCC_2130P) += gcc-sar2130p.o +obj-$(CONFIG_SAR_GPUCC_2130P) += gpucc-sar2130p.o obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c new file mode 100644 index 000000000000..c04801a5af35 --- /dev/null +++ b/drivers/clk/qcom/camcc-sa8775p.c @@ -0,0 +1,1868 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sa8775p-camcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_IFACE, + DT_BI_TCXO, + DT_BI_TCXO_AO, + DT_SLEEP_CLK, +}; + +enum { + P_BI_TCXO, + P_BI_TCXO_AO, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL0_OUT_MAIN, + P_CAM_CC_PLL0_OUT_ODD, + P_CAM_CC_PLL2_OUT_EVEN, + P_CAM_CC_PLL2_OUT_MAIN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CAM_CC_PLL4_OUT_EVEN, + P_CAM_CC_PLL5_OUT_EVEN, + P_SLEEP_CLK, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +static const struct pll_vco rivian_evo_vco[] = { + { 864000000, 1056000000, 0 }, +}; + +static const struct alpha_pll_config cam_cc_pll0_config = { + .l = 0x3e, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00008400, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = { + { 0x2, 3 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = { + .offset = 0x0, + .post_div_shift = 14, + .post_div_table = post_div_table_cam_cc_pll0_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_odd", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll2_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x90008820, + .config_ctl_hi_val = 0x00890263, + .config_ctl_hi1_val = 0x00000247, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400000, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x1000, + .vco_table = rivian_evo_vco, + .num_vco = ARRAY_SIZE(rivian_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO], + .clkr = { + .hw.init = &(const 
struct clk_init_data) { + .name = "cam_cc_pll2", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_rivian_evo_ops, + }, + }, +}; + +static const struct alpha_pll_config cam_cc_pll3_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x2000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { + .offset = 0x2000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll3_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll3.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll4_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll cam_cc_pll4 = { + .offset = 0x3000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = { + .offset = 0x3000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll4_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll4.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll5_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll cam_cc_pll5 = { + .offset = 0x4000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll5", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, 
+ .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = { + .offset = 0x4000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll5_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll5_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll5.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL2_OUT_EVEN, 3 }, + { P_CAM_CC_PLL2_OUT_MAIN, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll2.clkr.hw }, + { .hw = &cam_cc_pll2.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL4_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll4_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL5_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll5_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL3_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll3_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_5[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_5[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map cam_cc_parent_map_6_ao[] = { + { P_BI_TCXO_AO, 0 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_6_ao[] = { + { .index = DT_BI_TCXO_AO }, +}; + +static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = { + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { + .cmd_rcgr = 0x13170, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = { + F(37500000, P_CAM_CC_PLL0_OUT_MAIN, 16, 1, 2), + { } +}; + +static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .cmd_rcgr = 0x130a0, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name 
= "cam_cc_cci_0_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .cmd_rcgr = 0x130bc, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_2_clk_src = { + .cmd_rcgr = 0x130d8, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_2_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_3_clk_src = { + .cmd_rcgr = 0x130f4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_3_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0x11034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cphy_rx_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x15074, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x15098, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x150b8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .cmd_rcgr = 0x150d8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = 
cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi3phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csid_clk_src = { + .cmd_rcgr = 0x13150, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0x13120, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fast_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0x1307c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { + F(480000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .cmd_rcgr = 0x11004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_clk_src", + .parent_data = cam_cc_parent_data_2, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = { + F(480000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .cmd_rcgr = 0x12004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_ife_1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_clk_src", + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = { + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_lite_clk_src = { + .cmd_rcgr = 0x13000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_lite_clk_src, + 
.clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { + .cmd_rcgr = 0x13020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ipe_clk_src[] = { + F(480000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ipe_clk_src = { + .cmd_rcgr = 0x10004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_4, + .freq_tbl = ftbl_cam_cc_ipe_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_clk_src", + .parent_data = cam_cc_parent_data_4, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50), + F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4), + F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x15004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x15020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x1503c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x15058, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sleep_clk_src = { + .cmd_rcgr = 0x131f0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_5, + .freq_tbl = ftbl_cam_cc_sleep_clk_src, + 
.clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sleep_clk_src", + .parent_data = cam_cc_parent_data_5, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0x13138, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_slow_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO_AO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_xo_clk_src = { + .cmd_rcgr = 0x131d4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_6_ao, + .freq_tbl = ftbl_cam_cc_xo_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_xo_clk_src", + .parent_data = cam_cc_parent_data_6_ao, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_clk = { + .halt_reg = 0x13188, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13188, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_dcd_xo_clk = { + .halt_reg = 0x13190, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13190, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_dcd_xo_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_qdss_debug_xo_clk = { + .halt_reg = 0x131b8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x131b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_qdss_debug_xo_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_0_clk = { + .halt_reg = 0x130b8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x130b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_1_clk = { + .halt_reg = 0x130d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x130d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_2_clk = { + .halt_reg = 0x130f0, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x130f0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_3_clk = { + .halt_reg = 0x1310c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1310c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_core_ahb_clk = { + .halt_reg = 0x131d0, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x131d0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_core_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0x13110, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13110, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_fast_ahb_clk = { + .halt_reg = 0x13118, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13118, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_0_clk = { + .halt_reg = 0x11024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ife_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_1_clk = { + .halt_reg = 0x12024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ife_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_lite_clk = { + .halt_reg = 0x1301c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1301c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ife_lite_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ipe_clk = { + .halt_reg = 0x10024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ipe_clk", + .parent_hws = 
(const struct clk_hw*[]) { + &cam_cc_ipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_sfe_lite_0_clk = { + .halt_reg = 0x13050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_sfe_lite_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_sfe_lite_1_clk = { + .halt_reg = 0x13068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_sfe_lite_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x1508c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1508c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi0phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x150b0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150b0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi1phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x150d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150d0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi2phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi3phytimer_clk = { + .halt_reg = 0x150f0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150f0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi3phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi3phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csid_clk = { + .halt_reg = 0x13168, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13168, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csid_csiphy_rx_clk = { + .halt_reg = 0x15094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15094, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csid_csiphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + 
.ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x15090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15090, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x150b4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150b4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x150d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy3_clk = { + .halt_reg = 0x150f4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150f4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ahb_clk = { + .halt_reg = 0x1309c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1309c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0x13094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13094, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_icp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_clk = { + .halt_reg = 0x1101c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1101c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_fast_ahb_clk = { + .halt_reg = 0x11030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_clk = { + .halt_reg = 0x1201c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1201c, + .enable_mask = BIT(0), + .hw.init 
= &(const struct clk_init_data) { + .name = "cam_cc_ife_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_fast_ahb_clk = { + .halt_reg = 0x12030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_ahb_clk = { + .halt_reg = 0x13044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_clk = { + .halt_reg = 0x13018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = { + .halt_reg = 0x13040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_csid_clk = { + .halt_reg = 0x13038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_ahb_clk = { + .halt_reg = 0x10030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_clk = { + .halt_reg = 0x1001c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1001c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_fast_ahb_clk = { + .halt_reg = 0x10034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, 
+ .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x1501c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1501c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x15038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x15054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x15070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15070, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_lite_0_clk = { + .halt_reg = 0x1304c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1304c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_lite_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_lite_0_fast_ahb_clk = { + .halt_reg = 0x1305c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1305c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_lite_0_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_lite_1_clk = { + .halt_reg = 0x13064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_lite_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_lite_1_fast_ahb_clk = { + .halt_reg = 0x13074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13074, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_lite_1_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sm_obs_clk = { + .halt_reg = 0x1510c, + 
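/* The halt bit is not polled for this clock (BRANCH_HALT_SKIP) */ +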
.halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x1510c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sm_obs_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc cam_cc_titan_top_gdsc = { + .gdscr = 0x131bc, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_titan_top_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *cam_cc_sa8775p_clocks[] = { + [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr, + [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr, + [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr, + [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr, + [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr, + [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr, + [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr, + [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr, + [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr, + [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr, + [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr, + [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr, + [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr, + [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr, + [CAM_CC_CPAS_IPE_CLK] = &cam_cc_cpas_ipe_clk.clkr, + [CAM_CC_CPAS_SFE_LITE_0_CLK] = &cam_cc_cpas_sfe_lite_0_clk.clkr, + [CAM_CC_CPAS_SFE_LITE_1_CLK] = &cam_cc_cpas_sfe_lite_1_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr, + [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr, + [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr, + [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr, + [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr, + [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr, + [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr, + [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr, + [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr, + [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr, + [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr, + [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr, + [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr, + [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr, + 
[CAM_CC_IPE_AHB_CLK] = &cam_cc_ipe_ahb_clk.clkr, + [CAM_CC_IPE_CLK] = &cam_cc_ipe_clk.clkr, + [CAM_CC_IPE_CLK_SRC] = &cam_cc_ipe_clk_src.clkr, + [CAM_CC_IPE_FAST_AHB_CLK] = &cam_cc_ipe_fast_ahb_clk.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr, + [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL3] = &cam_cc_pll3.clkr, + [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr, + [CAM_CC_PLL4] = &cam_cc_pll4.clkr, + [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr, + [CAM_CC_PLL5] = &cam_cc_pll5.clkr, + [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr, + [CAM_CC_SFE_LITE_0_CLK] = &cam_cc_sfe_lite_0_clk.clkr, + [CAM_CC_SFE_LITE_0_FAST_AHB_CLK] = &cam_cc_sfe_lite_0_fast_ahb_clk.clkr, + [CAM_CC_SFE_LITE_1_CLK] = &cam_cc_sfe_lite_1_clk.clkr, + [CAM_CC_SFE_LITE_1_FAST_AHB_CLK] = &cam_cc_sfe_lite_1_fast_ahb_clk.clkr, + [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, + [CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr, + [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr, + [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr, +}; + +static struct gdsc *cam_cc_sa8775p_gdscs[] = { + [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc, +}; + +static const struct qcom_reset_map cam_cc_sa8775p_resets[] = { + [CAM_CC_ICP_BCR] = { 0x13078 }, + [CAM_CC_IFE_0_BCR] = { 0x11000 }, + [CAM_CC_IFE_1_BCR] = { 0x12000 }, + [CAM_CC_IPE_0_BCR] = { 0x10000 }, + [CAM_CC_SFE_LITE_0_BCR] = { 0x13048 }, + [CAM_CC_SFE_LITE_1_BCR] = { 0x13060 }, +}; + +static const struct regmap_config cam_cc_sa8775p_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x16218, + .fast_io = true, +}; + +static struct qcom_cc_desc cam_cc_sa8775p_desc = { + .config = &cam_cc_sa8775p_regmap_config, + .clks = cam_cc_sa8775p_clocks, + .num_clks = ARRAY_SIZE(cam_cc_sa8775p_clocks), + .resets = cam_cc_sa8775p_resets, + .num_resets = ARRAY_SIZE(cam_cc_sa8775p_resets), + .gdscs = cam_cc_sa8775p_gdscs, + .num_gdscs = ARRAY_SIZE(cam_cc_sa8775p_gdscs), +}; + +static const struct of_device_id cam_cc_sa8775p_match_table[] = { + { .compatible = "qcom,sa8775p-camcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_sa8775p_match_table); + +static int cam_cc_sa8775p_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + + regmap = qcom_cc_map(pdev, &cam_cc_sa8775p_desc); + if (IS_ERR(regmap)) { + pm_runtime_put(&pdev->dev); + return PTR_ERR(regmap); + } + + clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); + clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); + clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 
0x13194); /* CAM_CC_CAMNOC_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */ + qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */ + + ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap); + + pm_runtime_put(&pdev->dev); + + return ret; +} + +static struct platform_driver cam_cc_sa8775p_driver = { + .probe = cam_cc_sa8775p_probe, + .driver = { + .name = "camcc-sa8775p", + .of_match_table = cam_cc_sa8775p_match_table, + }, +}; + +module_platform_driver(cam_cc_sa8775p_driver); + +MODULE_DESCRIPTION("QTI CAMCC SA8775P Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c index 26b78eed15ef..08982737e490 100644 --- a/drivers/clk/qcom/camcc-sm8450.c +++ b/drivers/clk/qcom/camcc-sm8450.c @@ -54,6 +54,10 @@ static const struct pll_vco rivian_evo_vco[] = { { 864000000, 1056000000, 0 }, }; +static const struct pll_vco rivian_ole_vco[] = { + { 864000000, 1075000000, 0 }, +}; + static const struct clk_parent_data pll_parent_data_tcxo = { .index = DT_BI_TCXO }; static const struct alpha_pll_config cam_cc_pll0_config = { @@ -66,6 +70,20 @@ static const struct alpha_pll_config cam_cc_pll0_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll0_config = { + .l = 0x3e, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00008400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll0 = { .offset = 0x0, .vco_table = lucid_evo_vco, @@ -86,6 +104,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll0_out_even_init = { + .name = "cam_cc_pll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { .offset = 0x0, .post_div_shift = 10, @@ -109,6 +137,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll0_out_odd_init = { + .name = "cam_cc_pll0_out_odd", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = { .offset = 0x0, .post_div_shift = 14, @@ -137,6 +175,20 @@ static const struct alpha_pll_config cam_cc_pll1_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll1_config = { + .l = 0x25, + .alpha = 0xeaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll1 = { .offset = 0x1000, .vco_table = lucid_evo_vco, @@ -157,6 +209,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll1_out_even_init = { + .name = "cam_cc_pll1_out_even", + .parent_hws = (const struct clk_hw*[]) { + 
&cam_cc_pll1.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { .offset = 0x1000, .post_div_shift = 10, @@ -183,6 +245,16 @@ static const struct alpha_pll_config cam_cc_pll2_config = { .config_ctl_hi1_val = 0x00000217, }; +static const struct alpha_pll_config sm8475_cam_cc_pll2_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x10000030, + .config_ctl_hi_val = 0x80890263, + .config_ctl_hi1_val = 0x00000217, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000000, +}; + static struct clk_alpha_pll cam_cc_pll2 = { .offset = 0x2000, .vco_table = rivian_evo_vco, @@ -208,6 +280,20 @@ static const struct alpha_pll_config cam_cc_pll3_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll3_config = { + .l = 0x2d, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll3 = { .offset = 0x3000, .vco_table = lucid_evo_vco, @@ -228,6 +314,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll3_out_even_init = { + .name = "cam_cc_pll3_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll3.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { .offset = 0x3000, .post_div_shift = 10, @@ -256,6 +352,20 @@ static const struct alpha_pll_config cam_cc_pll4_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll4_config = { + .l = 0x2d, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll4 = { .offset = 0x4000, .vco_table = lucid_evo_vco, @@ -276,6 +386,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll4_out_even_init = { + .name = "cam_cc_pll4_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll4.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = { .offset = 0x4000, .post_div_shift = 10, @@ -304,6 +424,20 @@ static const struct alpha_pll_config cam_cc_pll5_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll5_config = { + .l = 0x2d, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll5 = { .offset = 0x5000, .vco_table = lucid_evo_vco, @@ -324,6 +458,16 @@ static const struct clk_div_table 
post_div_table_cam_cc_pll5_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll5_out_even_init = { + .name = "cam_cc_pll5_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll5.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = { .offset = 0x5000, .post_div_shift = 10, @@ -352,6 +496,20 @@ static const struct alpha_pll_config cam_cc_pll6_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll6_config = { + .l = 0x2d, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll6 = { .offset = 0x6000, .vco_table = lucid_evo_vco, @@ -372,6 +530,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll6_out_even_init = { + .name = "cam_cc_pll6_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll6.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = { .offset = 0x6000, .post_div_shift = 10, @@ -400,6 +568,20 @@ static const struct alpha_pll_config cam_cc_pll7_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll7_config = { + .l = 0x2d, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll7 = { .offset = 0x7000, .vco_table = lucid_evo_vco, @@ -420,6 +602,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll7_out_even_init = { + .name = "cam_cc_pll7_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll7.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = { .offset = 0x7000, .post_div_shift = 10, @@ -448,6 +640,20 @@ static const struct alpha_pll_config cam_cc_pll8_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_cam_cc_pll8_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll cam_cc_pll8 = { .offset = 0x8000, .vco_table = lucid_evo_vco, @@ -468,6 +674,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = { { } }; +static struct clk_init_data sm8475_cam_cc_pll8_out_even_init = { + .name = "cam_cc_pll8_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll8.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = { .offset = 0x8000, .post_div_shift = 10, @@ -2817,6 +3033,7 @@ static const struct qcom_cc_desc cam_cc_sm8450_desc = { static const struct of_device_id cam_cc_sm8450_match_table[] = { { .compatible = "qcom,sm8450-camcc" }, + { .compatible = "qcom,sm8475-camcc" }, { } }; MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table); @@ -2829,15 +3046,72 @@ static int cam_cc_sm8450_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap); - clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); - clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); - clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); - clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); - clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); - clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); - clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config); - clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config); - clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-camcc")) { + /* Update CAMCC PLL0 */ + cam_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll0_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll0_out_even.clkr.hw.init = &sm8475_cam_cc_pll0_out_even_init; + cam_cc_pll0_out_odd.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll0_out_odd.clkr.hw.init = &sm8475_cam_cc_pll0_out_odd_init; + + /* Update CAMCC PLL1 */ + cam_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll1_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll1_out_even.clkr.hw.init = &sm8475_cam_cc_pll1_out_even_init; + + /* Update CAMCC PLL2 */ + cam_cc_pll2.vco_table = rivian_ole_vco; + + /* Update CAMCC PLL3 */ + cam_cc_pll3.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll3_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll3_out_even.clkr.hw.init = &sm8475_cam_cc_pll3_out_even_init; + + /* Update CAMCC PLL4 */ + cam_cc_pll4.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll4_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll4_out_even.clkr.hw.init = &sm8475_cam_cc_pll4_out_even_init; + + /* Update CAMCC PLL5 */ + cam_cc_pll5.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll5_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll5_out_even.clkr.hw.init = &sm8475_cam_cc_pll5_out_even_init; + + /* Update CAMCC PLL6 */ + cam_cc_pll6.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll6_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll6_out_even.clkr.hw.init = &sm8475_cam_cc_pll6_out_even_init; + + /* Update CAMCC PLL7 */ + cam_cc_pll7.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll7_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll7_out_even.clkr.hw.init = &sm8475_cam_cc_pll7_out_even_init; + + /* Update CAMCC PLL8 */ + cam_cc_pll8.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll8_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + cam_cc_pll8_out_even.clkr.hw.init = &sm8475_cam_cc_pll8_out_even_init; + + 
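/* Program all PLLs with the SM8475-specific settings */ +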
clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &sm8475_cam_cc_pll0_config); + clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &sm8475_cam_cc_pll1_config); + clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &sm8475_cam_cc_pll2_config); + clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &sm8475_cam_cc_pll3_config); + clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &sm8475_cam_cc_pll4_config); + clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &sm8475_cam_cc_pll5_config); + clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &sm8475_cam_cc_pll6_config); + clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &sm8475_cam_cc_pll7_config); + clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &sm8475_cam_cc_pll8_config); + } else { + clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); + clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); + clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); + clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); + clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config); + clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config); + clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config); + } return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap); } @@ -2852,5 +3126,5 @@ static struct platform_driver cam_cc_sm8450_driver = { module_platform_driver(cam_cc_sm8450_driver); -MODULE_DESCRIPTION("QCOM CAMCC SM8450 Driver"); +MODULE_DESCRIPTION("QCOM CAMCC SM8450 / SM8475 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index be9bee6ab65f..b8351f8c0b84 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -267,6 +267,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_OPMODE] = 0x30, [PLL_OFF_STATUS] = 0x3c, }, + [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = { + [PLL_OFF_L_VAL] = 0x04, + [PLL_OFF_ALPHA_VAL] = 0x08, + [PLL_OFF_TEST_CTL] = 0x0c, + [PLL_OFF_TEST_CTL_U] = 0x10, + [PLL_OFF_USER_CTL] = 0x14, + [PLL_OFF_CONFIG_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL_U] = 0x1c, + [PLL_OFF_STATUS] = 0x20, + }, + }; EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); @@ -1903,9 +1914,8 @@ static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw) } /* Check if PLL is already enabled, return if enabled */ - ret = trion_pll_is_enabled(pll, pll->clkr.regmap); - if (ret < 0) - return ret; + if (trion_pll_is_enabled(pll, pll->clkr.regmap)) + return 0; ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); if (ret) @@ -2318,13 +2328,8 @@ static int alpha_pll_lucid_evo_enable(struct clk_hw *hw) } /* Check if PLL is already enabled */ - ret = trion_pll_is_enabled(pll, regmap); - if (ret < 0) { - return ret; - } else if (ret) { - pr_warn("%s PLL is already enabled\n", clk_hw_get_name(&pll->clkr.hw)); + if (trion_pll_is_enabled(pll, regmap)) return 0; - } ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); if (ret) diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 55eca04b23a1..c6d1b8429f95 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -32,6 +32,7 @@ enum { CLK_ALPHA_PLL_TYPE_BRAMMO_EVO, CLK_ALPHA_PLL_TYPE_STROMER, CLK_ALPHA_PLL_TYPE_STROMER_PLUS, + CLK_ALPHA_PLL_TYPE_NSS_HUAYRA, 
CLK_ALPHA_PLL_TYPE_MAX, }; diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index 8e0f3372dc7a..80f1f4fcd52a 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops; extern const struct clk_ops clk_pixel_ops; extern const struct clk_ops clk_gfx3d_ops; extern const struct clk_ops clk_rcg2_shared_ops; +extern const struct clk_ops clk_rcg2_shared_floor_ops; extern const struct clk_ops clk_rcg2_shared_no_init_park_ops; extern const struct clk_ops clk_dp_ops; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index bf26c5448f00..bf6406f5279a 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1186,15 +1186,23 @@ clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f) return clk_rcg2_clear_force_enable(hw); } -static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate, + enum freq_policy policy) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); const struct freq_tbl *f; - f = qcom_find_freq(rcg->freq_tbl, rate); - if (!f) + switch (policy) { + case FLOOR: + f = qcom_find_freq_floor(rcg->freq_tbl, rate); + break; + case CEIL: + f = qcom_find_freq(rcg->freq_tbl, rate); + break; + default: return -EINVAL; + } /* * In case clock is disabled, update the M, N and D registers, cache @@ -1207,10 +1215,28 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate, return clk_rcg2_shared_force_enable_clear(hw, f); } +static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL); +} + static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate, u8 index) { - return clk_rcg2_shared_set_rate(hw, rate, parent_rate); + return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL); +} + +static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR); +} + +static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate, u8 index) +{ + return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR); } static int clk_rcg2_shared_enable(struct clk_hw *hw) @@ -1348,6 +1374,18 @@ const struct clk_ops clk_rcg2_shared_ops = { }; EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); +const struct clk_ops clk_rcg2_shared_floor_ops = { + .enable = clk_rcg2_shared_enable, + .disable = clk_rcg2_shared_disable, + .get_parent = clk_rcg2_shared_get_parent, + .set_parent = clk_rcg2_shared_set_parent, + .recalc_rate = clk_rcg2_shared_recalc_rate, + .determine_rate = clk_rcg2_determine_floor_rate, + .set_rate = clk_rcg2_shared_set_floor_rate, + .set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent, +}; +EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops); + static int clk_rcg2_shared_no_init_park(struct clk_hw *hw) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 4acde937114a..eefc322ce367 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -389,6 +389,18 @@ DEFINE_CLK_RPMH_BCM(ipa, "IP0"); DEFINE_CLK_RPMH_BCM(pka, "PKA0"); DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0"); +static struct clk_hw 
*sar2130p_rpmh_clocks[] = { + [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw, + [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw, + [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, + [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, +}; + +static const struct clk_rpmh_desc clk_rpmh_sar2130p = { + .clks = sar2130p_rpmh_clocks, + .num_clks = ARRAY_SIZE(sar2130p_rpmh_clocks), +}; + static struct clk_hw *sdm845_rpmh_clocks[] = { [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, @@ -880,6 +892,7 @@ static int clk_rpmh_probe(struct platform_device *pdev) static const struct of_device_id clk_rpmh_match_table[] = { { .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000}, { .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p}, + { .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p}, { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180}, { .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x}, { .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp}, diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index 7e57f8fe8ea6..7ace5d7f5836 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h @@ -35,7 +35,7 @@ struct qcom_cc_desc { size_t num_gdscs; struct clk_hw **clk_hws; size_t num_clk_hws; - struct qcom_icc_hws_data *icc_hws; + const struct qcom_icc_hws_data *icc_hws; size_t num_icc_hws; unsigned int icc_first_node_id; }; diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c index d1d3f60789ee..a1f183e6c636 100644 --- a/drivers/clk/qcom/dispcc-sm8450.c +++ b/drivers/clk/qcom/dispcc-sm8450.c @@ -85,6 +85,29 @@ static const struct alpha_pll_config disp_cc_pll0_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_disp_cc_pll0_config = { + .l = 0xd, + .alpha = 0x6492, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_init_data sm8475_disp_cc_pll0_init = { + .name = "disp_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_reset_lucid_ole_ops, +}; + static struct clk_alpha_pll disp_cc_pll0 = { .offset = 0x0, .vco_table = lucid_evo_vco, @@ -112,6 +135,29 @@ static const struct alpha_pll_config disp_cc_pll1_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_disp_cc_pll1_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_init_data sm8475_disp_cc_pll1_init = { + .name = "disp_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_reset_lucid_ole_ops, +}; + static struct clk_alpha_pll disp_cc_pll1 = { .offset = 0x1000, .vco_table = lucid_evo_vco, @@ -1746,6 +1792,7 @@ static struct qcom_cc_desc disp_cc_sm8450_desc = { static const struct of_device_id disp_cc_sm8450_match_table[] = { { .compatible = "qcom,sm8450-dispcc" }, + { .compatible = 
"qcom,sm8475-dispcc" }, { } }; MODULE_DEVICE_TABLE(of, disp_cc_sm8450_match_table); @@ -1769,8 +1816,21 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev) goto err_put_rpm; } - clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); - clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-dispcc")) { + /* Update DISPCC PLL0 */ + disp_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + disp_cc_pll0.clkr.hw.init = &sm8475_disp_cc_pll0_init; + + /* Update DISPCC PLL1 */ + disp_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + disp_cc_pll1.clkr.hw.init = &sm8475_disp_cc_pll1_init; + + clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &sm8475_disp_cc_pll0_config); + clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &sm8475_disp_cc_pll1_config); + } else { + clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + } /* Enable clock gating for MDP clocks */ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10); @@ -1802,5 +1862,5 @@ static struct platform_driver disp_cc_sm8450_driver = { module_platform_driver(disp_cc_sm8450_driver); -MODULE_DESCRIPTION("QTI DISPCC SM8450 Driver"); +MODULE_DESCRIPTION("QTI DISPCC SM8450 / SM8475 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c index 7f9021ca0ecb..e41d4104d770 100644 --- a/drivers/clk/qcom/dispcc-sm8550.c +++ b/drivers/clk/qcom/dispcc-sm8550.c @@ -75,7 +75,7 @@ static struct pll_vco lucid_ole_vco[] = { { 249600000, 2000000000, 0 }, }; -static const struct alpha_pll_config disp_cc_pll0_config = { +static struct alpha_pll_config disp_cc_pll0_config = { .l = 0xd, .alpha = 0x6492, .config_ctl_val = 0x20485699, @@ -106,7 +106,7 @@ static struct clk_alpha_pll disp_cc_pll0 = { }, }; -static const struct alpha_pll_config disp_cc_pll1_config = { +static struct alpha_pll_config disp_cc_pll1_config = { .l = 0x1f, .alpha = 0x4000, .config_ctl_val = 0x20485699, @@ -594,6 +594,13 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { { } }; +static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = { + F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), @@ -1750,6 +1757,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = { }; static const struct of_device_id disp_cc_sm8550_match_table[] = { + { .compatible = "qcom,sar2130p-dispcc" }, { .compatible = "qcom,sm8550-dispcc" }, { .compatible = "qcom,sm8650-dispcc" }, { } @@ -1780,6 +1788,12 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev) disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650; disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw; + } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) { + disp_cc_pll0_config.l = 0x1f; + disp_cc_pll0_config.alpha = 0x4000; + disp_cc_pll0_config.user_ctl_val = 0x1; + disp_cc_pll1_config.user_ctl_val = 0x1; + disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p; } clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); 
diff --git a/drivers/clk/qcom/dispcc0-sa8775p.c b/drivers/clk/qcom/dispcc0-sa8775p.c new file mode 100644 index 000000000000..6e399b5f1383 --- /dev/null +++ b/drivers/clk/qcom/dispcc0-sa8775p.c @@ -0,0 +1,1481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sa8775p-dispcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_IFACE, + DT_BI_TCXO, + DT_BI_TCXO_AO, + DT_SLEEP_CLK, + DT_DP0_PHY_PLL_LINK_CLK, + DT_DP0_PHY_PLL_VCO_DIV_CLK, + DT_DP1_PHY_PLL_LINK_CLK, + DT_DP1_PHY_PLL_VCO_DIV_CLK, + DT_DSI0_PHY_PLL_OUT_BYTECLK, + DT_DSI0_PHY_PLL_OUT_DSICLK, + DT_DSI1_PHY_PLL_OUT_BYTECLK, + DT_DSI1_PHY_PLL_OUT_DSICLK, +}; + +enum { + P_BI_TCXO, + P_DP0_PHY_PLL_LINK_CLK, + P_DP0_PHY_PLL_VCO_DIV_CLK, + P_DP1_PHY_PLL_LINK_CLK, + P_DP1_PHY_PLL_VCO_DIV_CLK, + P_DSI0_PHY_PLL_OUT_BYTECLK, + P_DSI0_PHY_PLL_OUT_DSICLK, + P_DSI1_PHY_PLL_OUT_BYTECLK, + P_DSI1_PHY_PLL_OUT_DSICLK, + P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, + P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, + P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, + P_SLEEP_CLK, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +static const struct alpha_pll_config mdss_0_disp_cc_pll0_config = { + .l = 0x3a, + .alpha = 0x9800, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll mdss_0_disp_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct alpha_pll_config mdss_0_disp_cc_pll1_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll mdss_0_disp_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map disp_cc_0_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, + { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DP0_PHY_PLL_LINK_CLK }, + { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK }, + { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, +}; + +static const struct parent_map disp_cc_0_parent_map_1[] = { + { 
P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_DSICLK, 3 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DSI0_PHY_PLL_OUT_DSICLK }, + { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, + { .index = DT_DSI1_PHY_PLL_OUT_DSICLK }, + { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, +}; + +static const struct parent_map disp_cc_0_parent_map_2[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_2[] = { + { .index = DT_BI_TCXO }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_2_ao[] = { + { .index = DT_BI_TCXO_AO }, +}; + +static const struct parent_map disp_cc_0_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, + { P_DP1_PHY_PLL_LINK_CLK, 2 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DP0_PHY_PLL_LINK_CLK }, + { .index = DT_DP1_PHY_PLL_LINK_CLK }, +}; + +static const struct parent_map disp_cc_0_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, + { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, +}; + +static const struct parent_map disp_cc_0_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_5[] = { + { .index = DT_BI_TCXO }, + { .hw = &mdss_0_disp_cc_pll1.clkr.hw }, + { .hw = &mdss_0_disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_0_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 1 }, + { P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &mdss_0_disp_cc_pll0.clkr.hw }, + { .hw = &mdss_0_disp_cc_pll1.clkr.hw }, + { .hw = &mdss_0_disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_0_parent_map_7[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data disp_cc_0_parent_data_7[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_ahb_clk_src[] = { + F(37500000, P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), + F(75000000, P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_ahb_clk_src = { + .cmd_rcgr = 0x824c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_5, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_ahb_clk_src", + .parent_data = disp_cc_0_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_byte0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_byte0_clk_src = { + .cmd_rcgr = 0x80ec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_1, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte0_clk_src", + .parent_data = disp_cc_0_parent_data_1, + 
.num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_byte1_clk_src = { + .cmd_rcgr = 0x8108, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_1, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte1_clk_src", + .parent_data = disp_cc_0_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_aux_clk_src = { + .cmd_rcgr = 0x81b8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_2, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_aux_clk_src", + .parent_data = disp_cc_0_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_crypto_clk_src = { + .cmd_rcgr = 0x8170, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_3, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_crypto_clk_src", + .parent_data = disp_cc_0_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_link_clk_src = { + .cmd_rcgr = 0x8154, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_3, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_link_clk_src", + .parent_data = disp_cc_0_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src = { + .cmd_rcgr = 0x8188, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_0, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src", + .parent_data = disp_cc_0_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src = { + .cmd_rcgr = 0x81a0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_0, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src", + .parent_data = disp_cc_0_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src = { + .cmd_rcgr = 0x826c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_0, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src", + .parent_data = disp_cc_0_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 
mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src = { + .cmd_rcgr = 0x8284, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_0, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src", + .parent_data = disp_cc_0_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_aux_clk_src = { + .cmd_rcgr = 0x8234, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_2, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_aux_clk_src", + .parent_data = disp_cc_0_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_crypto_clk_src = { + .cmd_rcgr = 0x821c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_3, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_crypto_clk_src", + .parent_data = disp_cc_0_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_link_clk_src = { + .cmd_rcgr = 0x8200, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_3, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_link_clk_src", + .parent_data = disp_cc_0_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src = { + .cmd_rcgr = 0x81d0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_0, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src", + .parent_data = disp_cc_0_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src = { + .cmd_rcgr = 0x81e8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_0, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src", + .parent_data = disp_cc_0_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_esc0_clk_src = { + .cmd_rcgr = 0x8124, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_4, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_esc0_clk_src", + .parent_data = disp_cc_0_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_esc1_clk_src = { + .cmd_rcgr = 0x813c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_4, + .freq_tbl = 
ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_esc1_clk_src", + .parent_data = disp_cc_0_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_mdp_clk_src[] = { + F(375000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(500000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(575000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(650000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_mdp_clk_src = { + .cmd_rcgr = 0x80bc, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_6, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_mdp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_mdp_clk_src", + .parent_data = disp_cc_0_parent_data_6, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_6), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_pclk0_clk_src = { + .cmd_rcgr = 0x808c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_1, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_pclk0_clk_src", + .parent_data = disp_cc_0_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_pclk1_clk_src = { + .cmd_rcgr = 0x80a4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_1, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_pclk1_clk_src", + .parent_data = disp_cc_0_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_mdss_vsync_clk_src = { + .cmd_rcgr = 0x80d4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_2, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_vsync_clk_src", + .parent_data = disp_cc_0_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_mdss_0_disp_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_0_disp_cc_sleep_clk_src = { + .cmd_rcgr = 0xc058, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_7, + .freq_tbl = ftbl_mdss_0_disp_cc_sleep_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_sleep_clk_src", + .parent_data = disp_cc_0_parent_data_7, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_7), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_0_disp_cc_xo_clk_src = { + .cmd_rcgr = 0xc03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_0_parent_map_2, + .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_xo_clk_src", + .parent_data = disp_cc_0_parent_data_2_ao, + .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div mdss_0_disp_cc_mdss_byte0_div_clk_src = { + .reg = 0x8104, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_regmap_div mdss_0_disp_cc_mdss_byte1_div_clk_src = { + .reg = 0x8120, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte1_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_byte1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_regmap_div mdss_0_disp_cc_mdss_dptx0_link_div_clk_src = { + .reg = 0x816c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div mdss_0_disp_cc_mdss_dptx1_link_div_clk_src = { + .reg = 0x8218, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_ahb1_clk = { + .halt_reg = 0x8088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_ahb1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_ahb_clk = { + .halt_reg = 0x8084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_byte0_clk = { + .halt_reg = 0x8034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_byte0_intf_clk = { + .halt_reg = 0x8038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte0_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_byte0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_byte1_clk = { + 
.halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_byte1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_byte1_intf_clk = { + .halt_reg = 0x8040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_byte1_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_byte1_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_aux_clk = { + .halt_reg = 0x805c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x805c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_crypto_clk = { + .halt_reg = 0x8058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_crypto_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_link_clk = { + .halt_reg = 0x804c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x804c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_link_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_link_intf_clk = { + .halt_reg = 0x8050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel0_clk = { + .halt_reg = 0x8060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel1_clk = { + .halt_reg = 0x8064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel1_clk", + .parent_hws = (const struct clk_hw*[]) { + 
&mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel2_clk = { + .halt_reg = 0x8264, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8264, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel2_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel3_clk = { + .halt_reg = 0x8268, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8268, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_pixel3_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk = { + .halt_reg = 0x8054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_aux_clk = { + .halt_reg = 0x8080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_crypto_clk = { + .halt_reg = 0x807c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x807c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_crypto_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_link_clk = { + .halt_reg = 0x8070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8070, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_link_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_link_intf_clk = { + .halt_reg = 0x8074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8074, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_pixel0_clk = { + .halt_reg = 0x8068, + 
.halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_pixel0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_pixel1_clk = { + .halt_reg = 0x806c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x806c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_pixel1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk = { + .halt_reg = 0x8078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8078, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_esc0_clk = { + .halt_reg = 0x8044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_esc0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_esc0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_esc1_clk = { + .halt_reg = 0x8048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_esc1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_esc1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_mdp1_clk = { + .halt_reg = 0x8014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_mdp1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_mdp_clk = { + .halt_reg = 0x800c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x800c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_mdp_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_mdp_lut1_clk = { + .halt_reg = 0x8024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_mdp_lut1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_mdp_lut_clk = { + .halt_reg = 0x801c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x801c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_mdp_lut_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_non_gdsc_ahb_clk = { + .halt_reg = 0xa004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xa004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_non_gdsc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_pclk0_clk = { + .halt_reg = 0x8004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_pclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_pclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_pclk1_clk = { + .halt_reg = 0x8008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_pclk1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_pclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_pll_lock_monitor_clk = { + .halt_reg = 0xe000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xe000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_pll_lock_monitor_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_rscc_ahb_clk = { + .halt_reg = 0xa00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_rscc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_rscc_vsync_clk = { + .halt_reg = 0xa008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_rscc_vsync_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_vsync1_clk = { + .halt_reg = 0x8030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_vsync1_clk", + .parent_hws = (const struct clk_hw*[]) { + 
&mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_mdss_vsync_clk = { + .halt_reg = 0x802c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x802c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_mdss_vsync_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_0_disp_cc_sm_obs_clk = { + .halt_reg = 0x11014, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x11014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_0_disp_cc_sm_obs_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc mdss_0_disp_cc_mdss_core_gdsc = { + .gdscr = 0x9000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "mdss_0_disp_cc_mdss_core_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, +}; + +static struct gdsc mdss_0_disp_cc_mdss_core_int2_gdsc = { + .gdscr = 0xd000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "mdss_0_disp_cc_mdss_core_int2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, +}; + +static struct clk_regmap *disp_cc_0_sa8775p_clocks[] = { + [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_0_disp_cc_mdss_ahb1_clk.clkr, + [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_0_disp_cc_mdss_ahb_clk.clkr, + [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_0_disp_cc_mdss_ahb_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_0_disp_cc_mdss_byte0_clk.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_0_disp_cc_mdss_byte0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_0_disp_cc_mdss_byte0_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_0_disp_cc_mdss_byte0_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_0_disp_cc_mdss_byte1_clk.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_0_disp_cc_mdss_byte1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_0_disp_cc_mdss_byte1_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_0_disp_cc_mdss_byte1_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_0_disp_cc_mdss_dptx0_aux_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_aux_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_0_disp_cc_mdss_dptx0_crypto_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_crypto_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_0_disp_cc_mdss_dptx0_link_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = + &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_0_disp_cc_mdss_dptx0_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel0_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel1_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel2_clk.clkr, 
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel3_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = + &mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_0_disp_cc_mdss_dptx1_aux_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_aux_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_0_disp_cc_mdss_dptx1_crypto_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_crypto_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_0_disp_cc_mdss_dptx1_link_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = + &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_0_disp_cc_mdss_dptx1_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_0_disp_cc_mdss_dptx1_pixel0_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_0_disp_cc_mdss_dptx1_pixel1_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = + &mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_0_disp_cc_mdss_esc0_clk.clkr, + [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_0_disp_cc_mdss_esc0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_0_disp_cc_mdss_esc1_clk.clkr, + [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_0_disp_cc_mdss_esc1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_0_disp_cc_mdss_mdp1_clk.clkr, + [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_0_disp_cc_mdss_mdp_clk.clkr, + [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_0_disp_cc_mdss_mdp_clk_src.clkr, + [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_0_disp_cc_mdss_mdp_lut1_clk.clkr, + [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_0_disp_cc_mdss_mdp_lut_clk.clkr, + [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_0_disp_cc_mdss_non_gdsc_ahb_clk.clkr, + [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_0_disp_cc_mdss_pclk0_clk.clkr, + [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_0_disp_cc_mdss_pclk0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_0_disp_cc_mdss_pclk1_clk.clkr, + [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_0_disp_cc_mdss_pclk1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_0_disp_cc_mdss_pll_lock_monitor_clk.clkr, + [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_0_disp_cc_mdss_rscc_ahb_clk.clkr, + [MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_0_disp_cc_mdss_rscc_vsync_clk.clkr, + [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_0_disp_cc_mdss_vsync1_clk.clkr, + [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_0_disp_cc_mdss_vsync_clk.clkr, + [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_0_disp_cc_mdss_vsync_clk_src.clkr, + [MDSS_DISP_CC_PLL0] = &mdss_0_disp_cc_pll0.clkr, + [MDSS_DISP_CC_PLL1] = &mdss_0_disp_cc_pll1.clkr, + [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_0_disp_cc_sleep_clk_src.clkr, + [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_0_disp_cc_sm_obs_clk.clkr, + [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_0_disp_cc_xo_clk_src.clkr, +}; + +static struct gdsc *disp_cc_0_sa8775p_gdscs[] = { + [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_0_disp_cc_mdss_core_gdsc, + [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = 
&mdss_0_disp_cc_mdss_core_int2_gdsc, +}; + +static const struct qcom_reset_map disp_cc_0_sa8775p_resets[] = { + [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, + [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 }, +}; + +static const struct regmap_config disp_cc_0_sa8775p_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x12414, + .fast_io = true, +}; + +static struct qcom_cc_desc disp_cc_0_sa8775p_desc = { + .config = &disp_cc_0_sa8775p_regmap_config, + .clks = disp_cc_0_sa8775p_clocks, + .num_clks = ARRAY_SIZE(disp_cc_0_sa8775p_clocks), + .resets = disp_cc_0_sa8775p_resets, + .num_resets = ARRAY_SIZE(disp_cc_0_sa8775p_resets), + .gdscs = disp_cc_0_sa8775p_gdscs, + .num_gdscs = ARRAY_SIZE(disp_cc_0_sa8775p_gdscs), +}; + +static const struct of_device_id disp_cc_0_sa8775p_match_table[] = { + { .compatible = "qcom,sa8775p-dispcc0" }, + { } +}; +MODULE_DEVICE_TABLE(of, disp_cc_0_sa8775p_match_table); + +static int disp_cc_0_sa8775p_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + + regmap = qcom_cc_map(pdev, &disp_cc_0_sa8775p_desc); + if (IS_ERR(regmap)) { + pm_runtime_put(&pdev->dev); + return PTR_ERR(regmap); + } + + clk_lucid_evo_pll_configure(&mdss_0_disp_cc_pll0, regmap, &mdss_0_disp_cc_pll0_config); + clk_lucid_evo_pll_configure(&mdss_0_disp_cc_pll1, regmap, &mdss_0_disp_cc_pll1_config); + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_0_DISP_CC_SLEEP_CLK */ + qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_0_DISP_CC_XO_CLK */ + + ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_0_sa8775p_desc, regmap); + + pm_runtime_put(&pdev->dev); + + return ret; +} + +static struct platform_driver disp_cc_0_sa8775p_driver = { + .probe = disp_cc_0_sa8775p_probe, + .driver = { + .name = "dispcc0-sa8775p", + .of_match_table = disp_cc_0_sa8775p_match_table, + }, +}; + +module_platform_driver(disp_cc_0_sa8775p_driver); + +MODULE_DESCRIPTION("QTI DISPCC0 SA8775P Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/dispcc1-sa8775p.c b/drivers/clk/qcom/dispcc1-sa8775p.c new file mode 100644 index 000000000000..30ccea59415a --- /dev/null +++ b/drivers/clk/qcom/dispcc1-sa8775p.c @@ -0,0 +1,1481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sa8775p-dispcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_IFACE, + DT_BI_TCXO, + DT_BI_TCXO_AO, + DT_SLEEP_CLK, + DT_DP0_PHY_PLL_LINK_CLK, + DT_DP0_PHY_PLL_VCO_DIV_CLK, + DT_DP1_PHY_PLL_LINK_CLK, + DT_DP1_PHY_PLL_VCO_DIV_CLK, + DT_DSI0_PHY_PLL_OUT_BYTECLK, + DT_DSI0_PHY_PLL_OUT_DSICLK, + DT_DSI1_PHY_PLL_OUT_BYTECLK, + DT_DSI1_PHY_PLL_OUT_DSICLK, +}; + +enum { + P_BI_TCXO, + P_DP0_PHY_PLL_LINK_CLK, + P_DP0_PHY_PLL_VCO_DIV_CLK, + P_DP1_PHY_PLL_LINK_CLK, + P_DP1_PHY_PLL_VCO_DIV_CLK, + P_DSI0_PHY_PLL_OUT_BYTECLK, + P_DSI0_PHY_PLL_OUT_DSICLK, + P_DSI1_PHY_PLL_OUT_BYTECLK, + P_DSI1_PHY_PLL_OUT_DSICLK, + P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, + P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, + P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, + P_SLEEP_CLK, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +static const struct alpha_pll_config mdss_1_disp_cc_pll0_config = { + .l = 0x3a, + .alpha = 0x9800, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll mdss_1_disp_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct alpha_pll_config mdss_1_disp_cc_pll1_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll mdss_1_disp_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map disp_cc_1_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, + { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DP0_PHY_PLL_LINK_CLK }, + { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK }, + { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, +}; + +static const struct parent_map disp_cc_1_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_DSICLK, 3 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DSI0_PHY_PLL_OUT_DSICLK }, + { .index = 
DT_DSI0_PHY_PLL_OUT_BYTECLK }, + { .index = DT_DSI1_PHY_PLL_OUT_DSICLK }, + { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, +}; + +static const struct parent_map disp_cc_1_parent_map_2[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_2[] = { + { .index = DT_BI_TCXO }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_2_ao[] = { + { .index = DT_BI_TCXO_AO }, +}; + +static const struct parent_map disp_cc_1_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, + { P_DP1_PHY_PLL_LINK_CLK, 2 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DP0_PHY_PLL_LINK_CLK }, + { .index = DT_DP1_PHY_PLL_LINK_CLK }, +}; + +static const struct parent_map disp_cc_1_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, + { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, +}; + +static const struct parent_map disp_cc_1_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_5[] = { + { .index = DT_BI_TCXO }, + { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, + { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_1_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 1 }, + { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &mdss_1_disp_cc_pll0.clkr.hw }, + { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, + { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_1_parent_map_7[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data disp_cc_1_parent_data_7_ao[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_ahb_clk_src[] = { + F(37500000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), + F(75000000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_ahb_clk_src = { + .cmd_rcgr = 0x824c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_5, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_ahb_clk_src", + .parent_data = disp_cc_1_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_byte0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_byte0_clk_src = { + .cmd_rcgr = 0x80ec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_1, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte0_clk_src", + .parent_data = disp_cc_1_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_byte1_clk_src = { + .cmd_rcgr = 0x8108, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_1, + .freq_tbl = 
ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte1_clk_src", + .parent_data = disp_cc_1_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_aux_clk_src = { + .cmd_rcgr = 0x81b8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_2, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk_src", + .parent_data = disp_cc_1_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_crypto_clk_src = { + .cmd_rcgr = 0x8170, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_3, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk_src", + .parent_data = disp_cc_1_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_link_clk_src = { + .cmd_rcgr = 0x8154, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_3, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_link_clk_src", + .parent_data = disp_cc_1_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src = { + .cmd_rcgr = 0x8188, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_0, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src", + .parent_data = disp_cc_1_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src = { + .cmd_rcgr = 0x81a0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_0, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src", + .parent_data = disp_cc_1_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src = { + .cmd_rcgr = 0x826c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_0, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src", + .parent_data = disp_cc_1_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src = { + .cmd_rcgr = 0x8284, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_0, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src", + 
.parent_data = disp_cc_1_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_aux_clk_src = { + .cmd_rcgr = 0x8234, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_2, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk_src", + .parent_data = disp_cc_1_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_crypto_clk_src = { + .cmd_rcgr = 0x821c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_3, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk_src", + .parent_data = disp_cc_1_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_link_clk_src = { + .cmd_rcgr = 0x8200, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_3, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_link_clk_src", + .parent_data = disp_cc_1_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src = { + .cmd_rcgr = 0x81d0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_0, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src", + .parent_data = disp_cc_1_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src = { + .cmd_rcgr = 0x81e8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_0, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src", + .parent_data = disp_cc_1_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_esc0_clk_src = { + .cmd_rcgr = 0x8124, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_4, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_esc0_clk_src", + .parent_data = disp_cc_1_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_esc1_clk_src = { + .cmd_rcgr = 0x813c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_4, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_esc1_clk_src", + .parent_data = disp_cc_1_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + 
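/*
 * Editor's note, not part of the patch: the MDP frequency table that
 * follows pairs with CLK_SET_RATE_PARENT on its RCG. Every entry keeps
 * the same pre-divider (3) off PLL0's main output, so selecting a new
 * MDP rate reprograms the PLL rather than the divider: 375 MHz puts the
 * PLL at 1125 MHz and 650 MHz puts it at 1950 MHz, both inside the
 * 249.6 MHz..2020 MHz Lucid EVO VCO range declared at the top of this
 * file. The F() macro (clk-rcg.h) stores the divider as (2 * div - 1),
 * matching the RCG CFG register's half-integer encoding.
 */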
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_mdp_clk_src[] = { + F(375000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(500000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(575000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(650000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_mdp_clk_src = { + .cmd_rcgr = 0x80bc, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_6, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_mdp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_mdp_clk_src", + .parent_data = disp_cc_1_parent_data_6, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_6), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk0_clk_src = { + .cmd_rcgr = 0x808c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_1, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_pclk0_clk_src", + .parent_data = disp_cc_1_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk1_clk_src = { + .cmd_rcgr = 0x80a4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_1, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_pclk1_clk_src", + .parent_data = disp_cc_1_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_mdss_vsync_clk_src = { + .cmd_rcgr = 0x80d4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_2, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_vsync_clk_src", + .parent_data = disp_cc_1_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_mdss_1_disp_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 mdss_1_disp_cc_sleep_clk_src = { + .cmd_rcgr = 0xc058, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_7, + .freq_tbl = ftbl_mdss_1_disp_cc_sleep_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_sleep_clk_src", + .parent_data = disp_cc_1_parent_data_7_ao, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_7_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 mdss_1_disp_cc_xo_clk_src = { + .cmd_rcgr = 0xc03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_1_parent_map_2, + .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_xo_clk_src", + .parent_data = disp_cc_1_parent_data_2_ao, + .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div mdss_1_disp_cc_mdss_byte0_div_clk_src = { + .reg = 0x8104, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + 
&mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_regmap_div mdss_1_disp_cc_mdss_byte1_div_clk_src = { + .reg = 0x8120, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte1_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx0_link_div_clk_src = { + .reg = 0x816c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx1_link_div_clk_src = { + .reg = 0x8218, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_ahb1_clk = { + .halt_reg = 0x8088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_ahb1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_ahb_clk = { + .halt_reg = 0x8084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_byte0_clk = { + .halt_reg = 0x8034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_byte0_intf_clk = { + .halt_reg = 0x8038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte0_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_byte1_clk = { + .halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw, + }, + 
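+			/* As with byte0 above: the byte branch takes the byte RCG directly, while the matching *_intf branch is fed from the *_div post-divider. */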
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_byte1_intf_clk = { + .halt_reg = 0x8040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_byte1_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_aux_clk = { + .halt_reg = 0x805c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x805c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_crypto_clk = { + .halt_reg = 0x8058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_clk = { + .halt_reg = 0x804c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x804c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_link_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_intf_clk = { + .halt_reg = 0x8050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel0_clk = { + .halt_reg = 0x8060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel1_clk = { + .halt_reg = 0x8064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel2_clk = { + .halt_reg = 0x8264, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8264, + .enable_mask = BIT(0), + 
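+		/* Here, as in the other MDSS branches, halt_reg and enable_reg name the same register (0x8264) and BIT(0) is the gate bit. */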
.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel3_clk = { + .halt_reg = 0x8268, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8268, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk = { + .halt_reg = 0x8054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_aux_clk = { + .halt_reg = 0x8080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_crypto_clk = { + .halt_reg = 0x807c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x807c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_clk = { + .halt_reg = 0x8070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8070, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_link_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_intf_clk = { + .halt_reg = 0x8074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8074, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel0_clk = { + .halt_reg = 0x8068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel1_clk = { + .halt_reg = 0x806c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x806c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk = { + .halt_reg = 0x8078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8078, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_esc0_clk = { + .halt_reg = 0x8044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_esc0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_esc0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_esc1_clk = { + .halt_reg = 0x8048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_esc1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_esc1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_mdp1_clk = { + .halt_reg = 0x8014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_mdp1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_mdp_clk = { + .halt_reg = 0x800c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x800c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_mdp_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut1_clk = { + .halt_reg = 0x8024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_mdp_lut1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut_clk = { + .halt_reg = 0x801c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x801c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_mdp_lut_clk", + 
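+				/* Both LUT branches use BRANCH_HALT_VOTED rather than BRANCH_HALT; on a voted register the halt bit is presumably not polled on disable, since another voter may keep the branch running. */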
.parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_non_gdsc_ahb_clk = { + .halt_reg = 0xa004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xa004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_non_gdsc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_pclk0_clk = { + .halt_reg = 0x8004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_pclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_pclk1_clk = { + .halt_reg = 0x8008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_pclk1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_pll_lock_monitor_clk = { + .halt_reg = 0xe000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xe000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_pll_lock_monitor_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_rscc_ahb_clk = { + .halt_reg = 0xa00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_rscc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_rscc_vsync_clk = { + .halt_reg = 0xa008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_rscc_vsync_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_vsync1_clk = { + .halt_reg = 0x8030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_vsync1_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_mdss_vsync_clk = { + .halt_reg = 0x802c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x802c, + .enable_mask = BIT(0), + 
.hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_mdss_vsync_clk", + .parent_hws = (const struct clk_hw*[]) { + &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mdss_1_disp_cc_sm_obs_clk = { + .halt_reg = 0x11014, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x11014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "mdss_1_disp_cc_sm_obs_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc mdss_1_disp_cc_mdss_core_gdsc = { + .gdscr = 0x9000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "mdss_1_disp_cc_mdss_core_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, +}; + +static struct gdsc mdss_1_disp_cc_mdss_core_int2_gdsc = { + .gdscr = 0xd000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "mdss_1_disp_cc_mdss_core_int2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, +}; + +static struct clk_regmap *disp_cc_1_sa8775p_clocks[] = { + [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_1_disp_cc_mdss_ahb1_clk.clkr, + [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_1_disp_cc_mdss_ahb_clk.clkr, + [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_1_disp_cc_mdss_ahb_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_1_disp_cc_mdss_byte0_clk.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_1_disp_cc_mdss_byte0_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_1_disp_cc_mdss_byte1_clk.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_1_disp_cc_mdss_byte1_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx0_aux_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = + &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr, + 
[MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = + &mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx1_aux_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = + &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = + &mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr, + [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_1_disp_cc_mdss_esc0_clk.clkr, + [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_1_disp_cc_mdss_esc0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_1_disp_cc_mdss_esc1_clk.clkr, + [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_1_disp_cc_mdss_esc1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_1_disp_cc_mdss_mdp1_clk.clkr, + [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_1_disp_cc_mdss_mdp_clk.clkr, + [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_1_disp_cc_mdss_mdp_clk_src.clkr, + [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_1_disp_cc_mdss_mdp_lut1_clk.clkr, + [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_1_disp_cc_mdss_mdp_lut_clk.clkr, + [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_1_disp_cc_mdss_non_gdsc_ahb_clk.clkr, + [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_1_disp_cc_mdss_pclk0_clk.clkr, + [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr, + [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_1_disp_cc_mdss_pclk1_clk.clkr, + [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr, + [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_1_disp_cc_mdss_pll_lock_monitor_clk.clkr, + [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_1_disp_cc_mdss_rscc_ahb_clk.clkr, + [MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_1_disp_cc_mdss_rscc_vsync_clk.clkr, + [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_1_disp_cc_mdss_vsync1_clk.clkr, + [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_1_disp_cc_mdss_vsync_clk.clkr, + [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_1_disp_cc_mdss_vsync_clk_src.clkr, + [MDSS_DISP_CC_PLL0] = &mdss_1_disp_cc_pll0.clkr, + [MDSS_DISP_CC_PLL1] = &mdss_1_disp_cc_pll1.clkr, + [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_1_disp_cc_sleep_clk_src.clkr, + [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_1_disp_cc_sm_obs_clk.clkr, + [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_1_disp_cc_xo_clk_src.clkr, +}; + +static struct gdsc *disp_cc_1_sa8775p_gdscs[] = { + [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_1_disp_cc_mdss_core_gdsc, + [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = &mdss_1_disp_cc_mdss_core_int2_gdsc, +}; + +static const struct qcom_reset_map disp_cc_1_sa8775p_resets[] = { + [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, + [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 }, +}; + +static const struct regmap_config disp_cc_1_sa8775p_regmap_config = { + .reg_bits = 
32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x12414, + .fast_io = true, +}; + +static struct qcom_cc_desc disp_cc_1_sa8775p_desc = { + .config = &disp_cc_1_sa8775p_regmap_config, + .clks = disp_cc_1_sa8775p_clocks, + .num_clks = ARRAY_SIZE(disp_cc_1_sa8775p_clocks), + .resets = disp_cc_1_sa8775p_resets, + .num_resets = ARRAY_SIZE(disp_cc_1_sa8775p_resets), + .gdscs = disp_cc_1_sa8775p_gdscs, + .num_gdscs = ARRAY_SIZE(disp_cc_1_sa8775p_gdscs), +}; + +static const struct of_device_id disp_cc_1_sa8775p_match_table[] = { + { .compatible = "qcom,sa8775p-dispcc1" }, + { } +}; +MODULE_DEVICE_TABLE(of, disp_cc_1_sa8775p_match_table); + +static int disp_cc_1_sa8775p_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + + regmap = qcom_cc_map(pdev, &disp_cc_1_sa8775p_desc); + if (IS_ERR(regmap)) { + pm_runtime_put(&pdev->dev); + return PTR_ERR(regmap); + } + + clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll0, regmap, &mdss_1_disp_cc_pll0_config); + clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll1, regmap, &mdss_1_disp_cc_pll1_config); + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_1_DISP_CC_SLEEP_CLK */ + qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_1_DISP_CC_XO_CLK */ + + ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_1_sa8775p_desc, regmap); + + pm_runtime_put(&pdev->dev); + + return ret; +} + +static struct platform_driver disp_cc_1_sa8775p_driver = { + .probe = disp_cc_1_sa8775p_probe, + .driver = { + .name = "dispcc1-sa8775p", + .of_match_table = disp_cc_1_sa8775p_match_table, + }, +}; + +module_platform_driver(disp_cc_1_sa8775p_driver); + +MODULE_DESCRIPTION("QTI DISPCC1 SA8775P Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c index 9536b2b7d07c..9246e97d785a 100644 --- a/drivers/clk/qcom/gcc-ipq5332.c +++ b/drivers/clk/qcom/gcc-ipq5332.c @@ -2185,150 +2185,6 @@ static struct clk_branch gcc_prng_ahb_clk = { }, }; -static struct clk_branch gcc_q6_ahb_clk = { - .halt_reg = 0x25014, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x25014, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_ahb_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_wcss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6_ahb_s_clk = { - .halt_reg = 0x25018, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x25018, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_ahb_s_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_wcss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6_axim_clk = { - .halt_reg = 0x2500c, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x2500c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_axim_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_q6_axim_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6_axis_clk = { - .halt_reg = 0x25010, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x25010, - .enable_mask = BIT(0), - 
.hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_axis_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_system_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6_tsctr_1to2_clk = { - .halt_reg = 0x25020, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x25020, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_tsctr_1to2_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_tsctr_div2_clk_src.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6ss_atbm_clk = { - .halt_reg = 0x2501c, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x2501c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_atbm_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_at_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6ss_pclkdbg_clk = { - .halt_reg = 0x25024, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x25024, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_pclkdbg_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_dap_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6ss_trig_clk = { - .halt_reg = 0x250a0, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x250a0, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_trig_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_dap_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_qdss_at_clk = { .halt_reg = 0x2d038, .halt_check = BRANCH_HALT_VOTED, @@ -2756,24 +2612,6 @@ static struct clk_branch gcc_sys_noc_at_clk = { }, }; -static struct clk_branch gcc_sys_noc_wcss_ahb_clk = { - .halt_reg = 0x2e030, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x2e030, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_sys_noc_wcss_ahb_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_wcss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_uniphy0_ahb_clk = { .halt_reg = 0x16010, .halt_check = BRANCH_HALT, @@ -2989,204 +2827,6 @@ static struct clk_branch gcc_usb0_sleep_clk = { }, }; -static struct clk_branch gcc_wcss_axim_clk = { - .halt_reg = 0x2505c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x2505c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_axim_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_system_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_axis_clk = { - .halt_reg = 0x25060, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x25060, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_axis_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_system_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = 
&clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_apb_bdg_clk = { - .halt_reg = 0x25048, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x25048, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_apb_bdg_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_dap_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = { - .halt_reg = 0x25038, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x25038, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_apb_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_dap_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_atb_bdg_clk = { - .halt_reg = 0x2504c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x2504c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_atb_bdg_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_at_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = { - .halt_reg = 0x2503c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x2503c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_atb_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_at_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_nts_bdg_clk = { - .halt_reg = 0x25050, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x25050, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_nts_bdg_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_tsctr_div2_clk_src.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = { - .halt_reg = 0x25040, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x25040, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_nts_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_qdss_tsctr_div2_clk_src.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_ecahb_clk = { - .halt_reg = 0x25058, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x25058, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_ecahb_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_wcss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_mst_async_bdg_clk = { - .halt_reg = 0x2e0b0, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x2e0b0, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_mst_async_bdg_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_system_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_slv_async_bdg_clk = { - 
.halt_reg = 0x2e0b4, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x2e0b4, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_slv_async_bdg_clk", - .parent_hws = (const struct clk_hw*[]) { - &gcc_system_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_xo_clk = { .halt_reg = 0x34018, .halt_check = BRANCH_HALT, @@ -3362,15 +3002,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = { [GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr, [GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr, [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, - [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr, - [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr, - [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr, [GCC_Q6_AXIM_CLK_SRC] = &gcc_q6_axim_clk_src.clkr, - [GCC_Q6_AXIS_CLK] = &gcc_q6_axis_clk.clkr, - [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr, - [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr, - [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr, - [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr, [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr, [GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr, [GCC_QDSS_CFG_AHB_CLK] = &gcc_qdss_cfg_ahb_clk.clkr, @@ -3400,7 +3032,6 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = { [GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr, [GCC_SNOC_USB_CLK] = &gcc_snoc_usb_clk.clkr, [GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr, - [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr, [GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr, [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr, [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr, @@ -3421,17 +3052,6 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = { [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr, [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr, [GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr, - [GCC_WCSS_AXIM_CLK] = &gcc_wcss_axim_clk.clkr, - [GCC_WCSS_AXIS_CLK] = &gcc_wcss_axis_clk.clkr, - [GCC_WCSS_DBG_IFC_APB_BDG_CLK] = &gcc_wcss_dbg_ifc_apb_bdg_clk.clkr, - [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr, - [GCC_WCSS_DBG_IFC_ATB_BDG_CLK] = &gcc_wcss_dbg_ifc_atb_bdg_clk.clkr, - [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr, - [GCC_WCSS_DBG_IFC_NTS_BDG_CLK] = &gcc_wcss_dbg_ifc_nts_bdg_clk.clkr, - [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr, - [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr, - [GCC_WCSS_MST_ASYNC_BDG_CLK] = &gcc_wcss_mst_async_bdg_clk.clkr, - [GCC_WCSS_SLV_ASYNC_BDG_CLK] = &gcc_wcss_slv_async_bdg_clk.clkr, [GCC_XO_CLK] = &gcc_xo_clk.clkr, [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr, [GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr, @@ -3622,7 +3242,7 @@ static const struct qcom_reset_map gcc_ipq5332_resets[] = { #define IPQ_APPS_ID 5332 /* some unique value */ -static struct qcom_icc_hws_data icc_ipq5332_hws[] = { +static const struct qcom_icc_hws_data icc_ipq5332_hws[] = { { MASTER_SNOC_PCIE3_1_M, SLAVE_SNOC_PCIE3_1_M, GCC_SNOC_PCIE3_1LANE_M_CLK }, { MASTER_ANOC_PCIE3_1_S, SLAVE_ANOC_PCIE3_1_S, GCC_SNOC_PCIE3_1LANE_S_CLK }, { MASTER_SNOC_PCIE3_2_M, SLAVE_SNOC_PCIE3_2_M, GCC_SNOC_PCIE3_2LANE_M_CLK }, diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c new file mode 100644 index 000000000000..88a7d5b2e751 --- /dev/null +++ b/drivers/clk/qcom/gcc-ipq5424.c @@ -0,0 +1,3291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018,2020 The Linux Foundation. 
All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,ipq5424-gcc.h> +#include <dt-bindings/reset/qcom,ipq5424-gcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "clk-regmap-phy-mux.h" +#include "common.h" +#include "reset.h" + +enum { + DT_XO, + DT_SLEEP_CLK, + DT_PCIE30_PHY0_PIPE_CLK, + DT_PCIE30_PHY1_PIPE_CLK, + DT_PCIE30_PHY2_PIPE_CLK, + DT_PCIE30_PHY3_PIPE_CLK, + DT_USB_PCIE_WRAPPER_PIPE_CLK, +}; + +enum { + P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, + P_GPLL0_OUT_AUX, + P_GPLL0_OUT_MAIN, + P_GPLL2_OUT_AUX, + P_GPLL2_OUT_MAIN, + P_GPLL4_OUT_AUX, + P_GPLL4_OUT_MAIN, + P_SLEEP_CLK, + P_XO, + P_USB3PHY_0_PIPE, +}; + +static const struct clk_parent_data gcc_parent_data_xo = { .index = DT_XO }; + +static struct clk_alpha_pll gpll0 = { + .offset = 0x20000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .clkr = { + .enable_reg = 0xb000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpll0", + .parent_data = &gcc_parent_data_xo, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_fixed_factor gpll0_div2 = { + .mult = 1, + .div = 2, + .hw.init = &(const struct clk_init_data) { + .name = "gpll0_div2", + .parent_hws = (const struct clk_hw *[]) { + &gpll0.clkr.hw + }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll gpll2 = { + .offset = 0x21000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA], + .clkr = { + .enable_reg = 0xb000, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gpll2", + .parent_data = &gcc_parent_data_xo, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll2_out_main[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll2_out_main = { + .offset = 0x21000, + .post_div_table = post_div_table_gpll2_out_main, + .num_post_div = ARRAY_SIZE(post_div_table_gpll2_out_main), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpll2_out_main", + .parent_hws = (const struct clk_hw*[]) { + &gpll2.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_ro_ops, + }, +}; + +static struct clk_alpha_pll gpll4 = { + .offset = 0x22000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO], + .clkr = { + .enable_reg = 0xb000, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gpll4", + .parent_data = &gcc_parent_data_xo, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + /* + * There are no consumers for this GPLL in the kernel yet + * (they will be added soon), so the clock framework + * disables this source. But some of the clocks + * initialized by boot loaders use this source. So we + * need to keep this clock ON. Add the + * CLK_IGNORE_UNUSED flag so the clock will not be + * disabled. Once the consumer in the kernel is added, we + * can get rid of this flag. 
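+ * (A further note: CLK_IGNORE_UNUSED only exempts the PLL from the + * late clk_disable_unused() sweep at boot; unlike CLK_IS_CRITICAL, it + * does not prevent an explicit disable once a consumer exists.)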
+ */ + .flags = CLK_IGNORE_UNUSED, + }, + }, +}; + +static const struct parent_map gcc_parent_map_xo[] = { + { P_XO, 0 }, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 }, +}; + +static const struct clk_parent_data gcc_parent_data_0[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0_div2.hw }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, +}; + +static const struct clk_parent_data gcc_parent_data_1[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL4_OUT_MAIN, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_2[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll4.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_XO, 0 }, + { P_GPLL4_OUT_MAIN, 1 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 }, +}; + +static const struct clk_parent_data gcc_parent_data_3[] = { + { .index = DT_XO }, + { .hw = &gpll4.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0_div2.hw }, +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_XO, 0 }, +}; + +static const struct clk_parent_data gcc_parent_data_4[] = { + { .index = DT_XO }, +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_XO, 0 }, + { P_GPLL4_OUT_AUX, 1 }, + { P_GPLL0_OUT_MAIN, 3 }, + { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 }, +}; + +static const struct clk_parent_data gcc_parent_data_5[] = { + { .index = DT_XO }, + { .hw = &gpll4.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0_div2.hw }, +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_SLEEP_CLK, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_6[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_SLEEP_CLK, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_7[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL2_OUT_MAIN, 2 }, + { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 }, +}; + +static const struct clk_parent_data gcc_parent_data_8[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll2_out_main.clkr.hw }, + { .hw = &gpll0_div2.hw }, +}; + +static const struct parent_map gcc_parent_map_9[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL4_OUT_MAIN, 2 }, + { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 }, +}; + +static const struct clk_parent_data gcc_parent_data_9[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll4.clkr.hw }, + { .hw = &gpll0_div2.hw }, +}; + +static const struct parent_map gcc_parent_map_10[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_SLEEP_CLK, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_10[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map gcc_parent_map_11[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL2_OUT_AUX, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_11[] = { + { .index = DT_XO }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll2.clkr.hw }, +}; + +static const struct freq_tbl 
ftbl_gcc_adss_pwm_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_adss_pwm_clk_src = { + .cmd_rcgr = 0x1c004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_adss_pwm_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_adss_pwm_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_nss_ts_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_xo_clk_src = { + .cmd_rcgr = 0x34004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo, + .freq_tbl = ftbl_gcc_nss_ts_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_xo_clk_src", + .parent_data = &gcc_parent_data_xo, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_fixed_factor gcc_xo_div4_clk_src = { + .mult = 1, + .div = 4, + .hw.init = &(const struct clk_init_data) { + .name = "gcc_xo_div4_clk_src", + .parent_hws = (const struct clk_hw *[]) { + &gcc_xo_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_rcg2 gcc_nss_ts_clk_src = { + .cmd_rcgr = 0x17088, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_nss_ts_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_nss_ts_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = ARRAY_SIZE(gcc_parent_data_4), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie0_axi_m_clk_src[] = { + F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie0_axi_m_clk_src = { + .cmd_rcgr = 0x28018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_axi_m_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie0_axi_s_clk_src = { + .cmd_rcgr = 0x28020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_axi_s_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie1_axi_m_clk_src = { + .cmd_rcgr = 0x29018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_axi_m_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie1_axi_s_clk_src = { + .cmd_rcgr = 0x29020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_axi_s_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie2_axi_m_clk_src[] = { + F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie2_axi_m_clk_src = { + .cmd_rcgr = 
0x2a018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie2_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_axi_m_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie2_axi_s_clk_src = { + .cmd_rcgr = 0x2a020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_axi_s_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie3_axi_m_clk_src = { + .cmd_rcgr = 0x2b018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie2_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_axi_m_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie3_axi_s_clk_src = { + .cmd_rcgr = 0x2b020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_axi_s_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_aux_clk_src[] = { + F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4), + { } +}; + +static struct clk_rcg2 gcc_pcie_aux_clk_src = { + .cmd_rcgr = 0x28004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_pcie_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_aux_clk_src", + .parent_data = gcc_parent_data_7, + .num_parents = ARRAY_SIZE(gcc_parent_data_7), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_i2c0_clk_src[] = { + F(4800000, P_XO, 5, 0, 0), + F(9600000, P_XO, 2.5, 0, 0), + F(24000000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qupv3_i2c0_clk_src = { + .cmd_rcgr = 0x2018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_i2c0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_i2c0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_i2c1_clk_src = { + .cmd_rcgr = 0x3018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_i2c0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_i2c1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_spi0_clk_src[] = { + F(960000, P_XO, 10, 2, 5), + F(4800000, P_XO, 5, 0, 0), + F(9600000, P_XO, 2, 4, 5), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(24000000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(32000000, P_GPLL0_OUT_MAIN, 10, 2, 5), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qupv3_spi0_clk_src = { + .cmd_rcgr = 0x4004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = 
gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_spi0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_spi0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_spi1_clk_src = { + .cmd_rcgr = 0x5004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_spi0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_spi1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_uart0_clk_src[] = { + F(960000, P_XO, 10, 2, 5), + F(4800000, P_XO, 5, 0, 0), + F(9600000, P_XO, 2, 4, 5), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(24000000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qupv3_uart0_clk_src = { + .cmd_rcgr = 0x202c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_uart0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_uart0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_uart1_clk_src = { + .cmd_rcgr = 0x302c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_uart0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_uart1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { + F(144000, P_XO, 16, 12, 125), + F(400000, P_XO, 12, 1, 5), + F(24000000, P_XO, 1, 0, 0), + F(48000000, P_GPLL2_OUT_MAIN, 12, 1, 2), + F(96000000, P_GPLL2_OUT_MAIN, 6, 1, 2), + F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(192000000, P_GPLL2_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0x33004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_8, + .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_apps_clk_src", + .parent_data = gcc_parent_data_8, + .num_parents = ARRAY_SIZE(gcc_parent_data_8), + .ops = &clk_rcg2_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = { + F(300000000, P_GPLL4_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x33018, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_9, + .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_data = gcc_parent_data_9, + .num_parents = ARRAY_SIZE(gcc_parent_data_9), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_uniphy_sys_clk_src = { + .cmd_rcgr = 0x17090, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_nss_ts_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy_sys_clk_src", + .parent_data = &gcc_parent_data_xo, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb0_aux_clk_src = { + .cmd_rcgr = 
0x2c018, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_10, + .freq_tbl = ftbl_gcc_nss_ts_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_aux_clk_src", + .parent_data = gcc_parent_data_10, + .num_parents = ARRAY_SIZE(gcc_parent_data_10), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb0_master_clk_src[] = { + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb0_master_clk_src = { + .cmd_rcgr = 0x2c004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb0_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb0_mock_utmi_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(60000000, P_GPLL4_OUT_AUX, 10, 1, 2), + { } +}; + +static struct clk_rcg2 gcc_usb0_mock_utmi_clk_src = { + .cmd_rcgr = 0x2c02c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_mock_utmi_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = ARRAY_SIZE(gcc_parent_data_5), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb1_mock_utmi_clk_src = { + .cmd_rcgr = 0x3c004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb1_mock_utmi_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = ARRAY_SIZE(gcc_parent_data_5), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_wcss_ahb_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_wcss_ahb_clk_src = { + .cmd_rcgr = 0x25030, + .freq_tbl = ftbl_gcc_wcss_ahb_clk_src, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_wcss_ahb_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qdss_at_clk_src[] = { + F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qdss_at_clk_src = { + .cmd_rcgr = 0x2d004, + .freq_tbl = ftbl_gcc_qdss_at_clk_src, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qdss_at_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qdss_tsctr_clk_src[] = { + F(600000000, P_GPLL4_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qdss_tsctr_clk_src = { + .cmd_rcgr = 0x2d01c, + .freq_tbl = ftbl_gcc_qdss_tsctr_clk_src, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qdss_tsctr_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_fixed_factor gcc_qdss_tsctr_div2_clk_src = { + .mult = 1, + .div = 2, + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qdss_tsctr_div2_clk_src", + .parent_hws = (const struct clk_hw *[]) { + &gcc_qdss_tsctr_clk_src.clkr.hw + }, + 
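+		/* Fixed /2 of the QDSS TSCTR RCG above (600 MHz in its table, so 300 MHz here); gcc_qdss_dap_sync_clk_src below divides the same source by 4. */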
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_fixed_factor gcc_qdss_dap_sync_clk_src = { + .mult = 1, + .div = 4, + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qdss_dap_sync_clk_src", + .parent_hws = (const struct clk_hw *[]) { + &gcc_qdss_tsctr_clk_src.clkr.hw + }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_system_noc_bfdcd_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_system_noc_bfdcd_clk_src = { + .cmd_rcgr = 0x2e004, + .freq_tbl = ftbl_gcc_system_noc_bfdcd_clk_src, + .hid_width = 5, + .parent_map = gcc_parent_map_9, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_system_noc_bfdcd_clk_src", + .parent_data = gcc_parent_data_9, + .num_parents = ARRAY_SIZE(gcc_parent_data_9), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcnoc_bfdcd_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcnoc_bfdcd_clk_src = { + .cmd_rcgr = 0x31004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcnoc_bfdcd_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcnoc_bfdcd_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_lpass_sway_clk_src[] = { + F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_lpass_sway_clk_src = { + .cmd_rcgr = 0x27004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_lpass_sway_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_lpass_sway_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_lpass_axim_clk_src = { + .cmd_rcgr = 0x2700c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_lpass_sway_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_lpass_axim_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_fixed_factor gcc_eud_at_div_clk_src = { + .mult = 1, + .div = 6, + .hw.init = &(const struct clk_init_data) { + .name = "gcc_eud_at_div_clk_src", + .parent_hws = (const struct clk_hw *[]) { + &gcc_qdss_at_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sleep_clk_src = { + .cmd_rcgr = 0x3400c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_sleep_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sleep_clk_src", + .parent_data = gcc_parent_data_6, + .num_parents = ARRAY_SIZE(gcc_parent_data_6), + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qpic_io_macro_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(320000000, 
P_GPLL0_OUT_MAIN, 2.5, 0, 0), + F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qpic_io_macro_clk_src = { + .cmd_rcgr = 0x32004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_11, + .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qpic_io_macro_clk_src", + .parent_data = gcc_parent_data_11, + .num_parents = ARRAY_SIZE(gcc_parent_data_11), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qpic_clk_src = { + .cmd_rcgr = 0x32020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_11, + .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qpic_clk_src", + .parent_data = gcc_parent_data_11, + .num_parents = ARRAY_SIZE(gcc_parent_data_11), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie0_rchng_clk_src = { + .cmd_rcgr = 0x28028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_adss_pwm_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_rchng_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie1_rchng_clk_src = { + .cmd_rcgr = 0x29028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_adss_pwm_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_rchng_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie2_rchng_clk_src = { + .cmd_rcgr = 0x2a028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_adss_pwm_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_rchng_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie3_rchng_clk_src = { + .cmd_rcgr = 0x2b028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_adss_pwm_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_rchng_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_regmap_div gcc_qupv3_i2c0_div_clk_src = { + .reg = 0x2020, + .shift = 0, + .width = 2, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_i2c0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_i2c0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_qupv3_i2c1_div_clk_src = { + .reg = 0x3020, + .shift = 0, + .width = 2, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_i2c1_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_i2c1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb0_mock_utmi_div_clk_src = { + .reg = 0x2c040, + .shift = 0, + .width = 2, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_mock_utmi_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb0_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb1_mock_utmi_div_clk_src = { + .reg = 0x3c018, + .shift = 0, + .width = 2, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb1_mock_utmi_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb1_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch gcc_adss_pwm_clk = { + .halt_reg = 0x1c00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1c00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_adss_pwm_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_adss_pwm_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_apss_dbg_clk = { + .halt_reg = 0x2402c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2402c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_apss_dbg_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qdss_dap_sync_clk_src.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie0_1lane_s_clk = { + .halt_reg = 0x31088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x31088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie0_1lane_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie0_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie1_1lane_s_clk = { + .halt_reg = 0x3108c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3108c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie1_1lane_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie1_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie2_2lane_s_clk = { + .halt_reg = 0x31090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x31090, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie2_2lane_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie2_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie3_2lane_s_clk = { + .halt_reg = 0x31094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x31094, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie3_2lane_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie3_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_usb_clk = { + .halt_reg = 0x310a8, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x310a8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_usb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb0_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdio_ahb_clk = { + .halt_reg = 0x17040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17040, + 
.enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_mdio_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nss_ts_clk = { + .halt_reg = 0x17018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x17018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nss_ts_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_nss_ts_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nsscc_clk = { + .halt_reg = 0x17034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nsscc_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nsscfg_clk = { + .halt_reg = 0x1702c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1702c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nsscfg_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_atb_clk = { + .halt_reg = 0x17014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_atb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qdss_at_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_nsscc_clk = { + .halt_reg = 0x17030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_nsscc_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_pcnoc_1_clk = { + .halt_reg = 0x17080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_pcnoc_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_qosgen_ref_clk = { + .halt_reg = 0x1701c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1701c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_qosgen_ref_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_xo_div4_clk_src.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_snoc_1_clk = { + .halt_reg = 0x1707c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1707c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_snoc_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_system_noc_bfdcd_clk_src.clkr.hw + }, + .num_parents = 1, + 
.flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_snoc_clk = { + .halt_reg = 0x17028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_snoc_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_system_noc_bfdcd_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_timeout_ref_clk = { + .halt_reg = 0x17020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_timeout_ref_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_xo_div4_clk_src.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_nssnoc_xo_dcd_clk = { + .halt_reg = 0x17074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17074, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_nssnoc_xo_dcd_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_ahb_clk = { + .halt_reg = 0x28030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x28030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_aux_clk = { + .halt_reg = 0x28070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x28070, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_axi_m_clk = { + .halt_reg = 0x28038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x28038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_axi_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie0_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_anoc_pcie0_1lane_m_clk = { + .halt_reg = 0x2e07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2e07c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_anoc_pcie0_1lane_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie0_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_axi_s_bridge_clk = { + .halt_reg = 0x28048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x28048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_axi_s_bridge_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie0_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_axi_s_clk = { + .halt_reg = 0x28040, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x28040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_axi_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie0_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie0_pipe_clk_src = { + .reg = 0x28064, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "pcie0_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE30_PHY0_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_pipe_clk = { + .halt_reg = 0x28068, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x28068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_pipe_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie0_pipe_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_ahb_clk = { + .halt_reg = 0x29030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x29030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_aux_clk = { + .halt_reg = 0x29074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x29074, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_axi_m_clk = { + .halt_reg = 0x29038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x29038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_axi_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie1_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_anoc_pcie1_1lane_m_clk = { + .halt_reg = 0x2e084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2e084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_anoc_pcie1_1lane_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie1_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_axi_s_bridge_clk = { + .halt_reg = 0x29048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x29048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_axi_s_bridge_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie1_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_axi_s_clk = { + .halt_reg = 0x29040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x29040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_axi_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie1_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, 
+ .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie1_pipe_clk_src = { + .reg = 0x29064, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "pcie1_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE30_PHY1_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_pipe_clk = { + .halt_reg = 0x29068, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x29068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_pipe_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie1_pipe_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_ahb_clk = { + .halt_reg = 0x2a030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_aux_clk = { + .halt_reg = 0x2a078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a078, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_axi_m_clk = { + .halt_reg = 0x2a038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_axi_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie2_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_anoc_pcie2_2lane_m_clk = { + .halt_reg = 0x2e080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2e080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_anoc_pcie2_2lane_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie2_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_axi_s_bridge_clk = { + .halt_reg = 0x2a048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_axi_s_bridge_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie2_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_axi_s_clk = { + .halt_reg = 0x2a040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_axi_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie2_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie2_pipe_clk_src = { + .reg = 0x2a064, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "pcie2_pipe_clk_src", + 
.parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE30_PHY2_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_pipe_clk = { + .halt_reg = 0x2a068, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x2a068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_pipe_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie2_pipe_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_ahb_clk = { + .halt_reg = 0x2b030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2b030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_aux_clk = { + .halt_reg = 0x2b07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2b07c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_axi_m_clk = { + .halt_reg = 0x2b038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2b038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_axi_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie3_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_anoc_pcie3_2lane_m_clk = { + .halt_reg = 0x2e090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2e090, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_anoc_pcie3_2lane_m_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie3_axi_m_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_axi_s_bridge_clk = { + .halt_reg = 0x2b048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2b048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_axi_s_bridge_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie3_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_axi_s_clk = { + .halt_reg = 0x2b040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2b040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_axi_s_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie3_axi_s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie3_pipe_clk_src = { + .reg = 0x2b064, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "pcie3_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE30_PHY3_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_pipe_clk = { + .halt_reg = 0x2b068, + .halt_check = 
BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x2b068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_pipe_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie3_pipe_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x13024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb004, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_prng_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_ahb_mst_clk = { + .halt_reg = 0x1014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb004, + .enable_mask = BIT(14), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_ahb_mst_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_ahb_slv_clk = { + .halt_reg = 0x102c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb004, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_ahb_slv_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_i2c0_clk = { + .halt_reg = 0x2024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_i2c0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_i2c0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_i2c1_clk = { + .halt_reg = 0x3024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_i2c1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_i2c1_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_spi0_clk = { + .halt_reg = 0x4020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_spi0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_spi0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_spi1_clk = { + .halt_reg = 0x5020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_spi1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_spi1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_uart0_clk = { + .halt_reg = 0x2040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_uart0_clk", + .parent_hws = (const 
struct clk_hw*[]) { + &gcc_qupv3_uart0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_uart1_clk = { + .halt_reg = 0x3040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_uart1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_uart1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x3303c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3303c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x3302c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3302c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_apps_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sdcc1_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x33034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x33034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ice_core_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sdcc1_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_uniphy0_ahb_clk = { + .halt_reg = 0x1704c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1704c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy0_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_uniphy0_sys_clk = { + .halt_reg = 0x17048, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x17048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy0_sys_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_uniphy_sys_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_uniphy1_ahb_clk = { + .halt_reg = 0x1705c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1705c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy1_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_uniphy1_sys_clk = { + .halt_reg = 0x17058, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x17058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy1_sys_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_uniphy_sys_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct 
clk_branch gcc_uniphy2_ahb_clk = { + .halt_reg = 0x1706c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1706c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy2_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_uniphy2_sys_clk = { + .halt_reg = 0x17068, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x17068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_uniphy2_sys_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_uniphy_sys_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_aux_clk = { + .halt_reg = 0x2c04c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2c04c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_master_clk = { + .halt_reg = 0x2c044, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2c044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_master_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb0_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_mock_utmi_clk = { + .halt_reg = 0x2c050, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2c050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb0_mock_utmi_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb1_mock_utmi_clk = { + .halt_reg = 0x3c024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x3c024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb1_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb1_mock_utmi_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = { + .halt_reg = 0x2c05c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2c05c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_phy_cfg_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb1_phy_cfg_ahb_clk = { + .halt_reg = 0x3c01c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x3c01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb1_phy_cfg_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb1_master_clk = { + .halt_reg = 0x3c028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + 
.enable_reg = 0x3c028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb1_master_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_usb0_pipe_clk_src = { + .reg = 0x2c074, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_USB_PCIE_WRAPPER_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_pipe_clk = { + .halt_reg = 0x2c054, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x2c054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_pipe_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_usb0_pipe_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_sleep_clk = { + .halt_reg = 0x2c058, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2c058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_sleep_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sleep_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb1_sleep_clk = { + .halt_reg = 0x3c020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x3c020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb1_sleep_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sleep_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cmn_12gpll_ahb_clk = { + .halt_reg = 0x3a004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3a004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cmn_12gpll_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cmn_12gpll_sys_clk = { + .halt_reg = 0x3a008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3a008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cmn_12gpll_sys_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_uniphy_sys_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_lpass_sway_clk = { + .halt_reg = 0x27014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x27014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_lpass_sway_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_lpass_sway_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_lpass_cfg_clk = { + .halt_reg = 0x2e028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2e028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_lpass_cfg_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_lpass_sway_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, 
+ .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_lpass_core_axim_clk = { + .halt_reg = 0x27018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x27018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_lpass_core_axim_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_lpass_axim_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_snoc_lpass_clk = { + .halt_reg = 0x31020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x31020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_snoc_lpass_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_lpass_axim_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb0_eud_at_clk = { + .halt_reg = 0x30004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x30004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb0_eud_at_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_eud_at_div_clk_src.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qpic_ahb_clk = { + .halt_reg = 0x32010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x32010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qpic_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qpic_clk = { + .halt_reg = 0x32028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x32028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qpic_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qpic_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qpic_io_macro_clk = { + .halt_reg = 0x3200c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3200c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qpic_io_macro_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qpic_io_macro_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qdss_dap_clk = { + .halt_reg = 0x2d058, + .clkr = { + .enable_reg = 0x2d058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qdss_dap_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_qdss_dap_sync_clk_src.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qdss_at_clk = { + .halt_reg = 0x2d034, + .clkr = { + .enable_reg = 0x2d034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qdss_at_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_qdss_at_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_rchng_clk = { + .halt_reg = 0x28028, + .clkr = { + .enable_reg = 0x28028, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_rchng_clk", + .parent_hws 
= (const struct clk_hw *[]) { + &gcc_pcie0_rchng_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_rchng_clk = { + .halt_reg = 0x29028, + .clkr = { + .enable_reg = 0x29028, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_rchng_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie1_rchng_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_rchng_clk = { + .halt_reg = 0x2a028, + .clkr = { + .enable_reg = 0x2a028, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2_rchng_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie2_rchng_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3_rchng_clk = { + .halt_reg = 0x2b028, + .clkr = { + .enable_reg = 0x2b028, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3_rchng_clk", + .parent_hws = (const struct clk_hw *[]) { + &gcc_pcie3_rchng_clk_src.clkr.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gcc_ipq5424_clocks[] = { + [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr, + [GCC_ADSS_PWM_CLK_SRC] = &gcc_adss_pwm_clk_src.clkr, + [GCC_APSS_DBG_CLK] = &gcc_apss_dbg_clk.clkr, + [GCC_CNOC_PCIE0_1LANE_S_CLK] = &gcc_cnoc_pcie0_1lane_s_clk.clkr, + [GCC_CNOC_PCIE1_1LANE_S_CLK] = &gcc_cnoc_pcie1_1lane_s_clk.clkr, + [GCC_CNOC_PCIE2_2LANE_S_CLK] = &gcc_cnoc_pcie2_2lane_s_clk.clkr, + [GCC_CNOC_PCIE3_2LANE_S_CLK] = &gcc_cnoc_pcie3_2lane_s_clk.clkr, + [GCC_CNOC_USB_CLK] = &gcc_cnoc_usb_clk.clkr, + [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr, + [GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr, + [GCC_NSS_TS_CLK_SRC] = &gcc_nss_ts_clk_src.clkr, + [GCC_NSSCC_CLK] = &gcc_nsscc_clk.clkr, + [GCC_NSSCFG_CLK] = &gcc_nsscfg_clk.clkr, + [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr, + [GCC_NSSNOC_NSSCC_CLK] = &gcc_nssnoc_nsscc_clk.clkr, + [GCC_NSSNOC_PCNOC_1_CLK] = &gcc_nssnoc_pcnoc_1_clk.clkr, + [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr, + [GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr, + [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr, + [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr, + [GCC_NSSNOC_XO_DCD_CLK] = &gcc_nssnoc_xo_dcd_clk.clkr, + [GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr, + [GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr, + [GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr, + [GCC_PCIE0_AXI_M_CLK_SRC] = &gcc_pcie0_axi_m_clk_src.clkr, + [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr, + [GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr, + [GCC_PCIE0_AXI_S_CLK_SRC] = &gcc_pcie0_axi_s_clk_src.clkr, + [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr, + [GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr, + [GCC_PCIE0_PIPE_CLK_SRC] = &gcc_pcie0_pipe_clk_src.clkr, + [GCC_PCIE0_RCHNG_CLK_SRC] = &gcc_pcie0_rchng_clk_src.clkr, + [GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr, + [GCC_PCIE1_AHB_CLK] = &gcc_pcie1_ahb_clk.clkr, + [GCC_PCIE1_AUX_CLK] = &gcc_pcie1_aux_clk.clkr, + [GCC_PCIE1_AXI_M_CLK] = &gcc_pcie1_axi_m_clk.clkr, + [GCC_PCIE1_AXI_M_CLK_SRC] = &gcc_pcie1_axi_m_clk_src.clkr, + [GCC_PCIE1_AXI_S_BRIDGE_CLK] = &gcc_pcie1_axi_s_bridge_clk.clkr, + [GCC_PCIE1_AXI_S_CLK] = 
&gcc_pcie1_axi_s_clk.clkr, + [GCC_PCIE1_AXI_S_CLK_SRC] = &gcc_pcie1_axi_s_clk_src.clkr, + [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr, + [GCC_ANOC_PCIE1_1LANE_M_CLK] = &gcc_anoc_pcie1_1lane_m_clk.clkr, + [GCC_PCIE1_PIPE_CLK_SRC] = &gcc_pcie1_pipe_clk_src.clkr, + [GCC_PCIE1_RCHNG_CLK_SRC] = &gcc_pcie1_rchng_clk_src.clkr, + [GCC_PCIE1_RCHNG_CLK] = &gcc_pcie1_rchng_clk.clkr, + [GCC_PCIE2_AHB_CLK] = &gcc_pcie2_ahb_clk.clkr, + [GCC_PCIE2_AUX_CLK] = &gcc_pcie2_aux_clk.clkr, + [GCC_PCIE2_AXI_M_CLK] = &gcc_pcie2_axi_m_clk.clkr, + [GCC_PCIE2_AXI_M_CLK_SRC] = &gcc_pcie2_axi_m_clk_src.clkr, + [GCC_PCIE2_AXI_S_BRIDGE_CLK] = &gcc_pcie2_axi_s_bridge_clk.clkr, + [GCC_PCIE2_AXI_S_CLK] = &gcc_pcie2_axi_s_clk.clkr, + [GCC_PCIE2_AXI_S_CLK_SRC] = &gcc_pcie2_axi_s_clk_src.clkr, + [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr, + [GCC_ANOC_PCIE2_2LANE_M_CLK] = &gcc_anoc_pcie2_2lane_m_clk.clkr, + [GCC_PCIE2_PIPE_CLK_SRC] = &gcc_pcie2_pipe_clk_src.clkr, + [GCC_PCIE2_RCHNG_CLK_SRC] = &gcc_pcie2_rchng_clk_src.clkr, + [GCC_PCIE2_RCHNG_CLK] = &gcc_pcie2_rchng_clk.clkr, + [GCC_PCIE3_AHB_CLK] = &gcc_pcie3_ahb_clk.clkr, + [GCC_PCIE3_AUX_CLK] = &gcc_pcie3_aux_clk.clkr, + [GCC_PCIE3_AXI_M_CLK] = &gcc_pcie3_axi_m_clk.clkr, + [GCC_PCIE3_AXI_M_CLK_SRC] = &gcc_pcie3_axi_m_clk_src.clkr, + [GCC_PCIE3_AXI_S_BRIDGE_CLK] = &gcc_pcie3_axi_s_bridge_clk.clkr, + [GCC_PCIE3_AXI_S_CLK] = &gcc_pcie3_axi_s_clk.clkr, + [GCC_PCIE3_AXI_S_CLK_SRC] = &gcc_pcie3_axi_s_clk_src.clkr, + [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr, + [GCC_ANOC_PCIE3_2LANE_M_CLK] = &gcc_anoc_pcie3_2lane_m_clk.clkr, + [GCC_PCIE3_PIPE_CLK_SRC] = &gcc_pcie3_pipe_clk_src.clkr, + [GCC_PCIE3_RCHNG_CLK_SRC] = &gcc_pcie3_rchng_clk_src.clkr, + [GCC_PCIE3_RCHNG_CLK] = &gcc_pcie3_rchng_clk.clkr, + [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_QUPV3_AHB_MST_CLK] = &gcc_qupv3_ahb_mst_clk.clkr, + [GCC_QUPV3_AHB_SLV_CLK] = &gcc_qupv3_ahb_slv_clk.clkr, + [GCC_QUPV3_I2C0_CLK] = &gcc_qupv3_i2c0_clk.clkr, + [GCC_QUPV3_I2C0_CLK_SRC] = &gcc_qupv3_i2c0_clk_src.clkr, + [GCC_QUPV3_I2C0_DIV_CLK_SRC] = &gcc_qupv3_i2c0_div_clk_src.clkr, + [GCC_QUPV3_I2C1_CLK] = &gcc_qupv3_i2c1_clk.clkr, + [GCC_QUPV3_I2C1_CLK_SRC] = &gcc_qupv3_i2c1_clk_src.clkr, + [GCC_QUPV3_I2C1_DIV_CLK_SRC] = &gcc_qupv3_i2c1_div_clk_src.clkr, + [GCC_QUPV3_SPI0_CLK] = &gcc_qupv3_spi0_clk.clkr, + [GCC_QUPV3_SPI0_CLK_SRC] = &gcc_qupv3_spi0_clk_src.clkr, + [GCC_QUPV3_SPI1_CLK] = &gcc_qupv3_spi1_clk.clkr, + [GCC_QUPV3_SPI1_CLK_SRC] = &gcc_qupv3_spi1_clk_src.clkr, + [GCC_QUPV3_UART0_CLK] = &gcc_qupv3_uart0_clk.clkr, + [GCC_QUPV3_UART0_CLK_SRC] = &gcc_qupv3_uart0_clk_src.clkr, + [GCC_QUPV3_UART1_CLK] = &gcc_qupv3_uart1_clk.clkr, + [GCC_QUPV3_UART1_CLK_SRC] = &gcc_qupv3_uart1_clk_src.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr, + [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr, + [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr, + [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr, + [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr, + [GCC_UNIPHY2_AHB_CLK] = &gcc_uniphy2_ahb_clk.clkr, + [GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr, + [GCC_UNIPHY_SYS_CLK_SRC] = &gcc_uniphy_sys_clk_src.clkr, + [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr, + [GCC_USB0_AUX_CLK_SRC] = &gcc_usb0_aux_clk_src.clkr, + [GCC_USB0_MASTER_CLK] = 
&gcc_usb0_master_clk.clkr, + [GCC_USB0_MASTER_CLK_SRC] = &gcc_usb0_master_clk_src.clkr, + [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr, + [GCC_USB0_MOCK_UTMI_CLK_SRC] = &gcc_usb0_mock_utmi_clk_src.clkr, + [GCC_USB0_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb0_mock_utmi_div_clk_src.clkr, + [GCC_USB0_EUD_AT_CLK] = &gcc_usb0_eud_at_clk.clkr, + [GCC_USB0_PIPE_CLK_SRC] = &gcc_usb0_pipe_clk_src.clkr, + [GCC_USB1_MOCK_UTMI_CLK] = &gcc_usb1_mock_utmi_clk.clkr, + [GCC_USB1_MOCK_UTMI_CLK_SRC] = &gcc_usb1_mock_utmi_clk_src.clkr, + [GCC_USB1_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb1_mock_utmi_div_clk_src.clkr, + [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr, + [GCC_USB1_PHY_CFG_AHB_CLK] = &gcc_usb1_phy_cfg_ahb_clk.clkr, + [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr, + [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr, + [GCC_USB1_SLEEP_CLK] = &gcc_usb1_sleep_clk.clkr, + [GCC_USB1_MASTER_CLK] = &gcc_usb1_master_clk.clkr, + [GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr, + [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr, + [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr, + [GCC_LPASS_SWAY_CLK] = &gcc_lpass_sway_clk.clkr, + [GCC_CNOC_LPASS_CFG_CLK] = &gcc_cnoc_lpass_cfg_clk.clkr, + [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr, + [GCC_SNOC_LPASS_CLK] = &gcc_snoc_lpass_clk.clkr, + [GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr, + [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr, + [GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr, + [GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr, + [GCC_LPASS_SWAY_CLK_SRC] = &gcc_lpass_sway_clk_src.clkr, + [GCC_LPASS_AXIM_CLK_SRC] = &gcc_lpass_axim_clk_src.clkr, + [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr, + [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr, + [GCC_QPIC_IO_MACRO_CLK_SRC] = &gcc_qpic_io_macro_clk_src.clkr, + [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr, + [GCC_QPIC_CLK_SRC] = &gcc_qpic_clk_src.clkr, + [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr, + [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr, + [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, + [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL2] = &gpll2.clkr, + [GPLL2_OUT_MAIN] = &gpll2_out_main.clkr, + [GPLL4] = &gpll4.clkr, +}; + +static const struct qcom_reset_map gcc_ipq5424_resets[] = { + [GCC_QUPV3_BCR] = { 0x01000, 0 }, + [GCC_QUPV3_I2C0_BCR] = { 0x02000, 0 }, + [GCC_QUPV3_UART0_BCR] = { 0x02020, 0 }, + [GCC_QUPV3_I2C1_BCR] = { 0x03000, 0 }, + [GCC_QUPV3_UART1_BCR] = { 0x03028, 0 }, + [GCC_QUPV3_SPI0_BCR] = { 0x04000, 0 }, + [GCC_QUPV3_SPI1_BCR] = { 0x05000, 0 }, + [GCC_IMEM_BCR] = { 0x0e000, 0 }, + [GCC_TME_BCR] = { 0x100000, 0 }, + [GCC_DDRSS_BCR] = { 0x11000, 0 }, + [GCC_PRNG_BCR] = { 0x13020, 0 }, + [GCC_BOOT_ROM_BCR] = { 0x13028, 0 }, + [GCC_NSS_BCR] = { 0x17000, 0 }, + [GCC_MDIO_BCR] = { 0x1703c, 0 }, + [GCC_UNIPHY0_BCR] = { 0x17044, 0 }, + [GCC_UNIPHY1_BCR] = { 0x17054, 0 }, + [GCC_UNIPHY2_BCR] = { 0x17064, 0 }, + [GCC_WCSS_BCR] = { 0x18004, 0 }, + [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 }, + [GCC_TME_SEC_BUS_BCR] = { 0xa1030, 0 }, + [GCC_ADSS_BCR] = { 0x1c000, 0 }, + [GCC_LPASS_BCR] = { 0x27000, 0 }, + [GCC_PCIE0_BCR] = { 0x28000, 0 }, + [GCC_PCIE0_LINK_DOWN_BCR] = { 0x28054, 0 }, + [GCC_PCIE0PHY_PHY_BCR] = { 0x2805c, 0 }, + [GCC_PCIE0_PHY_BCR] = { 0x28060, 0 }, + [GCC_PCIE1_BCR] = { 0x29000, 0 }, + [GCC_PCIE1_LINK_DOWN_BCR] = { 0x29054, 0 }, + [GCC_PCIE1PHY_PHY_BCR] = { 0x2905c, 0 }, + [GCC_PCIE1_PHY_BCR] = { 0x29060, 0 }, + [GCC_PCIE2_BCR] = { 0x2a000, 0 }, + [GCC_PCIE2_LINK_DOWN_BCR] = { 0x2a054, 0 
}, + [GCC_PCIE2PHY_PHY_BCR] = { 0x2a05c, 0 }, + [GCC_PCIE2_PHY_BCR] = { 0x2a060, 0 }, + [GCC_PCIE3_BCR] = { 0x2b000, 0 }, + [GCC_PCIE3_LINK_DOWN_BCR] = { 0x2b054, 0 }, + [GCC_PCIE3PHY_PHY_BCR] = { 0x2b05c, 0 }, + [GCC_PCIE3_PHY_BCR] = { 0x2b060, 0 }, + [GCC_USB_BCR] = { 0x2c000, 0 }, + [GCC_QUSB2_0_PHY_BCR] = { 0x2c068, 0 }, + [GCC_USB0_PHY_BCR] = { 0x2c06c, 0 }, + [GCC_USB3PHY_0_PHY_BCR] = { 0x2c070, 0 }, + [GCC_QDSS_BCR] = { 0x2d000, 0 }, + [GCC_SNOC_BCR] = { 0x2e000, 0 }, + [GCC_ANOC_BCR] = { 0x2e074, 0 }, + [GCC_PCNOC_BCR] = { 0x31000, 0 }, + [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x31030, 0 }, + [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x31038, 0 }, + [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x31040, 0 }, + [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x31048, 0 }, + [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x31050, 0 }, + [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x31058, 0 }, + [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x31060, 0 }, + [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x31068, 0 }, + [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x31070, 0 }, + [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x31078, 0 }, + [GCC_QPIC_BCR] = { 0x32000, 0 }, + [GCC_SDCC_BCR] = { 0x33000, 0 }, + [GCC_DCC_BCR] = { 0x35000, 0 }, + [GCC_SPDM_BCR] = { 0x36000, 0 }, + [GCC_MPM_BCR] = { 0x37000, 0 }, + [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x38000, 0 }, + [GCC_RBCPR_BCR] = { 0x39000, 0 }, + [GCC_CMN_BLK_BCR] = { 0x3a000, 0 }, + [GCC_TCSR_BCR] = { 0x3d000, 0 }, + [GCC_TLMM_BCR] = { 0x3e000, 0 }, + [GCC_QUPV3_AHB_MST_ARES] = { 0x01014, 2 }, + [GCC_QUPV3_CORE_ARES] = { 0x01018, 2 }, + [GCC_QUPV3_2X_CORE_ARES] = { 0x01020, 2 }, + [GCC_QUPV3_SLEEP_ARES] = { 0x01028, 2 }, + [GCC_QUPV3_AHB_SLV_ARES] = { 0x0102c, 2 }, + [GCC_QUPV3_I2C0_ARES] = { 0x02024, 2 }, + [GCC_QUPV3_UART0_ARES] = { 0x02040, 2 }, + [GCC_QUPV3_I2C1_ARES] = { 0x03024, 2 }, + [GCC_QUPV3_UART1_ARES] = { 0x03040, 2 }, + [GCC_QUPV3_SPI0_ARES] = { 0x04020, 2 }, + [GCC_QUPV3_SPI1_ARES] = { 0x05020, 2 }, + [GCC_DEBUG_ARES] = { 0x06068, 2 }, + [GCC_GP1_ARES] = { 0x08018, 2 }, + [GCC_GP2_ARES] = { 0x09018, 2 }, + [GCC_GP3_ARES] = { 0x0a018, 2 }, + [GCC_IMEM_AXI_ARES] = { 0x0e004, 2 }, + [GCC_IMEM_CFG_AHB_ARES] = { 0x0e00c, 2 }, + [GCC_TME_ARES] = { 0x100b4, 2 }, + [GCC_TME_TS_ARES] = { 0x100c0, 2 }, + [GCC_TME_SLOW_ARES] = { 0x100d0, 2 }, + [GCC_TME_RTC_TOGGLE_ARES] = { 0x100d8, 2 }, + [GCC_TIC_ARES] = { 0x12004, 2 }, + [GCC_PRNG_AHB_ARES] = { 0x13024, 2 }, + [GCC_BOOT_ROM_AHB_ARES] = { 0x1302c, 2 }, + [GCC_NSSNOC_ATB_ARES] = { 0x17014, 2 }, + [GCC_NSS_TS_ARES] = { 0x17018, 2 }, + [GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x1701c, 2 }, + [GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x17020, 2 }, + [GCC_NSSNOC_MEMNOC_ARES] = { 0x17024, 2 }, + [GCC_NSSNOC_SNOC_ARES] = { 0x17028, 2 }, + [GCC_NSSCFG_ARES] = { 0x1702c, 2 }, + [GCC_NSSNOC_NSSCC_ARES] = { 0x17030, 2 }, + [GCC_NSSCC_ARES] = { 0x17034, 2 }, + [GCC_MDIO_AHB_ARES] = { 0x17040, 2 }, + [GCC_UNIPHY0_SYS_ARES] = { 0x17048, 2 }, + [GCC_UNIPHY0_AHB_ARES] = { 0x1704c, 2 }, + [GCC_UNIPHY1_SYS_ARES] = { 0x17058, 2 }, + [GCC_UNIPHY1_AHB_ARES] = { 0x1705c, 2 }, + [GCC_UNIPHY2_SYS_ARES] = { 0x17068, 2 }, + [GCC_UNIPHY2_AHB_ARES] = { 0x1706c, 2 }, + [GCC_NSSNOC_XO_DCD_ARES] = { 0x17074, 2 }, + [GCC_NSSNOC_SNOC_1_ARES] = { 0x1707c, 2 }, + [GCC_NSSNOC_PCNOC_1_ARES] = { 0x17080, 2 }, + [GCC_NSSNOC_MEMNOC_1_ARES] = { 0x17084, 2 }, + [GCC_DDRSS_ATB_ARES] = { 0x19004, 2 }, + [GCC_DDRSS_AHB_ARES] = { 0x19008, 2 }, + [GCC_GEMNOC_AHB_ARES] = { 0x1900c, 2 }, + [GCC_GEMNOC_Q6_AXI_ARES] = { 0x19010, 2 }, + [GCC_GEMNOC_NSSNOC_ARES] = { 0x19014, 2 }, + [GCC_GEMNOC_SNOC_ARES] = { 0x19018, 2 }, + [GCC_GEMNOC_APSS_ARES] = { 
0x1901c, 2 }, + [GCC_GEMNOC_QOSGEN_EXTREF_ARES] = { 0x19024, 2 }, + [GCC_GEMNOC_TS_ARES] = { 0x19028, 2 }, + [GCC_DDRSS_SMS_SLOW_ARES] = { 0x1902c, 2 }, + [GCC_GEMNOC_CNOC_ARES] = { 0x19038, 2 }, + [GCC_GEMNOC_XO_DBG_ARES] = { 0x19040, 2 }, + [GCC_GEMNOC_ANOC_ARES] = { 0x19048, 2 }, + [GCC_DDRSS_LLCC_ATB_ARES] = { 0x1904c, 2 }, + [GCC_LLCC_TPDM_CFG_ARES] = { 0x19050, 2 }, + [GCC_TME_BUS_ARES] = { 0x1a014, 2 }, + [GCC_SEC_CTRL_ACC_ARES] = { 0x1a018, 2 }, + [GCC_SEC_CTRL_ARES] = { 0x1a020, 2 }, + [GCC_SEC_CTRL_SENSE_ARES] = { 0x1a028, 2 }, + [GCC_SEC_CTRL_AHB_ARES] = { 0x1a038, 2 }, + [GCC_SEC_CTRL_BOOT_ROM_PATCH_ARES] = { 0x1a03c, 2 }, + [GCC_ADSS_PWM_ARES] = { 0x1c00c, 2 }, + [GCC_TME_ATB_ARES] = { 0x1e030, 2 }, + [GCC_TME_DBGAPB_ARES] = { 0x1e034, 2 }, + [GCC_TME_DEBUG_ARES] = { 0x1e038, 2 }, + [GCC_TME_AT_ARES] = { 0x1e03c, 2 }, + [GCC_TME_APB_ARES] = { 0x1e040, 2 }, + [GCC_TME_DMI_DBG_HS_ARES] = { 0x1e044, 2 }, + [GCC_APSS_AHB_ARES] = { 0x24014, 2 }, + [GCC_APSS_AXI_ARES] = { 0x24018, 2 }, + [GCC_CPUSS_TRIG_ARES] = { 0x2401c, 2 }, + [GCC_APSS_DBG_ARES] = { 0x2402c, 2 }, + [GCC_APSS_TS_ARES] = { 0x24030, 2 }, + [GCC_APSS_ATB_ARES] = { 0x24034, 2 }, + [GCC_Q6_AXIM_ARES] = { 0x2500c, 2 }, + [GCC_Q6_AXIS_ARES] = { 0x25010, 2 }, + [GCC_Q6_AHB_ARES] = { 0x25014, 2 }, + [GCC_Q6_AHB_S_ARES] = { 0x25018, 2 }, + [GCC_Q6SS_ATBM_ARES] = { 0x2501c, 2 }, + [GCC_Q6_TSCTR_1TO2_ARES] = { 0x25020, 2 }, + [GCC_Q6SS_PCLKDBG_ARES] = { 0x25024, 2 }, + [GCC_Q6SS_TRIG_ARES] = { 0x25028, 2 }, + [GCC_Q6SS_BOOT_CBCR_ARES] = { 0x2502c, 2 }, + [GCC_WCSS_DBG_IFC_APB_ARES] = { 0x25038, 2 }, + [GCC_WCSS_DBG_IFC_ATB_ARES] = { 0x2503c, 2 }, + [GCC_WCSS_DBG_IFC_NTS_ARES] = { 0x25040, 2 }, + [GCC_WCSS_DBG_IFC_DAPBUS_ARES] = { 0x25044, 2 }, + [GCC_WCSS_DBG_IFC_APB_BDG_ARES] = { 0x25048, 2 }, + [GCC_WCSS_DBG_IFC_NTS_BDG_ARES] = { 0x25050, 2 }, + [GCC_WCSS_DBG_IFC_DAPBUS_BDG_ARES] = { 0x25054, 2 }, + [GCC_WCSS_ECAHB_ARES] = { 0x25058, 2 }, + [GCC_WCSS_ACMT_ARES] = { 0x2505c, 2 }, + [GCC_WCSS_AHB_S_ARES] = { 0x25060, 2 }, + [GCC_WCSS_AXI_M_ARES] = { 0x25064, 2 }, + [GCC_PCNOC_WAPSS_ARES] = { 0x25080, 2 }, + [GCC_SNOC_WAPSS_ARES] = { 0x25090, 2 }, + [GCC_LPASS_SWAY_ARES] = { 0x27014, 2 }, + [GCC_LPASS_CORE_AXIM_ARES] = { 0x27018, 2 }, + [GCC_PCIE0_AHB_ARES] = { 0x28030, 2 }, + [GCC_PCIE0_AXI_M_ARES] = { 0x28038, 2 }, + [GCC_PCIE0_AXI_S_ARES] = { 0x28040, 2 }, + [GCC_PCIE0_AXI_S_BRIDGE_ARES] = { 0x28048, 2 }, + [GCC_PCIE0_PIPE_ARES] = { 0x28068, 2 }, + [GCC_PCIE0_AUX_ARES] = { 0x28070, 2 }, + [GCC_PCIE1_AHB_ARES] = { 0x29030, 2 }, + [GCC_PCIE1_AXI_M_ARES] = { 0x29038, 2 }, + [GCC_PCIE1_AXI_S_ARES] = { 0x29040, 2 }, + [GCC_PCIE1_AXI_S_BRIDGE_ARES] = { 0x29048, 2 }, + [GCC_PCIE1_PIPE_ARES] = { 0x29068, 2 }, + [GCC_PCIE1_AUX_ARES] = { 0x29074, 2 }, + [GCC_PCIE2_AHB_ARES] = { 0x2a030, 2 }, + [GCC_PCIE2_AXI_M_ARES] = { 0x2a038, 2 }, + [GCC_PCIE2_AXI_S_ARES] = { 0x2a040, 2 }, + [GCC_PCIE2_AXI_S_BRIDGE_ARES] = { 0x2a048, 2 }, + [GCC_PCIE2_PIPE_ARES] = { 0x2a068, 2 }, + [GCC_PCIE2_AUX_ARES] = { 0x2a078, 2 }, + [GCC_PCIE3_AHB_ARES] = { 0x2b030, 2 }, + [GCC_PCIE3_AXI_M_ARES] = { 0x2b038, 2 }, + [GCC_PCIE3_AXI_S_ARES] = { 0x2b040, 2 }, + [GCC_PCIE3_AXI_S_BRIDGE_ARES] = { 0x2b048, 2 }, + [GCC_PCIE3_PIPE_ARES] = { 0x2b068, 2 }, + [GCC_PCIE3_AUX_ARES] = { 0x2b07c, 2 }, + [GCC_USB0_MASTER_ARES] = { 0x2c044, 2 }, + [GCC_USB0_AUX_ARES] = { 0x2c04c, 2 }, + [GCC_USB0_MOCK_UTMI_ARES] = { 0x2c050, 2 }, + [GCC_USB0_PIPE_ARES] = { 0x2c054, 2 }, + [GCC_USB0_SLEEP_ARES] = { 0x2c058, 2 }, + [GCC_USB0_PHY_CFG_AHB_ARES] = { 0x2c05c, 2 }, +
[GCC_QDSS_AT_ARES] = { 0x2d034, 2 }, + [GCC_QDSS_STM_ARES] = { 0x2d03c, 2 }, + [GCC_QDSS_TRACECLKIN_ARES] = { 0x2d040, 2 }, + [GCC_QDSS_TSCTR_DIV2_ARES] = { 0x2d044, 2 }, + [GCC_QDSS_TSCTR_DIV3_ARES] = { 0x2d048, 2 }, + [GCC_QDSS_TSCTR_DIV4_ARES] = { 0x2d04c, 2 }, + [GCC_QDSS_TSCTR_DIV8_ARES] = { 0x2d050, 2 }, + [GCC_QDSS_TSCTR_DIV16_ARES] = { 0x2d054, 2 }, + [GCC_QDSS_DAP_ARES] = { 0x2d058, 2 }, + [GCC_QDSS_APB2JTAG_ARES] = { 0x2d05c, 2 }, + [GCC_QDSS_ETR_USB_ARES] = { 0x2d060, 2 }, + [GCC_QDSS_DAP_AHB_ARES] = { 0x2d064, 2 }, + [GCC_QDSS_CFG_AHB_ARES] = { 0x2d068, 2 }, + [GCC_QDSS_EUD_AT_ARES] = { 0x2d06c, 2 }, + [GCC_QDSS_TS_ARES] = { 0x2d078, 2 }, + [GCC_QDSS_USB_ARES] = { 0x2d07c, 2 }, + [GCC_SYS_NOC_AXI_ARES] = { 0x2e01c, 2 }, + [GCC_SNOC_QOSGEN_EXTREF_ARES] = { 0x2e020, 2 }, + [GCC_CNOC_LPASS_CFG_ARES] = { 0x2e028, 2 }, + [GCC_SYS_NOC_AT_ARES] = { 0x2e038, 2 }, + [GCC_SNOC_PCNOC_AHB_ARES] = { 0x2e03c, 2 }, + [GCC_SNOC_TME_ARES] = { 0x2e05c, 2 }, + [GCC_SNOC_XO_DCD_ARES] = { 0x2e060, 2 }, + [GCC_SNOC_TS_ARES] = { 0x2e068, 2 }, + [GCC_ANOC0_AXI_ARES] = { 0x2e078, 2 }, + [GCC_ANOC_PCIE0_1LANE_M_ARES] = { 0x2e07c, 2 }, + [GCC_ANOC_PCIE2_2LANE_M_ARES] = { 0x2e080, 2 }, + [GCC_ANOC_PCIE1_1LANE_M_ARES] = { 0x2e084, 2 }, + [GCC_ANOC_PCIE3_2LANE_M_ARES] = { 0x2e090, 2 }, + [GCC_ANOC_PCNOC_AHB_ARES] = { 0x2e094, 2 }, + [GCC_ANOC_QOSGEN_EXTREF_ARES] = { 0x2e098, 2 }, + [GCC_ANOC_XO_DCD_ARES] = { 0x2e09c, 2 }, + [GCC_SNOC_XO_DBG_ARES] = { 0x2e0a0, 2 }, + [GCC_AGGRNOC_ATB_ARES] = { 0x2e0ac, 2 }, + [GCC_AGGRNOC_TS_ARES] = { 0x2e0b0, 2 }, + [GCC_USB0_EUD_AT_ARES] = { 0x30004, 2 }, + [GCC_PCNOC_TIC_ARES] = { 0x31014, 2 }, + [GCC_PCNOC_AHB_ARES] = { 0x31018, 2 }, + [GCC_PCNOC_XO_DBG_ARES] = { 0x3101c, 2 }, + [GCC_SNOC_LPASS_ARES] = { 0x31020, 2 }, + [GCC_PCNOC_AT_ARES] = { 0x31024, 2 }, + [GCC_PCNOC_XO_DCD_ARES] = { 0x31028, 2 }, + [GCC_PCNOC_TS_ARES] = { 0x3102c, 2 }, + [GCC_PCNOC_BUS_TIMEOUT0_AHB_ARES] = { 0x31034, 2 }, + [GCC_PCNOC_BUS_TIMEOUT1_AHB_ARES] = { 0x3103c, 2 }, + [GCC_PCNOC_BUS_TIMEOUT2_AHB_ARES] = { 0x31044, 2 }, + [GCC_PCNOC_BUS_TIMEOUT3_AHB_ARES] = { 0x3104c, 2 }, + [GCC_PCNOC_BUS_TIMEOUT4_AHB_ARES] = { 0x31054, 2 }, + [GCC_PCNOC_BUS_TIMEOUT5_AHB_ARES] = { 0x3105c, 2 }, + [GCC_PCNOC_BUS_TIMEOUT6_AHB_ARES] = { 0x31064, 2 }, + [GCC_PCNOC_BUS_TIMEOUT7_AHB_ARES] = { 0x3106c, 2 }, + [GCC_Q6_AXIM_RESET] = { 0x2506c, 0 }, + [GCC_Q6_AXIS_RESET] = { 0x2506c, 1 }, + [GCC_Q6_AHB_S_RESET] = { 0x2506c, 2 }, + [GCC_Q6_AHB_RESET] = { 0x2506c, 3 }, + [GCC_Q6SS_DBG_RESET] = { 0x2506c, 4 }, + [GCC_WCSS_ECAHB_RESET] = { 0x25070, 0 }, + [GCC_WCSS_DBG_BDG_RESET] = { 0x25070, 1 }, + [GCC_WCSS_DBG_RESET] = { 0x25070, 2 }, + [GCC_WCSS_AXI_M_RESET] = { 0x25070, 3 }, + [GCC_WCSS_AHB_S_RESET] = { 0x25070, 4 }, + [GCC_WCSS_ACMT_RESET] = { 0x25070, 5 }, + [GCC_WCSSAON_RESET] = { 0x25074, 0 }, + [GCC_PCIE0_PIPE_RESET] = { 0x28058, 0 }, + [GCC_PCIE0_CORE_STICKY_RESET] = { 0x28058, 1 }, + [GCC_PCIE0_AXI_S_STICKY_RESET] = { 0x28058, 2 }, + [GCC_PCIE0_AXI_S_RESET] = { 0x28058, 3 }, + [GCC_PCIE0_AXI_M_STICKY_RESET] = { 0x28058, 4 }, + [GCC_PCIE0_AXI_M_RESET] = { 0x28058, 5 }, + [GCC_PCIE0_AUX_RESET] = { 0x28058, 6 }, + [GCC_PCIE0_AHB_RESET] = { 0x28058, 7 }, + [GCC_PCIE1_PIPE_RESET] = { 0x29058, 0 }, + [GCC_PCIE1_CORE_STICKY_RESET] = { 0x29058, 1 }, + [GCC_PCIE1_AXI_S_STICKY_RESET] = { 0x29058, 2 }, + [GCC_PCIE1_AXI_S_RESET] = { 0x29058, 3 }, + [GCC_PCIE1_AXI_M_STICKY_RESET] = { 0x29058, 4 }, + [GCC_PCIE1_AXI_M_RESET] = { 0x29058, 5 }, + [GCC_PCIE1_AUX_RESET] = { 0x29058, 6 }, + [GCC_PCIE1_AHB_RESET] = { 0x29058, 7 },
+ [GCC_PCIE2_PIPE_RESET] = { 0x2a058, 0 }, + [GCC_PCIE2_CORE_STICKY_RESET] = { 0x2a058, 1 }, + [GCC_PCIE2_AXI_S_STICKY_RESET] = { 0x2a058, 2 }, + [GCC_PCIE2_AXI_S_RESET] = { 0x2a058, 3 }, + [GCC_PCIE2_AXI_M_STICKY_RESET] = { 0x2a058, 4 }, + [GCC_PCIE2_AXI_M_RESET] = { 0x2a058, 5 }, + [GCC_PCIE2_AUX_RESET] = { 0x2a058, 6 }, + [GCC_PCIE2_AHB_RESET] = { 0x2a058, 7 }, + [GCC_PCIE3_PIPE_RESET] = { 0x2b058, 0 }, + [GCC_PCIE3_CORE_STICKY_RESET] = { 0x2b058, 1 }, + [GCC_PCIE3_AXI_S_STICKY_RESET] = { 0x2b058, 2 }, + [GCC_PCIE3_AXI_S_RESET] = { 0x2b058, 3 }, + [GCC_PCIE3_AXI_M_STICKY_RESET] = { 0x2b058, 4 }, + [GCC_PCIE3_AXI_M_RESET] = { 0x2b058, 5 }, + [GCC_PCIE3_AUX_RESET] = { 0x2b058, 6 }, + [GCC_PCIE3_AHB_RESET] = { 0x2b058, 7 }, + [GCC_NSS_PARTIAL_RESET] = { 0x17078, 0 }, + [GCC_UNIPHY0_XPCS_ARES] = { 0x17050, 2 }, + [GCC_UNIPHY1_XPCS_ARES] = { 0x17060, 2 }, + [GCC_UNIPHY2_XPCS_ARES] = { 0x17070, 2 }, + [GCC_USB1_BCR] = { 0x3c000, 0 }, + [GCC_QUSB2_1_PHY_BCR] = { 0x3c030, 0 }, +}; + +static const struct of_device_id gcc_ipq5424_match_table[] = { + { .compatible = "qcom,ipq5424-gcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_ipq5424_match_table); + +static const struct regmap_config gcc_ipq5424_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x3f024, + .fast_io = true, +}; + +static struct clk_hw *gcc_ipq5424_hws[] = { + &gpll0_div2.hw, + &gcc_xo_div4_clk_src.hw, + &gcc_qdss_tsctr_div2_clk_src.hw, + &gcc_qdss_dap_sync_clk_src.hw, + &gcc_eud_at_div_clk_src.hw, +}; + +static const struct qcom_cc_desc gcc_ipq5424_desc = { + .config = &gcc_ipq5424_regmap_config, + .clks = gcc_ipq5424_clocks, + .num_clks = ARRAY_SIZE(gcc_ipq5424_clocks), + .resets = gcc_ipq5424_resets, + .num_resets = ARRAY_SIZE(gcc_ipq5424_resets), + .clk_hws = gcc_ipq5424_hws, + .num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws), +}; + +static int gcc_ipq5424_probe(struct platform_device *pdev) +{ + return qcom_cc_probe(pdev, &gcc_ipq5424_desc); +} + +static struct platform_driver gcc_ipq5424_driver = { + .probe = gcc_ipq5424_probe, + .driver = { + .name = "qcom,gcc-ipq5424", + .of_match_table = gcc_ipq5424_match_table, + }, +}; + +static int __init gcc_ipq5424_init(void) +{ + return platform_driver_register(&gcc_ipq5424_driver); +} +core_initcall(gcc_ipq5424_init); + +static void __exit gcc_ipq5424_exit(void) +{ + platform_driver_unregister(&gcc_ipq5424_driver); +} +module_exit(gcc_ipq5424_exit); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc.
GCC IPQ5424 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c index 645109f75b46..6bb66a7e1fb6 100644 --- a/drivers/clk/qcom/gcc-ipq9574.c +++ b/drivers/clk/qcom/gcc-ipq9574.c @@ -2645,24 +2645,6 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = { }, }; -static struct clk_branch gcc_q6ss_boot_clk = { - .halt_reg = 0x25080, - .halt_check = BRANCH_HALT_SKIP, - .clkr = { - .enable_reg = 0x25080, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_boot_clk", - .parent_hws = (const struct clk_hw *[]) { - &system_noc_bfdcd_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_nssnoc_snoc_clk = { .halt_reg = 0x17028, .clkr = { @@ -2733,91 +2715,6 @@ static struct clk_rcg2 wcss_ahb_clk_src = { }, }; -static struct clk_branch gcc_q6_ahb_clk = { - .halt_reg = 0x25014, - .clkr = { - .enable_reg = 0x25014, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_ahb_clk", - .parent_hws = (const struct clk_hw *[]) { - &wcss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6_ahb_s_clk = { - .halt_reg = 0x25018, - .clkr = { - .enable_reg = 0x25018, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_ahb_s_clk", - .parent_hws = (const struct clk_hw *[]) { - &wcss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_ecahb_clk = { - .halt_reg = 0x25058, - .clkr = { - .enable_reg = 0x25058, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_ecahb_clk", - .parent_hws = (const struct clk_hw *[]) { - &wcss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_acmt_clk = { - .halt_reg = 0x2505c, - .clkr = { - .enable_reg = 0x2505c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_acmt_clk", - .parent_hws = (const struct clk_hw *[]) { - &wcss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_sys_noc_wcss_ahb_clk = { - .halt_reg = 0x2e030, - .clkr = { - .enable_reg = 0x2e030, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_sys_noc_wcss_ahb_clk", - .parent_hws = (const struct clk_hw *[]) { - &wcss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static const struct freq_tbl ftbl_wcss_axi_m_clk_src[] = { F(24000000, P_XO, 1, 0, 0), F(133333333, P_GPLL0, 6, 0, 0), @@ -2838,23 +2735,6 @@ static struct clk_rcg2 wcss_axi_m_clk_src = { }, }; -static struct clk_branch gcc_anoc_wcss_axi_m_clk = { - .halt_reg = 0x2e0a8, - .clkr = { - .enable_reg = 0x2e0a8, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_anoc_wcss_axi_m_clk", - .parent_hws = (const struct clk_hw *[]) { - &wcss_axi_m_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static const struct freq_tbl ftbl_qdss_at_clk_src[] = { F(240000000, P_GPLL4, 5, 0, 0), { } @@ -2873,40 +2753,6 @@ static struct clk_rcg2 qdss_at_clk_src = { 
}, }; -static struct clk_branch gcc_q6ss_atbm_clk = { - .halt_reg = 0x2501c, - .clkr = { - .enable_reg = 0x2501c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_atbm_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_at_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = { - .halt_reg = 0x2503c, - .clkr = { - .enable_reg = 0x2503c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_atb_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_at_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_nssnoc_atb_clk = { .halt_reg = 0x17014, .clkr = { @@ -3143,40 +2989,6 @@ static struct clk_fixed_factor qdss_tsctr_div2_clk_src = { }, }; -static struct clk_branch gcc_q6_tsctr_1to2_clk = { - .halt_reg = 0x25020, - .clkr = { - .enable_reg = 0x25020, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_tsctr_1to2_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_tsctr_div2_clk_src.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = { - .halt_reg = 0x25040, - .clkr = { - .enable_reg = 0x25040, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_nts_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_tsctr_div2_clk_src.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_qdss_tsctr_div2_clk = { .halt_reg = 0x2d044, .clkr = { @@ -3351,74 +3163,6 @@ static struct clk_branch gcc_qdss_tsctr_div16_clk = { }, }; -static struct clk_branch gcc_q6ss_pclkdbg_clk = { - .halt_reg = 0x25024, - .clkr = { - .enable_reg = 0x25024, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_pclkdbg_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_dap_sync_clk_src.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_q6ss_trig_clk = { - .halt_reg = 0x25068, - .clkr = { - .enable_reg = 0x25068, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6ss_trig_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_dap_sync_clk_src.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = { - .halt_reg = 0x25038, - .clkr = { - .enable_reg = 0x25038, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_apb_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_dap_sync_clk_src.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_dbg_ifc_dapbus_clk = { - .halt_reg = 0x25044, - .clkr = { - .enable_reg = 0x25044, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_dbg_ifc_dapbus_clk", - .parent_hws = (const struct clk_hw *[]) { - &qdss_dap_sync_clk_src.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_qdss_dap_clk = { .halt_reg = 0x2d058, .clkr = { @@ -3540,58 
+3284,6 @@ static struct clk_rcg2 q6_axi_clk_src = { }, }; -static struct clk_branch gcc_q6_axim_clk = { - .halt_reg = 0x2500c, - .clkr = { - .enable_reg = 0x2500c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_q6_axim_clk", - .parent_hws = (const struct clk_hw *[]) { - &q6_axi_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_wcss_q6_tbu_clk = { - .halt_reg = 0x12050, - .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0xb00c, - .enable_mask = BIT(6), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_wcss_q6_tbu_clk", - .parent_hws = (const struct clk_hw *[]) { - &q6_axi_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch gcc_mem_noc_q6_axi_clk = { - .halt_reg = 0x19010, - .clkr = { - .enable_reg = 0x19010, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_mem_noc_q6_axi_clk", - .parent_hws = (const struct clk_hw *[]) { - &q6_axi_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static const struct freq_tbl ftbl_q6_axim2_clk_src[] = { F(342857143, P_GPLL4, 3.5, 0, 0), { } @@ -4141,16 +3833,8 @@ static struct clk_regmap *gcc_ipq9574_clks[] = { [GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr, [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr, [WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr, - [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr, - [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr, - [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr, - [GCC_WCSS_ACMT_CLK] = &gcc_wcss_acmt_clk.clkr, - [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr, [WCSS_AXI_M_CLK_SRC] = &wcss_axi_m_clk_src.clkr, - [GCC_ANOC_WCSS_AXI_M_CLK] = &gcc_anoc_wcss_axi_m_clk.clkr, [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr, - [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr, - [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr, [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr, [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr, [GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr, @@ -4163,27 +3847,18 @@ static struct clk_regmap *gcc_ipq9574_clks[] = { [QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr, [GCC_QDSS_TRACECLKIN_CLK] = &gcc_qdss_traceclkin_clk.clkr, [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr, - [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr, - [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr, [GCC_QDSS_TSCTR_DIV2_CLK] = &gcc_qdss_tsctr_div2_clk.clkr, [GCC_QDSS_TS_CLK] = &gcc_qdss_ts_clk.clkr, [GCC_QDSS_TSCTR_DIV4_CLK] = &gcc_qdss_tsctr_div4_clk.clkr, [GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr, [GCC_QDSS_TSCTR_DIV8_CLK] = &gcc_qdss_tsctr_div8_clk.clkr, [GCC_QDSS_TSCTR_DIV16_CLK] = &gcc_qdss_tsctr_div16_clk.clkr, - [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr, - [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr, - [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr, - [GCC_WCSS_DBG_IFC_DAPBUS_CLK] = &gcc_wcss_dbg_ifc_dapbus_clk.clkr, [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, [GCC_QDSS_APB2JTAG_CLK] = &gcc_qdss_apb2jtag_clk.clkr, [GCC_QDSS_TSCTR_DIV3_CLK] = &gcc_qdss_tsctr_div3_clk.clkr, [QPIC_IO_MACRO_CLK_SRC] = &qpic_io_macro_clk_src.clkr, [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr, [Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr, - [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr, - [GCC_WCSS_Q6_TBU_CLK] = &gcc_wcss_q6_tbu_clk.clkr, - 
[GCC_MEM_NOC_Q6_AXI_CLK] = &gcc_mem_noc_q6_axi_clk.clkr, [Q6_AXIM2_CLK_SRC] = &q6_axim2_clk_src.clkr, [NSSNOC_MEMNOC_BFDCD_CLK_SRC] = &nssnoc_memnoc_bfdcd_clk_src.clkr, [GCC_NSSNOC_MEMNOC_CLK] = &gcc_nssnoc_memnoc_clk.clkr, @@ -4207,7 +3882,6 @@ static struct clk_regmap *gcc_ipq9574_clks[] = { [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr, [GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr, [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr, - [GCC_Q6SS_BOOT_CLK] = &gcc_q6ss_boot_clk.clkr, [UNIPHY_SYS_CLK_SRC] = &uniphy_sys_clk_src.clkr, [NSS_TS_CLK_SRC] = &nss_ts_clk_src.clkr, [GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr, @@ -4384,7 +4058,7 @@ static const struct qcom_reset_map gcc_ipq9574_resets[] = { #define IPQ_APPS_ID 9574 /* some unique value */ -static struct qcom_icc_hws_data icc_ipq9574_hws[] = { +static const struct qcom_icc_hws_data icc_ipq9574_hws[] = { { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK }, { MASTER_SNOC_PCIE0, SLAVE_SNOC_PCIE0, GCC_SNOC_PCIE0_1LANE_S_CLK }, { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK }, diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index c3cfd572e7c1..5ca003c9bfba 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -131,6 +131,7 @@ static struct clk_alpha_pll gpll1_out_main = { /* 930MHz configuration */ static const struct alpha_pll_config gpll3_config = { .l = 48, + .alpha_hi = 0x70, .alpha = 0x0, .alpha_en_mask = BIT(24), .post_div_mask = 0xf << 8, diff --git a/drivers/clk/qcom/gcc-qcs8300.c b/drivers/clk/qcom/gcc-qcs8300.c new file mode 100644 index 000000000000..80831c7dea3b --- /dev/null +++ b/drivers/clk/qcom/gcc-qcs8300.c @@ -0,0 +1,3640 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,qcs8300-gcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "clk-regmap-phy-mux.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_BI_TCXO, + DT_SLEEP_CLK, + DT_PCIE_0_PIPE_CLK, + DT_PCIE_1_PIPE_CLK, + DT_PCIE_PHY_AUX_CLK, + DT_RXC0_REF_CLK, + DT_UFS_PHY_RX_SYMBOL_0_CLK, + DT_UFS_PHY_RX_SYMBOL_1_CLK, + DT_UFS_PHY_TX_SYMBOL_0_CLK, + DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK, +}; + +enum { + P_BI_TCXO, + P_GCC_GPLL0_OUT_EVEN, + P_GCC_GPLL0_OUT_MAIN, + P_GCC_GPLL1_OUT_MAIN, + P_GCC_GPLL4_OUT_MAIN, + P_GCC_GPLL7_OUT_MAIN, + P_GCC_GPLL9_OUT_MAIN, + P_PCIE_0_PIPE_CLK, + P_PCIE_1_PIPE_CLK, + P_PCIE_PHY_AUX_CLK, + P_RXC0_REF_CLK, + P_SLEEP_CLK, + P_UFS_PHY_RX_SYMBOL_0_CLK, + P_UFS_PHY_RX_SYMBOL_1_CLK, + P_UFS_PHY_TX_SYMBOL_0_CLK, + P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK, +}; + +static struct clk_alpha_pll gcc_gpll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .enable_reg = 0x4b028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_gcc_gpll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static struct clk_alpha_pll gcc_gpll1 = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .enable_reg = 0x4b028, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_evo_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll4 = { + .offset = 0x4000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .enable_reg = 0x4b028, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll4", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_evo_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll7 = { + .offset = 0x7000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .enable_reg = 0x4b028, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll7", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_evo_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll9 = { + .offset = 0x9000, + .regs = 
clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .enable_reg = 0x4b028, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll9", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .index = DT_SLEEP_CLK }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const struct clk_parent_data gcc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL1_OUT_MAIN, 4 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll1.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data gcc_parent_data_5[] = { + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll7.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_RXC0_REF_CLK, 3 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_7[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll7.clkr.hw }, + { .index = DT_RXC0_REF_CLK }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_PCIE_PHY_AUX_CLK, 1 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_8[] = { + { .index = DT_PCIE_PHY_AUX_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_10[] = { + { P_PCIE_PHY_AUX_CLK, 1 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_10[] = { + { .index = DT_PCIE_PHY_AUX_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_12[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { 
P_GCC_GPLL9_OUT_MAIN, 2 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_12[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll9.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_13[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, +}; + +static const struct clk_parent_data gcc_parent_data_13[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_14[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL4_OUT_MAIN, 3 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_14[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_15[] = { + { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_15[] = { + { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_16[] = { + { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_16[] = { + { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_17[] = { + { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_17[] = { + { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_18[] = { + { P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_18[] = { + { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static struct clk_regmap_mux gcc_pcie_0_phy_aux_clk_src = { + .reg = 0xa9074, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_8, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_aux_clk_src", + .parent_data = gcc_parent_data_8, + .num_parents = ARRAY_SIZE(gcc_parent_data_8), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = { + .reg = 0xa906c, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE_0_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = { + .reg = 0x77074, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_10, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_aux_clk_src", + .parent_data = gcc_parent_data_10, + .num_parents = ARRAY_SIZE(gcc_parent_data_10), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = { + .reg = 0x7706c, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE_1_PIPE_CLK, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = { + .reg = 0x83060, + .shift = 0, + .width = 2, + .parent_map = 
gcc_parent_map_15, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_0_clk_src", + .parent_data = gcc_parent_data_15, + .num_parents = ARRAY_SIZE(gcc_parent_data_15), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = { + .reg = 0x830d0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_16, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_1_clk_src", + .parent_data = gcc_parent_data_16, + .num_parents = ARRAY_SIZE(gcc_parent_data_16), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = { + .reg = 0x83050, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_17, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_tx_symbol_0_clk_src", + .parent_data = gcc_parent_data_17, + .num_parents = ARRAY_SIZE(gcc_parent_data_17), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { + .reg = 0x1b068, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_18, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_pipe_clk_src", + .parent_data = gcc_parent_data_18, + .num_parents = ARRAY_SIZE(gcc_parent_data_18), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac0_phy_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac0_phy_aux_clk_src = { + .cmd_rcgr = 0xb6028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_phy_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = { + F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0), + F(230400000, P_GCC_GPLL4_OUT_MAIN, 3.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac0_ptp_clk_src = { + .cmd_rcgr = 0xb6060, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_emac0_ptp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_ptp_clk_src", + .parent_data = gcc_parent_data_6, + .num_parents = ARRAY_SIZE(gcc_parent_data_6), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = { + F(5000000, P_GCC_GPLL0_OUT_EVEN, 10, 1, 6), + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0), + F(250000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac0_rgmii_clk_src = { + .cmd_rcgr = 0xb6048, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_rgmii_clk_src", + .parent_data = gcc_parent_data_7, + .num_parents = ARRAY_SIZE(gcc_parent_data_7), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x70004, + .mnd_width = 16, + .hid_width = 5, + 
.parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp1_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x71004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp2_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x62004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp3_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp4_clk_src = { + .cmd_rcgr = 0x1e004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp4_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp5_clk_src = { + .cmd_rcgr = 0x1f004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp5_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0xa9078, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = { + .cmd_rcgr = 0xa9054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_aux_clk_src = { + .cmd_rcgr = 0x77078, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = { + .cmd_rcgr = 0x77054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + 
.freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x3f010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x23154, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x23288, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x233bc, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = 
"gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x234f0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = ARRAY_SIZE(gcc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x23624, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x23758, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = { + .name = "gcc_qupv3_wrap0_s6_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { + .cmd_rcgr = 0x2388c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = { + .name = "gcc_qupv3_wrap0_s7_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { + .cmd_rcgr = 0x239c0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x24154, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x24288, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, +}; + +static struct clk_init_data 
gcc_qupv3_wrap1_s2_clk_src_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x243bc, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x244f0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = ARRAY_SIZE(gcc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x24624, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x24758, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = { + .name = "gcc_qupv3_wrap1_s6_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { + .cmd_rcgr = 0x2488c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = { + .name = "gcc_qupv3_wrap1_s7_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { + .cmd_rcgr = 0x249c0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap3_s0_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, 
P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap3_s0_clk_src_init = { + .name = "gcc_qupv3_wrap3_s0_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap3_s0_clk_src = { + .cmd_rcgr = 0xc4158, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_qupv3_wrap3_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap3_s0_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { + F(144000, P_BI_TCXO, 16, 3, 25), + F(400000, P_BI_TCXO, 12, 1, 4), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3), + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(192000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0), + F(384000000, P_GCC_GPLL9_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0x20014, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_12, + .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_apps_clk_src", + .parent_data = gcc_parent_data_12, + .num_parents = ARRAY_SIZE(gcc_parent_data_12), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = { + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x2002c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_13, + .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_data = gcc_parent_data_13, + .num_parents = ARRAY_SIZE(gcc_parent_data_13), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = { + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { + .cmd_rcgr = 0x8302c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_axi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = { + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(201600000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0), + F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { + .cmd_rcgr = 0x83074, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_14, + .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_data = gcc_parent_data_14, + .num_parents = ARRAY_SIZE(gcc_parent_data_14), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct 
clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { + .cmd_rcgr = 0x830a8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_phy_aux_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = ARRAY_SIZE(gcc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = { + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { + .cmd_rcgr = 0x8308c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = { + F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb20_master_clk_src = { + .cmd_rcgr = 0x1c028, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb20_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb20_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = { + .cmd_rcgr = 0x1c040, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb20_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0x1b028, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0x1b040, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0x1b06c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_aux_clk_src", + 
.parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_0_pipe_div_clk_src = { + .reg = 0xa9070, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_1_pipe_div_clk_src = { + .reg = 0x77070, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_qupv3_wrap3_s0_div_clk_src = { + .reg = 0xc4288, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap3_s0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap3_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = { + .reg = 0x1c058, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb20_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb20_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = { + .reg = 0x1b058, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch gcc_aggre_noc_qupv3_axi_clk = { + .halt_reg = 0x8e200, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8e200, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(28), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_noc_qupv3_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { + .halt_reg = 0x830d4, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x830d4, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x830d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_ufs_phy_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb2_prim_axi_clk = { + .halt_reg = 0x1c05c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1c05c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1c05c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb2_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb20_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0x1b084, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg 
= 0x1b084, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1b084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy0_clk = { + .halt_reg = 0x76004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x76004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x76004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ahb2phy0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy2_clk = { + .halt_reg = 0x76008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x76008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x76008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ahb2phy2_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy3_clk = { + .halt_reg = 0x7600c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7600c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7600c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ahb2phy3_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x44004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x44004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_hf_axi_clk = { + .halt_reg = 0x32010, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x32010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x32010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_sf_axi_clk = { + .halt_reg = 0x32018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x32018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x32018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_throttle_xo_clk = { + .halt_reg = 0x32024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x32024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_throttle_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = { + .halt_reg = 0x1c060, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1c060, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1c060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_usb2_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb20_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0x1b088, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1b088, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1b088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x7d164, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7d164, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7d164, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0x33010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x33010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x33010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_edp_ref_clkref_en = { + .halt_reg = 0x97448, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x97448, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_edp_ref_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_axi_clk = { + .halt_reg = 0xb6018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb6018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb6018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_phy_aux_clk = { + .halt_reg = 0xb6024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb6024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_emac0_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_ptp_clk = { + .halt_reg = 0xb6040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb6040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_ptp_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_emac0_ptp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_rgmii_clk = { + .halt_reg = 0xb6044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb6044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_rgmii_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_emac0_rgmii_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_slv_ahb_clk = { + .halt_reg = 0xb6020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb6020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb6020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_slv_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x70000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x70000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x71000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x71000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + 
.name = "gcc_gp2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x62000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp3_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp4_clk = { + .halt_reg = 0x1e000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp4_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp5_clk = { + .halt_reg = 0x1f000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1f000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp5_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0_out_even.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_center_pipeline_clk = { + .halt_reg = 0x7d160, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7d160, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7d160, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_memnoc_gfx_center_pipeline_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x7d010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7d010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7d010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x7d01c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x7d01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_tcu_throttle_ahb_clk = { + .halt_reg = 0x7d008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7d008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7d008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_tcu_throttle_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; 
+ +static struct clk_branch gcc_gpu_tcu_throttle_clk = { + .halt_reg = 0x7d014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7d014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7d014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_tcu_throttle_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0xa9038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0xa902c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa902c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0xa9024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(11), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_phy_aux_clk = { + .halt_reg = 0xa9030, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(13), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_phy_rchng_clk = { + .halt_reg = 0xa9050, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0xa9040, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(14), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipediv2_clk = { + .halt_reg = 0xa9048, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x4b018, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0xa901c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b010, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0xa9018, + .halt_check = 
BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b018, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x77038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(31), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .halt_reg = 0x7702c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7702c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .halt_reg = 0x77024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_phy_aux_clk = { + .halt_reg = 0x77030, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(3), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_phy_rchng_clk = { + .halt_reg = 0x77050, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x77040, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipediv2_clk = { + .halt_reg = 0x77048, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x4b018, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .halt_reg = 0x7701c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = { + .halt_reg = 0x77018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(5), + .hw.init = &(const struct clk_init_data) { + .name = 
"gcc_pcie_1_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_clkref_en = { + .halt_reg = 0x9746c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x9746c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_throttle_cfg_clk = { + .halt_reg = 0xb2034, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b020, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_throttle_cfg_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x3f00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3f00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pdm2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x3f004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x3f004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x3f004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x3f008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3f008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0x32008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x32008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x32008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_camera_nrt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_rt_ahb_clk = { + .halt_reg = 0x3200c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x3200c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x3200c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_camera_rt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_ahb_clk = { + .halt_reg = 0x33008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x33008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x33008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_disp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_rot_ahb_clk = { + .halt_reg = 0x3300c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x3300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_disp_rot_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cvp_ahb_clk = { + .halt_reg = 0x34008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x34008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x34008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_cvp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0x3400c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x3400c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x3400c, + .enable_mask = 
BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcpu_ahb_clk = { + .halt_reg = 0x34010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x34010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x34010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_vcpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x23018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x2300c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x2314c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x23280, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(11), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x233b4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x234e8, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(13), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x2361c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(14), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x23750, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_hws = 
(const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s6_clk = { + .halt_reg = 0x23884, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s6_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s7_clk = { + .halt_reg = 0x239b8, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(17), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s7_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = { + .halt_reg = 0x24018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(18), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_clk = { + .halt_reg = 0x2400c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x2414c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x24280, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x243b4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(24), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x244e8, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x2461c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + 
.enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x24750, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s6_clk = { + .halt_reg = 0x24884, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b018, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s6_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s7_clk = { + .halt_reg = 0x249b8, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b018, + .enable_mask = BIT(28), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s7_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap3_core_2x_clk = { + .halt_reg = 0xc4018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(24), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap3_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap3_core_clk = { + .halt_reg = 0xc400c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap3_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap3_qspi_clk = { + .halt_reg = 0xc4284, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap3_qspi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap3_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap3_s0_clk = { + .halt_reg = 0xc4150, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap3_s0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap3_s0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x23004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x23004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(6), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x23008, + .halt_check = BRANCH_HALT_VOTED, + 
.hwcg_reg = 0x23008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x24004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x24004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(20), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + .halt_reg = 0x24008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x24008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b008, + .enable_mask = BIT(21), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_3_m_ahb_clk = { + .halt_reg = 0xc4004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xc4004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_3_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_3_s_ahb_clk = { + .halt_reg = 0xc4008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xc4008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(20), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_3_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x2000c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2000c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x20004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x20004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_apps_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sdcc1_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x20044, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x20044, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x20044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ice_core_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sdcc1_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sgmi_clkref_en = { + .halt_reg = 0x97034, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x97034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sgmi_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ahb_clk = { + .halt_reg = 0x83020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x83020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x83020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_clk = { + .halt_reg = 0x83018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x83018, + .hwcg_bit = 1, 
+ .clkr = { + .enable_reg = 0x83018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_clk = { + .halt_reg = 0x8306c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8306c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x8306c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ice_core_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_clk = { + .halt_reg = 0x830a4, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x830a4, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x830a4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_reg = 0x83028, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x83028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_reg = 0x830c0, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x830c0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_reg = 0x83024, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x83024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_clk = { + .halt_reg = 0x83064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x83064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x83064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_unipro_core_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ufs_phy_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_master_clk = { + .halt_reg = 0x1c018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1c018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb20_master_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb20_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
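
Because each leaf branch above wires a single RCG parent through parent_hws and sets CLK_SET_RATE_PARENT, a rate request on the branch propagates up to the RCG, which then selects the matching freq_tbl row. A hypothetical consumer-side sketch, not part of this patch (the con_id, rate, and function name are illustrative; the real names come from the device-tree binding):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_ice(struct device *dev)
{
	struct clk *ice;
	int ret;

	/* con_id "ice" is hypothetical; the consumer binding defines it */
	ice = devm_clk_get(dev, "ice");
	if (IS_ERR(ice))
		return PTR_ERR(ice);

	/*
	 * Forwarded through gcc_sdcc1_ice_core_clk to its RCG; that RCG
	 * uses clk_rcg2_floor_ops and lands on the exact 300 MHz row of
	 * ftbl_gcc_sdcc1_ice_core_clk_src shown earlier.
	 */
	ret = clk_set_rate(ice, 300000000);
	if (ret)
		return ret;

	return clk_prepare_enable(ice);
}
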
+static struct clk_branch gcc_usb20_mock_utmi_clk = { + .halt_reg = 0x1c024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1c024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb20_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_sleep_clk = { + .halt_reg = 0x1c020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1c020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb20_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0x1b018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_master_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0x1b024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0x1b020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0x1b05c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b05c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0x1b060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_reg = 0x1b064, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x1b064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1b064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_pipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_clkref_en = { + .halt_reg = 0x97468, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x97468, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb_clkref_en", + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0x34014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x34014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x34014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_clk = { + .halt_reg = 0x3401c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x3401c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x3401c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc gcc_emac0_gdsc = { + .gdscr = 0xb6004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "gcc_emac0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc gcc_pcie_0_gdsc = { + .gdscr = 0xa9004, + .collapse_ctrl = 0x4b104, + .collapse_mask = BIT(0), + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "gcc_pcie_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct gdsc gcc_pcie_1_gdsc = { + .gdscr = 0x77004, + .collapse_ctrl = 0x4b104, + .collapse_mask = BIT(1), + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "gcc_pcie_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct gdsc gcc_ufs_phy_gdsc = { + .gdscr = 0x83004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "gcc_ufs_phy_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct gdsc gcc_usb20_prim_gdsc = { + .gdscr = 0x1c004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "gcc_usb20_prim_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct gdsc gcc_usb30_prim_gdsc = { + .gdscr = 0x1b004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "gcc_usb30_prim_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct clk_regmap *gcc_qcs8300_clocks[] = { + [GCC_AGGRE_NOC_QUPV3_AXI_CLK] = &gcc_aggre_noc_qupv3_axi_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr, + [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_AHB2PHY0_CLK] = &gcc_ahb2phy0_clk.clkr, + [GCC_AHB2PHY2_CLK] = &gcc_ahb2phy2_clk.clkr, + [GCC_AHB2PHY3_CLK] = &gcc_ahb2phy3_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, + [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr, + [GCC_CAMERA_THROTTLE_XO_CLK] = &gcc_camera_throttle_xo_clk.clkr, + [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_EDP_REF_CLKREF_EN] = &gcc_edp_ref_clkref_en.clkr, + [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr, + [GCC_EMAC0_PHY_AUX_CLK] = &gcc_emac0_phy_aux_clk.clkr, + 
[GCC_EMAC0_PHY_AUX_CLK_SRC] = &gcc_emac0_phy_aux_clk_src.clkr, + [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr, + [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr, + [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr, + [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr, + [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GP4_CLK] = &gcc_gp4_clk.clkr, + [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr, + [GCC_GP5_CLK] = &gcc_gp5_clk.clkr, + [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr, + [GCC_GPLL0] = &gcc_gpll0.clkr, + [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr, + [GCC_GPLL1] = &gcc_gpll1.clkr, + [GCC_GPLL4] = &gcc_gpll4.clkr, + [GCC_GPLL7] = &gcc_gpll7.clkr, + [GCC_GPLL9] = &gcc_gpll9.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_MEMNOC_GFX_CENTER_PIPELINE_CLK] = &gcc_gpu_memnoc_gfx_center_pipeline_clk.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_GPU_TCU_THROTTLE_AHB_CLK] = &gcc_gpu_tcu_throttle_ahb_clk.clkr, + [GCC_GPU_TCU_THROTTLE_CLK] = &gcc_gpu_tcu_throttle_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PHY_AUX_CLK] = &gcc_pcie_0_phy_aux_clk.clkr, + [GCC_PCIE_0_PHY_AUX_CLK_SRC] = &gcc_pcie_0_phy_aux_clk_src.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr, + [GCC_PCIE_0_PIPE_DIV_CLK_SRC] = &gcc_pcie_0_pipe_div_clk_src.clkr, + [GCC_PCIE_0_PIPEDIV2_CLK] = &gcc_pcie_0_pipediv2_clk.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr, + [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr, + [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr, + [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr, + [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr, + [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr, + [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr, + [GCC_PCIE_1_PIPE_DIV_CLK_SRC] = &gcc_pcie_1_pipe_div_clk_src.clkr, + [GCC_PCIE_1_PIPEDIV2_CLK] = &gcc_pcie_1_pipediv2_clk.clkr, + [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr, + [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr, + [GCC_PCIE_CLKREF_EN] = &gcc_pcie_clkref_en.clkr, + [GCC_PCIE_THROTTLE_CFG_CLK] = &gcc_pcie_throttle_cfg_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_CAMERA_RT_AHB_CLK] = 
&gcc_qmip_camera_rt_ahb_clk.clkr, + [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, + [GCC_QMIP_DISP_ROT_AHB_CLK] = &gcc_qmip_disp_rot_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCPU_AHB_CLK] = &gcc_qmip_video_vcpu_ahb_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr, + [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr, + [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr, + [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr, + [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr, + [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr, + [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr, + [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr, + [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr, + [GCC_QUPV3_WRAP3_CORE_2X_CLK] = &gcc_qupv3_wrap3_core_2x_clk.clkr, + [GCC_QUPV3_WRAP3_CORE_CLK] = &gcc_qupv3_wrap3_core_clk.clkr, + [GCC_QUPV3_WRAP3_QSPI_CLK] = &gcc_qupv3_wrap3_qspi_clk.clkr, + [GCC_QUPV3_WRAP3_S0_CLK] = &gcc_qupv3_wrap3_s0_clk.clkr, + [GCC_QUPV3_WRAP3_S0_CLK_SRC] = &gcc_qupv3_wrap3_s0_clk_src.clkr, + [GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC] = &gcc_qupv3_wrap3_s0_div_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_3_M_AHB_CLK] = &gcc_qupv3_wrap_3_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_3_S_AHB_CLK] = &gcc_qupv3_wrap_3_s_ahb_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + 
[GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr, + [GCC_SGMI_CLKREF_EN] = &gcc_sgmi_clkref_en.clkr, + [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr, + [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr, + [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr, + [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr, + [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr, + [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr, + [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr, + [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr, + [GCC_USB_CLKREF_EN] = &gcc_usb_clkref_en.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, +}; + +static struct gdsc *gcc_qcs8300_gdscs[] = { + [GCC_EMAC0_GDSC] = &gcc_emac0_gdsc, + [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc, + [GCC_PCIE_1_GDSC] = &gcc_pcie_1_gdsc, + [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc, + [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc, + [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc, +}; + +static const struct qcom_reset_map gcc_qcs8300_resets[] = { + [GCC_EMAC0_BCR] = { 0xb6000 }, + [GCC_PCIE_0_BCR] = { 0xa9000 }, + [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbf000 }, + [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbf008 }, + [GCC_PCIE_0_PHY_BCR] = { 0xa9144 }, + [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbf00c }, + [GCC_PCIE_1_BCR] = { 0x77000 }, + [GCC_PCIE_1_LINK_DOWN_BCR] = { 0xae084 }, + [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0xae090 }, + [GCC_PCIE_1_PHY_BCR] = { 0xae08c }, + [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0xae094 }, + [GCC_SDCC1_BCR] = { 0x20000 }, + [GCC_UFS_PHY_BCR] = { 0x83000 }, + [GCC_USB20_PRIM_BCR] = { 0x1c000 }, + 
[GCC_USB2_PHY_PRIM_BCR] = { 0x5c01c }, + [GCC_USB2_PHY_SEC_BCR] = { 0x5c020 }, + [GCC_USB30_PRIM_BCR] = { 0x1b000 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x5c008 }, + [GCC_USB3_PHY_PRIM_BCR] = { 0x5c000 }, + [GCC_USB3_PHY_TERT_BCR] = { 0x5c024 }, + [GCC_USB3_UNIPHY_MP0_BCR] = { 0x5c00c }, + [GCC_USB3_UNIPHY_MP1_BCR] = { 0x5c010 }, + [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x5c004 }, + [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5c014 }, + [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x5c018 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x76000 }, + [GCC_VIDEO_AXI0_CLK_ARES] = { 0x34014, 2 }, + [GCC_VIDEO_AXI1_CLK_ARES] = { 0x3401c, 2 }, + [GCC_VIDEO_BCR] = { 0x34000 }, +}; + +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap3_s0_clk_src), +}; + +static const struct regmap_config gcc_qcs8300_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x472cffc, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_qcs8300_desc = { + .config = &gcc_qcs8300_regmap_config, + .clks = gcc_qcs8300_clocks, + .num_clks = ARRAY_SIZE(gcc_qcs8300_clocks), + .resets = gcc_qcs8300_resets, + .num_resets = ARRAY_SIZE(gcc_qcs8300_resets), + .gdscs = gcc_qcs8300_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_qcs8300_gdscs), +}; + +static const struct of_device_id gcc_qcs8300_match_table[] = { + { .compatible = "qcom,qcs8300-gcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_qcs8300_match_table); + +static int gcc_qcs8300_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gcc_qcs8300_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, + ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) + return ret; + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_CAMERA_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x32020); /* GCC_CAMERA_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x33004); /* GCC_DISP_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x33018); /* GCC_DISP_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x7d004); /* GCC_GPU_CFG_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x34004); /* GCC_VIDEO_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x34024); /* GCC_VIDEO_XO_CLK */ + + /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */ + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + + return qcom_cc_really_probe(&pdev->dev, &gcc_qcs8300_desc, regmap); +} + +static struct platform_driver gcc_qcs8300_driver = { + .probe = gcc_qcs8300_probe, + .driver = { + .name = "gcc-qcs8300", + .of_match_table = gcc_qcs8300_match_table, + }, +}; + +static int __init gcc_qcs8300_init(void) +{ + return platform_driver_register(&gcc_qcs8300_driver); +} +subsys_initcall(gcc_qcs8300_init); + +static void __exit 
gcc_qcs8300_exit(void) +{ + platform_driver_unregister(&gcc_qcs8300_driver); +} +module_exit(gcc_qcs8300_exit); + +MODULE_DESCRIPTION("QTI GCC QCS8300 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/gcc-sar2130p.c b/drivers/clk/qcom/gcc-sar2130p.c new file mode 100644 index 000000000000..475e2cda3618 --- /dev/null +++ b/drivers/clk/qcom/gcc-sar2130p.c @@ -0,0 +1,2366 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Linaro Limited + */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sar2130p-gcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "clk-regmap-phy-mux.h" +#include "gdsc.h" +#include "reset.h" + +/* Need to match the order of clocks in DT binding */ +enum { + DT_BI_TCXO, + DT_SLEEP_CLK, + + DT_PCIE_0_PIPE, + DT_PCIE_1_PIPE, + + DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE, +}; + +enum { + P_BI_TCXO, + P_GCC_GPLL0_OUT_EVEN, + P_GCC_GPLL0_OUT_MAIN, + P_GCC_GPLL1_OUT_EVEN, + P_GCC_GPLL1_OUT_MAIN, + P_GCC_GPLL4_OUT_MAIN, + P_GCC_GPLL5_OUT_MAIN, + P_GCC_GPLL7_OUT_MAIN, + P_GCC_GPLL9_OUT_EVEN, + P_PCIE_0_PIPE_CLK, + P_PCIE_1_PIPE_CLK, + P_SLEEP_CLK, + P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, +}; + +static struct clk_alpha_pll gcc_gpll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_gcc_gpll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static struct clk_alpha_pll gcc_gpll1 = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll4 = { + .offset = 0x4000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll4", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll5 = { + .offset = 0x5000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(5), + .hw.init = &(const 
struct clk_init_data) { + .name = "gcc_gpll5", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll7 = { + .offset = 0x7000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll7", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll9 = { + .offset = 0x9000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll9", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_gcc_gpll9_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gcc_gpll9_out_even = { + .offset = 0x9000, + .post_div_shift = 10, + .post_div_table = post_div_table_gcc_gpll9_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll9_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll9_out_even", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll9.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .index = DT_SLEEP_CLK }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_GCC_GPLL5_OUT_MAIN, 3 }, + { P_GCC_GPLL1_OUT_MAIN, 4 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll7.clkr.hw }, + { .hw = &gcc_gpll5.clkr.hw }, + { .hw = &gcc_gpll1.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const struct clk_parent_data gcc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL9_OUT_EVEN, 2 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll9_out_even.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_7[] 
= { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL1_OUT_EVEN, 2 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_7[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll1.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_8[] = { + { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE }, + { .index = DT_BI_TCXO }, +}; + +static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = { + .reg = 0x7b070, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE_0_PIPE, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = { + .reg = 0x9d06c, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipe_clk_src", + .parent_data = &(const struct clk_parent_data) { + .index = DT_PCIE_1_PIPE, + }, + .num_parents = 1, + .ops = &clk_regmap_phy_mux_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { + .reg = 0x4906c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_8, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_pipe_clk_src", + .parent_data = gcc_parent_data_8, + .num_parents = ARRAY_SIZE(gcc_parent_data_8), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_ddrss_spad_clk_src[] = { + F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0), + F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0), + F(426400000, P_GCC_GPLL1_OUT_MAIN, 2.5, 0, 0), + F(500000000, P_GCC_GPLL7_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ddrss_spad_clk_src = { + .cmd_rcgr = 0x70004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_ddrss_spad_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_spad_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x74004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp1_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x75004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp2_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x76004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp3_clk_src", + .parent_data = gcc_parent_data_1, 
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0x7b074, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = { + .cmd_rcgr = 0x7b058, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_aux_clk_src = { + .cmd_rcgr = 0x9d070, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = { + .cmd_rcgr = 0x9d054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x43010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375), + F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75), + F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625), + F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + 
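+ /* QUP SE clock: 16-bit M/N:D RCG on parent map 0; it is handed over to hardware DFS by qcom_cc_register_rcg_dfs() in probe (see gcc_dfs_clocks). */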
.cmd_rcgr = 0x28018, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s1_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x28150, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x28288, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x283c0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x284f8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x28630, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x2e018, + .mnd_width = 16, + .hid_width = 5, + .parent_map = 
gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x2e150, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x2e288, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x2e3c0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x2e4f8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x2e630, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { + F(144000, P_BI_TCXO, 16, 3, 25), + F(400000, P_BI_TCXO, 12, 1, 4), + F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3), + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(192000000, P_GCC_GPLL9_OUT_EVEN, 2, 0, 0), + F(384000000, P_GCC_GPLL9_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0x26018, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_apps_clk_src", + .parent_data = gcc_parent_data_6, + .num_parents = ARRAY_SIZE(gcc_parent_data_6), + .ops = &clk_rcg2_shared_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 
0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x2603c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_data = gcc_parent_data_7, + .num_parents = ARRAY_SIZE(gcc_parent_data_7), + .ops = &clk_rcg2_shared_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0), + F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0x4902c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0x49044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0x49070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = { + .reg = 0x4905c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = { + .halt_reg = 0x7b094, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x7b094, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(17), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_noc_pcie_1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0x4908c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4908c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4908c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x48004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x48004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
gcc_cfg_noc_pcie_anoc_ahb_clk = { + .halt_reg = 0x20034, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x20034, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(20), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_pcie_anoc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0x49088, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x49088, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x49088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x81154, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x81154, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x81154, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_pcie_sf_clk = { + .halt_reg = 0x9d098, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x9d098, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_pcie_sf_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_spad_clk = { + .halt_reg = 0x70000, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x70000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x70000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_spad_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ddrss_spad_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0x37008, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x37008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x37008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x74000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x74000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x75000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x75000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x76000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x76000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp3_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gp3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = 
BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0_out_even.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x9b010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9b010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x9b010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x9b018, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x9b018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_iris_ss_hf_axi1_clk = { + .halt_reg = 0x42030, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x42030, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_iris_ss_hf_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_iris_ss_spd_axi1_clk = { + .halt_reg = 0x70020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x70020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x70020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_iris_ss_spd_axi1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_ddrss_spad_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x7b03c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(3), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0x7b038, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7b038, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0x7b02c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x7b02c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_phy_rchng_clk = { + .halt_reg = 0x7b054, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_rchng_clk", + 
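+ /* PCIe PHY rate-change clock; its RCG above runs at a fixed 100 MHz from the even GPLL0 output. */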
.parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0x7b048, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_0_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0x7b020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7b020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0x7b01c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(5), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x9d038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(29), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .halt_reg = 0x9d034, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9d034, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(28), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .halt_reg = 0x9d028, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x9d028, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_phy_rchng_clk = { + .halt_reg = 0x9d050, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x9d044, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(30), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pcie_1_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .halt_reg = 0x9d01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9d01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = { + .halt_reg = 0x9d018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x4300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_pdm2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x43004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x43004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x43004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x43008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x43008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_gpu_ahb_clk = { + .halt_reg = 0x9b008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9b008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x9b008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_gpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_pcie_ahb_clk = { + .halt_reg = 0x7b018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7b018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(11), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_pcie_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = { + .halt_reg = 0x42014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x42014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_cv_cpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cvp_ahb_clk = { + .halt_reg = 0x42008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x42008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_cvp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_lsr_ahb_clk = { + .halt_reg = 0x4204c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4204c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4204c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_lsr_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = { + .halt_reg = 0x42010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x42010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_v_cpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0x4200c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4200c, + .hwcg_bit = 1, + .clkr = { + .enable_reg 
= 0x4200c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x33034, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(18), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x33024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x2800c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x28144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x2827c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(24), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x283b4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x284ec, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x28624, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap0_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = { + .halt_reg = 0x3317c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + 
.enable_mask = BIT(3), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_clk = { + .halt_reg = 0x3316c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x2e00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x2e144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(5), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s1_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x2e27c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(6), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x2e3b4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x2e4ec, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x2e624, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_qupv3_wrap1_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x28004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x28004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(20), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x28008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x28008, + 
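+ /* hwcg: hardware clock gating control shares the halt register; the bit index is set below */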
.hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(21), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x2e004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2e004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + .halt_reg = 0x2e008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2e008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x26010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x26004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_apps_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sdcc1_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x26030, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x26030, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x26030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc1_ice_core_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_sdcc1_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0x49018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_master_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0x49028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0x49024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0x49060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_hws = (const struct 
clk_hw*[]) { + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0x49064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_reg = 0x49068, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x49068, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x49068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_pipe_clk", + .parent_hws = (const struct clk_hw*[]) { + &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0x42018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x42018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_clk = { + .halt_reg = 0x42024, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x42024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc = { + .gdscr = 0x8d204, + .pd = { + .name = "hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc = { + .gdscr = 0x8d054, + .pd = { + .name = "hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = { + .gdscr = 0x8d05c, + .pd = { + .name = "hlos1_vote_turing_mmu_tbu0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = { + .gdscr = 0x8d060, + .pd = { + .name = "hlos1_vote_turing_mmu_tbu1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc pcie_0_gdsc = { + .gdscr = 0x7b004, + .collapse_ctrl = 0x62200, + .collapse_mask = BIT(0), + .pd = { + .name = "pcie_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, +}; + +static struct gdsc pcie_0_phy_gdsc = { + .gdscr = 0x7c000, + .collapse_ctrl = 0x62200, + .collapse_mask = BIT(3), + .pd = { + .name = "pcie_0_phy_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, +}; + +static struct gdsc pcie_1_gdsc = { + .gdscr = 0x9d004, + .collapse_ctrl = 0x62200, + .collapse_mask = BIT(1), + .pd = { + .name = "pcie_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, +}; + +static struct gdsc pcie_1_phy_gdsc = { + .gdscr = 0x9e000, + .collapse_ctrl = 0x62200, + .collapse_mask = BIT(4), + .pd = { + .name = "pcie_1_phy_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, +}; + +static struct gdsc usb30_prim_gdsc = { + .gdscr = 0x49004, + .pd = { + .name = "usb30_prim_gdsc", + }, + .pwrsts = 
PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE, +}; + +static struct gdsc usb3_phy_gdsc = { + .gdscr = 0x60018, + .pd = { + .name = "usb3_phy_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *gcc_sar2130p_clocks[] = { + [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr, + [GCC_DDRSS_SPAD_CLK] = &gcc_ddrss_spad_clk.clkr, + [GCC_DDRSS_SPAD_CLK_SRC] = &gcc_ddrss_spad_clk_src.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GPLL0] = &gcc_gpll0.clkr, + [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr, + [GCC_GPLL1] = &gcc_gpll1.clkr, + [GCC_GPLL4] = &gcc_gpll4.clkr, + [GCC_GPLL5] = &gcc_gpll5.clkr, + [GCC_GPLL7] = &gcc_gpll7.clkr, + [GCC_GPLL9] = &gcc_gpll9.clkr, + [GCC_GPLL9_OUT_EVEN] = &gcc_gpll9_out_even.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_IRIS_SS_HF_AXI1_CLK] = &gcc_iris_ss_hf_axi1_clk.clkr, + [GCC_IRIS_SS_SPD_AXI1_CLK] = &gcc_iris_ss_spd_axi1_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr, + [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr, + [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr, + [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr, + [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr, + [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr, + [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr, + [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_LSR_AHB_CLK] = &gcc_qmip_video_lsr_ahb_clk.clkr, + [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = 
&gcc_qmip_video_v_cpu_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr, + [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, +}; + +static const struct qcom_reset_map gcc_sar2130p_resets[] = { + [GCC_DISPLAY_BCR] = { 0x37000 }, + [GCC_GPU_BCR] = { 0x9b000 }, + [GCC_PCIE_0_BCR] = { 0x7b000 }, + [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x7c014 }, + 
[GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x7c020 }, + [GCC_PCIE_0_PHY_BCR] = { 0x7c01c }, + [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x7c028 }, + [GCC_PCIE_1_BCR] = { 0x9d000 }, + [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e014 }, + [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x9e020 }, + [GCC_PCIE_1_PHY_BCR] = { 0x9e01c }, + [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x9e024 }, + [GCC_PCIE_PHY_BCR] = { 0x7f000 }, + [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c }, + [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 }, + [GCC_PDM_BCR] = { 0x43000 }, + [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 }, + [GCC_QUPV3_WRAPPER_1_BCR] = { 0x2e000 }, + [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 }, + [GCC_SDCC1_BCR] = { 0x26000 }, + [GCC_USB30_PRIM_BCR] = { 0x49000 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 }, + [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 }, + [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 }, + [GCC_USB3_PHY_SEC_BCR] = { 0x6000c }, + [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 }, + [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 }, + [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x42018, .bit = 2, .udelay = 1000 }, + [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x42024, .bit = 2, .udelay = 1000 }, + [GCC_VIDEO_BCR] = { 0x42000 }, + [GCC_IRIS_SS_HF_AXI_CLK_ARES] = { .reg = 0x42030, .bit = 2 }, + [GCC_IRIS_SS_SPD_AXI_CLK_ARES] = { .reg = 0x70020, .bit = 2 }, + [GCC_DDRSS_SPAD_CLK_ARES] = { .reg = 0x70000, .bit = 2 }, +}; + +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src), +}; + +static struct gdsc *gcc_sar2130p_gdscs[] = { + [HLOS1_VOTE_MM_SNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc, + [HLOS1_VOTE_MM_SNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc, + [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc, + [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc, + [PCIE_0_GDSC] = &pcie_0_gdsc, + [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc, + [PCIE_1_GDSC] = &pcie_1_gdsc, + [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc, + [USB30_PRIM_GDSC] = &usb30_prim_gdsc, + [USB3_PHY_GDSC] = &usb3_phy_gdsc, +}; + +static const struct regmap_config gcc_sar2130p_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1f1030, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_sar2130p_desc = { + .config = &gcc_sar2130p_regmap_config, + .clks = gcc_sar2130p_clocks, + .num_clks = ARRAY_SIZE(gcc_sar2130p_clocks), + .resets = gcc_sar2130p_resets, + .num_resets = ARRAY_SIZE(gcc_sar2130p_resets), + .gdscs = gcc_sar2130p_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_sar2130p_gdscs), +}; + +static const struct of_device_id gcc_sar2130p_match_table[] = { + { .compatible = "qcom,sar2130p-gcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sar2130p_match_table); + +static int gcc_sar2130p_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gcc_sar2130p_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, + ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) 
+ return ret; + + /* Keep some clocks always-on */ + qcom_branch_set_clk_en(regmap, 0x37004); /* GCC_DISP_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x42004); /* GCC_VIDEO_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x42028); /* GCC_VIDEO_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x9b004); /* GCC_GPU_CFG_AHB_CLK */ + + /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */ + regmap_write(regmap, 0x62204, 0x0); + + return qcom_cc_really_probe(&pdev->dev, &gcc_sar2130p_desc, regmap); +} + +static struct platform_driver gcc_sar2130p_driver = { + .probe = gcc_sar2130p_probe, + .driver = { + .name = "gcc-sar2130p", + .of_match_table = gcc_sar2130p_match_table, + }, +}; + +static int __init gcc_sar2130p_init(void) +{ + return platform_driver_register(&gcc_sar2130p_driver); +} +subsys_initcall(gcc_sar2130p_init); + +static void __exit gcc_sar2130p_exit(void) +{ + platform_driver_unregister(&gcc_sar2130p_driver); +} +module_exit(gcc_sar2130p_exit); + +MODULE_DESCRIPTION("QTI GCC SAR2130P Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c index c445c271678a..65d7d52bce03 100644 --- a/drivers/clk/qcom/gcc-sm8450.c +++ b/drivers/clk/qcom/gcc-sm8450.c @@ -26,6 +26,8 @@ enum { P_BI_TCXO, P_GCC_GPLL0_OUT_EVEN, P_GCC_GPLL0_OUT_MAIN, + P_SM8475_GCC_GPLL2_OUT_EVEN, + P_SM8475_GCC_GPLL3_OUT_EVEN, P_GCC_GPLL4_OUT_MAIN, P_GCC_GPLL9_OUT_MAIN, P_PCIE_1_PHY_AUX_CLK, @@ -36,6 +38,15 @@ enum { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, }; +static struct clk_init_data sm8475_gcc_gpll0_init = { + .name = "gcc_gpll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, +}; + static struct clk_alpha_pll gcc_gpll0 = { .offset = 0x0, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], @@ -53,6 +64,15 @@ static struct clk_alpha_pll gcc_gpll0 = { }, }; +static struct clk_init_data sm8475_gcc_gpll0_out_even_init = { + .name = "gcc_gpll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, +}; + static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = { { 0x1, 2 }, { } @@ -75,6 +95,49 @@ static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = { }, }; +static struct clk_alpha_pll sm8475_gcc_gpll2 = { + .offset = 0x2000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll2", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static struct clk_alpha_pll sm8475_gcc_gpll3 = { + .offset = 0x3000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll3", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + }, +}; + +static struct clk_init_data sm8475_gcc_gpll4_init = { + .name = "gcc_gpll4", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, +}; + static struct clk_alpha_pll gcc_gpll4 = { .offset = 0x4000, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], @@ -92,6 +155,15 @@ static struct 
clk_alpha_pll gcc_gpll4 = { }, }; +static struct clk_init_data sm8475_gcc_gpll9_init = { + .name = "gcc_gpll9", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, +}; + static struct clk_alpha_pll gcc_gpll9 = { .offset = 0x9000, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], @@ -153,6 +225,22 @@ static const struct clk_parent_data gcc_parent_data_3[] = { { .fw_name = "bi_tcxo" }, }; +static const struct parent_map sm8475_gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_SM8475_GCC_GPLL2_OUT_EVEN, 2 }, + { P_SM8475_GCC_GPLL3_OUT_EVEN, 3 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data sm8475_gcc_parent_data_3[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &sm8475_gcc_gpll2.clkr.hw }, + { .hw = &sm8475_gcc_gpll3.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + static const struct parent_map gcc_parent_map_5[] = { { P_PCIE_1_PHY_AUX_CLK, 0 }, { P_BI_TCXO, 2 }, @@ -915,6 +1003,16 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init, }; +static const struct freq_tbl sm8475_ftbl_gcc_sdcc2_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(37000000, P_GCC_GPLL9_OUT_MAIN, 16, 0, 0), + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(148000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0), + { } +}; + static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { F(400000, P_BI_TCXO, 12, 1, 4), F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), @@ -963,6 +1061,25 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { }, }; +static const struct freq_tbl sm8475_ftbl_gcc_ufs_phy_axi_clk_src[] = { + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0), + F(806400000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0), + F(850000000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_init_data sm8475_gcc_ufs_phy_axi_clk_src_init = { + .name = "gcc_ufs_phy_axi_clk_src", + .parent_data = sm8475_gcc_parent_data_3, + .num_parents = ARRAY_SIZE(sm8475_gcc_parent_map_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, +}; + static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = { F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), @@ -987,6 +1104,24 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { }, }; +static const struct freq_tbl sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src[] = { + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0), + F(806400000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0), + F(850000000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_init_data sm8475_gcc_ufs_phy_ice_core_clk_src_init = { + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_data = sm8475_gcc_parent_data_3, + .num_parents = ARRAY_SIZE(sm8475_gcc_parent_map_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, +}; + static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = { F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), @@ -1032,6 +1167,14 @@ static struct clk_rcg2 
gcc_ufs_phy_phy_aux_clk_src = { }, }; +static struct clk_init_data sm8475_gcc_ufs_phy_unipro_core_clk_src_init = { + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_data = sm8475_gcc_parent_data_3, + .num_parents = ARRAY_SIZE(sm8475_gcc_parent_map_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, +}; + static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { .cmd_rcgr = 0x8708c, .mnd_width = 0, @@ -3166,6 +3309,8 @@ static struct clk_regmap *gcc_sm8450_clocks[] = { [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr, [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, + [SM8475_GCC_GPLL2] = NULL, + [SM8475_GCC_GPLL3] = NULL, }; static const struct qcom_reset_map gcc_sm8450_resets[] = { @@ -3259,6 +3404,7 @@ static const struct qcom_cc_desc gcc_sm8450_desc = { static const struct of_device_id gcc_sm8450_match_table[] = { { .compatible = "qcom,gcc-sm8450" }, + { .compatible = "qcom,sm8475-gcc" }, { } }; MODULE_DEVICE_TABLE(of, gcc_sm8450_match_table); @@ -3277,6 +3423,39 @@ static int gcc_sm8450_probe(struct platform_device *pdev) if (ret) return ret; + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-gcc")) { + /* Update GCC PLL0 */ + gcc_gpll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + gcc_gpll0.clkr.hw.init = &sm8475_gcc_gpll0_init; + gcc_gpll0_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + gcc_gpll0_out_even.clkr.hw.init = &sm8475_gcc_gpll0_out_even_init; + + /* Update GCC PLL4 */ + gcc_gpll4.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + gcc_gpll4.clkr.hw.init = &sm8475_gcc_gpll4_init; + + /* Update GCC PLL9 */ + gcc_gpll9.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + gcc_gpll9.clkr.hw.init = &sm8475_gcc_gpll9_init; + + gcc_sdcc2_apps_clk_src.freq_tbl = sm8475_ftbl_gcc_sdcc2_apps_clk_src; + + gcc_ufs_phy_axi_clk_src.parent_map = sm8475_gcc_parent_map_3; + gcc_ufs_phy_axi_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_axi_clk_src; + gcc_ufs_phy_axi_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_axi_clk_src_init; + + gcc_ufs_phy_ice_core_clk_src.parent_map = sm8475_gcc_parent_map_3; + gcc_ufs_phy_ice_core_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src; + gcc_ufs_phy_ice_core_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_ice_core_clk_src_init; + + gcc_ufs_phy_unipro_core_clk_src.parent_map = sm8475_gcc_parent_map_3; + gcc_ufs_phy_unipro_core_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src; + gcc_ufs_phy_unipro_core_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_unipro_core_clk_src_init; + + gcc_sm8450_desc.clks[SM8475_GCC_GPLL2] = &sm8475_gcc_gpll2.clkr; + gcc_sm8450_desc.clks[SM8475_GCC_GPLL3] = &sm8475_gcc_gpll3.clkr; + } + /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */ regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14)); @@ -3312,5 +3491,5 @@ static void __exit gcc_sm8450_exit(void) } module_exit(gcc_sm8450_exit); -MODULE_DESCRIPTION("QTI GCC SM8450 Driver"); +MODULE_DESCRIPTION("QTI GCC SM8450 / SM8475 Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/gpucc-sar2130p.c b/drivers/clk/qcom/gpucc-sar2130p.c new file mode 100644 index 000000000000..dd72b2a48c42 --- /dev/null +++ b/drivers/clk/qcom/gpucc-sar2130p.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2024, Linaro Limited + */ + +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sar2130p-gpucc.h> +#include <dt-bindings/reset/qcom,sar2130p-gpucc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_BI_TCXO, + DT_GPLL0_OUT_MAIN, + DT_GPLL0_OUT_MAIN_DIV, +}; + +enum { + P_BI_TCXO, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL0_OUT_MAIN, + P_GPU_CC_PLL1_OUT_MAIN, +}; + +static const struct pll_vco lucid_ole_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 470MHz Configuration */ +static const struct alpha_pll_config gpu_cc_pll0_config = { + .l = 0x18, + .alpha = 0x7aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll gpu_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +/* 440MHz Configuration */ +static const struct alpha_pll_config gpu_cc_pll1_config = { + .l = 0x16, + .alpha = 0xeaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .index = DT_GPLL0_OUT_MAIN }, + { .index = DT_GPLL0_OUT_MAIN_DIV }, +}; + +static const struct parent_map gpu_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_MAIN, 1 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &gpu_cc_pll0.clkr.hw }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .index = DT_GPLL0_OUT_MAIN }, + { .index = DT_GPLL0_OUT_MAIN_DIV }, +}; + +static const struct parent_map gpu_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .index = DT_GPLL0_OUT_MAIN }, + { .index = 
DT_GPLL0_OUT_MAIN_DIV }, +}; + +static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = { + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_ff_clk_src = { + .cmd_rcgr = 0x9474, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_ff_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_ff_clk_src", + .parent_data = gpu_cc_parent_data_0, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0), + F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x9318, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_1, + .freq_tbl = ftbl_gpu_cc_gmu_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gmu_clk_src", + .parent_data = gpu_cc_parent_data_1, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gpu_cc_hub_clk_src = { + .cmd_rcgr = 0x93ec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_2, + .freq_tbl = ftbl_gpu_cc_ff_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_clk_src", + .parent_data = gpu_cc_parent_data_2, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x911c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x911c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x9120, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9120, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_crc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_ff_clk = { + .halt_reg = 0x914c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x914c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cx_ff_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_ff_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x913c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x913c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cx_gmu_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_aon_clk = { + .halt_reg = 0x9004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cxo_aon_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + 
.halt_reg = 0x9144, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9144, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cxo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gmu_clk = { + .halt_reg = 0x90bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90bc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_gmu_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hub_aon_clk = { + .halt_reg = 0x93e8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x93e8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_aon_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hub_cx_int_clk = { + .halt_reg = 0x9148, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9148, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_cx_int_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_memnoc_gfx_clk = { + .halt_reg = 0x9150, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9150, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = { + .halt_reg = 0x7000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hlos1_vote_gpu_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_sleep_clk = { + .halt_reg = 0x9134, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9134, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc gpu_cx_gdsc = { + .gdscr = 0x9108, + .gds_hw_ctrl = 0x953c, + .clk_dis_wait_val = 8, + .pd = { + .name = "gpu_cx_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, +}; + +static struct gdsc gpu_gx_gdsc = { + .gdscr = 0x905c, + .clamp_io_ctrl = 0x9504, + .resets = (unsigned int []){ GPUCC_GPU_CC_GX_BCR, + GPUCC_GPU_CC_ACD_BCR, + GPUCC_GPU_CC_GX_ACD_IROOT_BCR }, + .reset_count = 3, + .pd = { + .name = "gpu_gx_gdsc", + .power_on = gdsc_gx_do_nothing_enable, + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = CLAMP_IO | AON_RESET | SW_RESET, +}; + +static struct clk_regmap *gpu_cc_sar2130p_clocks[] = { + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr, + [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr, + [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr, + [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr, + [GPU_CC_HUB_AON_CLK] = 
&gpu_cc_hub_aon_clk.clkr, + [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr, + [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr, + [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr, + [GPU_CC_PLL0] = &gpu_cc_pll0.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, + [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr, +}; + +static const struct qcom_reset_map gpu_cc_sar2130p_resets[] = { + [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 }, + [GPUCC_GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c }, + [GPUCC_GPU_CC_GX_BCR] = { 0x9058 }, +}; + +static struct gdsc *gpu_cc_sar2130p_gdscs[] = { + [GPU_CX_GDSC] = &gpu_cx_gdsc, + [GPU_GX_GDSC] = &gpu_gx_gdsc, +}; + +static const struct regmap_config gpu_cc_sar2130p_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xa000, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_sar2130p_desc = { + .config = &gpu_cc_sar2130p_regmap_config, + .clks = gpu_cc_sar2130p_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_sar2130p_clocks), + .resets = gpu_cc_sar2130p_resets, + .num_resets = ARRAY_SIZE(gpu_cc_sar2130p_resets), + .gdscs = gpu_cc_sar2130p_gdscs, + .num_gdscs = ARRAY_SIZE(gpu_cc_sar2130p_gdscs), +}; + +static const struct of_device_id gpu_cc_sar2130p_match_table[] = { + { .compatible = "qcom,sar2130p-gpucc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpu_cc_sar2130p_match_table); + +static int gpu_cc_sar2130p_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gpu_cc_sar2130p_desc); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), "Couldn't map GPU_CC\n"); + + clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); + clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + + /* Keep some clocks always-on */ + qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */ + + return qcom_cc_really_probe(dev, &gpu_cc_sar2130p_desc, regmap); +} + +static struct platform_driver gpu_cc_sar2130p_driver = { + .probe = gpu_cc_sar2130p_probe, + .driver = { + .name = "gpu_cc-sar2130p", + .of_match_table = gpu_cc_sar2130p_match_table, + }, +}; +module_platform_driver(gpu_cc_sar2130p_driver); + +MODULE_DESCRIPTION("QTI GPU_CC SAR2130P Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/gpucc-sm8450.c b/drivers/clk/qcom/gpucc-sm8450.c index b3c5d6923cd2..059df72deaa1 100644 --- a/drivers/clk/qcom/gpucc-sm8450.c +++ b/drivers/clk/qcom/gpucc-sm8450.c @@ -40,7 +40,7 @@ static const struct pll_vco lucid_evo_vco[] = { { 249600000, 2000000000, 0 }, }; -static struct alpha_pll_config gpu_cc_pll0_config = { +static const struct alpha_pll_config gpu_cc_pll0_config = { .l = 0x1d, .alpha = 0xb000, .config_ctl_val = 0x20485699, @@ -50,6 +50,20 @@ static struct alpha_pll_config gpu_cc_pll0_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_gpu_cc_pll0_config = { + .l = 0x1d, + .alpha = 0xb000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll gpu_cc_pll0 = { .offset = 0x0, .vco_table = lucid_evo_vco, @@ -67,7 +81,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = { }, }; -static struct alpha_pll_config gpu_cc_pll1_config = { +static const struct alpha_pll_config gpu_cc_pll1_config = { .l = 0x34, .alpha = 0x1555, 
.config_ctl_val = 0x20485699, @@ -77,6 +91,20 @@ static struct alpha_pll_config gpu_cc_pll1_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_gpu_cc_pll1_config = { + .l = 0x34, + .alpha = 0x1555, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll gpu_cc_pll1 = { .offset = 0x1000, .vco_table = lucid_evo_vco, @@ -736,6 +764,7 @@ static const struct qcom_cc_desc gpu_cc_sm8450_desc = { static const struct of_device_id gpu_cc_sm8450_match_table[] = { { .compatible = "qcom,sm8450-gpucc" }, + { .compatible = "qcom,sm8475-gpucc" }, { } }; MODULE_DEVICE_TABLE(of, gpu_cc_sm8450_match_table); @@ -748,8 +777,19 @@ static int gpu_cc_sm8450_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap); - clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); - clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-gpucc")) { + /* Update GPUCC PLL0 */ + gpu_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + + /* Update GPUCC PLL1 */ + gpu_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + + clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &sm8475_gpu_cc_pll0_config); + clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &sm8475_gpu_cc_pll1_config); + } else { + clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); + clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + } return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8450_desc, regmap); } @@ -763,5 +803,5 @@ static struct platform_driver gpu_cc_sm8450_driver = { }; module_platform_driver(gpu_cc_sm8450_driver); -MODULE_DESCRIPTION("QTI GPU_CC SM8450 Driver"); +MODULE_DESCRIPTION("QTI GPU_CC SM8450 / SM8475 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c index e5e8f2e82b94..41d73f92a000 100644 --- a/drivers/clk/qcom/tcsrcc-sm8550.c +++ b/drivers/clk/qcom/tcsrcc-sm8550.c @@ -129,6 +129,13 @@ static struct clk_branch tcsr_usb3_clkref_en = { }, }; +static struct clk_regmap *tcsr_cc_sar2130p_clocks[] = { + [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr, + [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr, + [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr, + [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr, +}; + static struct clk_regmap *tcsr_cc_sm8550_clocks[] = { [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr, [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr, @@ -146,6 +153,12 @@ static const struct regmap_config tcsr_cc_sm8550_regmap_config = { .fast_io = true, }; +static const struct qcom_cc_desc tcsr_cc_sar2130p_desc = { + .config = &tcsr_cc_sm8550_regmap_config, + .clks = tcsr_cc_sar2130p_clocks, + .num_clks = ARRAY_SIZE(tcsr_cc_sar2130p_clocks), +}; + static const struct qcom_cc_desc tcsr_cc_sm8550_desc = { .config = &tcsr_cc_sm8550_regmap_config, .clks = tcsr_cc_sm8550_clocks, @@ -153,7 +166,8 @@ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = { }; static const struct of_device_id tcsr_cc_sm8550_match_table[] = { - { .compatible = "qcom,sm8550-tcsr" }, + { .compatible = "qcom,sar2130p-tcsr", .data = &tcsr_cc_sar2130p_desc }, + { .compatible = 
"qcom,sm8550-tcsr", .data = &tcsr_cc_sm8550_desc }, { } }; MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table); @@ -162,7 +176,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev) { struct regmap *regmap; - regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc); + regmap = qcom_cc_map(pdev, of_device_get_match_data(&pdev->dev)); if (IS_ERR(regmap)) return PTR_ERR(regmap); diff --git a/drivers/clk/qcom/videocc-sa8775p.c b/drivers/clk/qcom/videocc-sa8775p.c new file mode 100644 index 000000000000..bf5de411fd5d --- /dev/null +++ b/drivers/clk/qcom/videocc-sa8775p.c @@ -0,0 +1,576 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sa8775p-videocc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_IFACE, + DT_BI_TCXO, + DT_BI_TCXO_AO, + DT_SLEEP_CLK, +}; + +enum { + P_BI_TCXO, + P_BI_TCXO_AO, + P_SLEEP_CLK, + P_VIDEO_PLL0_OUT_MAIN, + P_VIDEO_PLL1_OUT_MAIN, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +static const struct alpha_pll_config video_pll0_config = { + .l = 0x39, + .alpha = 0x3000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll video_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "video_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct alpha_pll_config video_pll1_config = { + .l = 0x39, + .alpha = 0x3000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00400805, +}; + +static struct clk_alpha_pll video_pll1 = { + .offset = 0x1000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "video_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map video_cc_parent_map_0_ao[] = { + { P_BI_TCXO_AO, 0 }, +}; + +static const struct clk_parent_data video_cc_parent_data_0_ao[] = { + { .index = DT_BI_TCXO_AO }, +}; + +static const struct parent_map video_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_VIDEO_PLL0_OUT_MAIN, 1 }, +}; + +static const struct clk_parent_data video_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &video_pll0.clkr.hw }, +}; + +static const struct parent_map video_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_VIDEO_PLL1_OUT_MAIN, 1 }, +}; + +static const struct clk_parent_data video_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { 
.hw = &video_pll1.clkr.hw }, +}; + +static const struct parent_map video_cc_parent_map_3[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data video_cc_parent_data_3[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = { + F(19200000, P_BI_TCXO_AO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_ahb_clk_src = { + .cmd_rcgr = 0x8030, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_0_ao, + .freq_tbl = ftbl_video_cc_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_ahb_clk_src", + .parent_data = video_cc_parent_data_0_ao, + .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = { + F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0), + F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0), + F(1599000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0), + F(1680000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_mvs0_clk_src = { + .cmd_rcgr = 0x8000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_1, + .freq_tbl = ftbl_video_cc_mvs0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0_clk_src", + .parent_data = video_cc_parent_data_1, + .num_parents = ARRAY_SIZE(video_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = { + F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0), + F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0), + F(1600000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0), + F(1800000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_mvs1_clk_src = { + .cmd_rcgr = 0x8018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_2, + .freq_tbl = ftbl_video_cc_mvs1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1_clk_src", + .parent_data = video_cc_parent_data_2, + .num_parents = ARRAY_SIZE(video_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_sleep_clk_src = { + .cmd_rcgr = 0x812c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_3, + .freq_tbl = ftbl_video_cc_sleep_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_sleep_clk_src", + .parent_data = video_cc_parent_data_3, + .num_parents = ARRAY_SIZE(video_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 video_cc_xo_clk_src = { + .cmd_rcgr = 0x8110, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_0_ao, + .freq_tbl = ftbl_video_cc_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_xo_clk_src", + .parent_data = video_cc_parent_data_0_ao, + .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs0_div_clk_src = { + .reg = 0x80b8, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = { + .reg = 0x806c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0c_div2_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs1_div_clk_src = { + .reg = 0x80dc, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = { + .reg = 0x8094, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1c_div2_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_sm_div_clk_src = { + .reg = 0x8108, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_sm_div_clk_src", + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch video_cc_mvs0_clk = { + .halt_reg = 0x80b0, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x80b0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x80b0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_mvs0c_clk = { + .halt_reg = 0x8064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0c_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs0c_div2_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_mvs1_clk = { + .halt_reg = 0x80d4, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x80d4, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x80d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs1_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_mvs1c_clk = { + .halt_reg = 0x808c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x808c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1c_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_mvs1c_div2_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_pll_lock_monitor_clk = { + .halt_reg = 0x9000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_pll_lock_monitor_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_sm_obs_clk = { + .halt_reg = 0x810c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x810c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_sm_obs_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_sm_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc video_cc_mvs0c_gdsc = { + .gdscr = 0x804c, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x6, + .pd = { + .name = "video_cc_mvs0c_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct gdsc video_cc_mvs0_gdsc = { + .gdscr = 0x809c, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x6, + .pd = { + .name = "video_cc_mvs0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &video_cc_mvs0c_gdsc.pd, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR | HW_CTRL_TRIGGER, +}; + +static struct gdsc video_cc_mvs1c_gdsc = { + .gdscr = 0x8074, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x6, + .pd = { + .name = "video_cc_mvs1c_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, +}; + +static struct gdsc video_cc_mvs1_gdsc = { + .gdscr = 0x80c0, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x6, + .pd = { + .name = "video_cc_mvs1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &video_cc_mvs1c_gdsc.pd, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR | HW_CTRL_TRIGGER, +}; + +static struct clk_regmap *video_cc_sa8775p_clocks[] = { + [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr, + [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr, + [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr, + [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr, + [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr, + [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr, + [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr, + [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr, + [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr, + [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr, + [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr, + [VIDEO_CC_PLL_LOCK_MONITOR_CLK] = &video_cc_pll_lock_monitor_clk.clkr, + [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr, + [VIDEO_CC_SM_DIV_CLK_SRC] = &video_cc_sm_div_clk_src.clkr, + [VIDEO_CC_SM_OBS_CLK] = &video_cc_sm_obs_clk.clkr, + [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr, + [VIDEO_PLL0] = &video_pll0.clkr, + [VIDEO_PLL1] = &video_pll1.clkr, +}; + +static struct gdsc *video_cc_sa8775p_gdscs[] = { + [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc, + [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc, + [VIDEO_CC_MVS1_GDSC] = &video_cc_mvs1_gdsc, + [VIDEO_CC_MVS1C_GDSC] = &video_cc_mvs1c_gdsc, +}; + +static const struct qcom_reset_map video_cc_sa8775p_resets[] = { + [VIDEO_CC_INTERFACE_BCR] = { 0x80e8 }, + [VIDEO_CC_MVS0_BCR] = { 0x8098 }, + [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 }, + [VIDEO_CC_MVS0C_BCR] = { 0x8048 }, + [VIDEO_CC_MVS1_BCR] = { 0x80bc }, + [VIDEO_CC_MVS1C_CLK_ARES] = { 0x808c, 2 }, + [VIDEO_CC_MVS1C_BCR] = { 0x8070 }, +}; + +static const struct regmap_config video_cc_sa8775p_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xb000, + .fast_io = true, +}; + +static struct 
qcom_cc_desc video_cc_sa8775p_desc = { + .config = &video_cc_sa8775p_regmap_config, + .clks = video_cc_sa8775p_clocks, + .num_clks = ARRAY_SIZE(video_cc_sa8775p_clocks), + .resets = video_cc_sa8775p_resets, + .num_resets = ARRAY_SIZE(video_cc_sa8775p_resets), + .gdscs = video_cc_sa8775p_gdscs, + .num_gdscs = ARRAY_SIZE(video_cc_sa8775p_gdscs), +}; + +static const struct of_device_id video_cc_sa8775p_match_table[] = { + { .compatible = "qcom,sa8775p-videocc" }, + { } +}; +MODULE_DEVICE_TABLE(of, video_cc_sa8775p_match_table); + +static int video_cc_sa8775p_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + + regmap = qcom_cc_map(pdev, &video_cc_sa8775p_desc); + if (IS_ERR(regmap)) { + pm_runtime_put(&pdev->dev); + return PTR_ERR(regmap); + } + + clk_lucid_evo_pll_configure(&video_pll0, regmap, &video_pll0_config); + clk_lucid_evo_pll_configure(&video_pll1, regmap, &video_pll1_config); + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x80ec); /* VIDEO_CC_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x8144); /* VIDEO_CC_SLEEP_CLK */ + qcom_branch_set_clk_en(regmap, 0x8128); /* VIDEO_CC_XO_CLK */ + + ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sa8775p_desc, regmap); + + pm_runtime_put(&pdev->dev); + + return ret; +} + +static struct platform_driver video_cc_sa8775p_driver = { + .probe = video_cc_sa8775p_probe, + .driver = { + .name = "videocc-sa8775p", + .of_match_table = video_cc_sa8775p_match_table, + }, +}; + +module_platform_driver(video_cc_sa8775p_driver); + +MODULE_DESCRIPTION("QTI VIDEOCC SA8775P Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c index ed9163d64244..f26c7eccb62e 100644 --- a/drivers/clk/qcom/videocc-sm8450.c +++ b/drivers/clk/qcom/videocc-sm8450.c @@ -46,6 +46,21 @@ static const struct alpha_pll_config video_cc_pll0_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_video_cc_pll0_config = { + /* .l includes CAL_L_VAL, L_VAL fields */ + .l = 0x1e, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll video_cc_pll0 = { .offset = 0x0, .vco_table = lucid_evo_vco, @@ -74,6 +89,21 @@ static const struct alpha_pll_config video_cc_pll1_config = { .user_ctl_hi_val = 0x00000805, }; +static const struct alpha_pll_config sm8475_video_cc_pll1_config = { + /* .l includes CAL_L_VAL, L_VAL fields */ + .l = 0x2b, + .alpha = 0xc000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + static struct clk_alpha_pll video_cc_pll1 = { .offset = 0x1000, .vco_table = lucid_evo_vco, @@ -397,6 +427,7 @@ static struct qcom_cc_desc video_cc_sm8450_desc = { static const struct of_device_id video_cc_sm8450_match_table[] = { { .compatible = "qcom,sm8450-videocc" }, + { .compatible = "qcom,sm8475-videocc" }, { } }; MODULE_DEVICE_TABLE(of, video_cc_sm8450_match_table); 
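A note on the PLL configuration values used throughout these patches: for the Lucid EVO/OLE alpha PLLs, .l is the integer feedback multiplier and .alpha a 16-bit fraction applied to the 19.2 MHz XO, i.e. rate ≈ 19.2 MHz * (l + alpha / 2^16). This reproduces the commented rates above (SAR2130P gpu_cc_pll0: 0x18 + 0x7aaa/2^16 -> ~470 MHz; SA8775P video_pll0: 0x39 + 0x3000/2^16 -> 1098 MHz, the first entry of its MVS0 frequency table), though not for the SM8475 configs whose .l also packs CAL_L_VAL, as their comments note. A standalone sanity-check sketch (illustrative only, not driver code):

/* Lucid EVO/OLE VCO rate from an alpha_pll_config, integer arithmetic. */
#include <stdint.h>
#include <stdio.h>

static uint64_t lucid_rate_hz(uint64_t xo_hz, uint32_t l, uint32_t alpha)
{
	return xo_hz * l + ((xo_hz * alpha) >> 16); /* xo * (l + alpha/2^16) */
}

int main(void)
{
	/* SAR2130P gpu_cc_pll0_config: expect ~470000000 */
	printf("%llu\n", (unsigned long long)lucid_rate_hz(19200000, 0x18, 0x7aaa));
	/* SA8775P video_pll0_config: expect 1098000000 */
	printf("%llu\n", (unsigned long long)lucid_rate_hz(19200000, 0x39, 0x3000));
	return 0;
}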
@@ -420,8 +451,19 @@ static int video_cc_sm8450_probe(struct platform_device *pdev) return PTR_ERR(regmap); } - clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config); - clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-videocc")) { + /* Update VideoCC PLL0 */ + video_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + + /* Update VideoCC PLL1 */ + video_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE]; + + clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &sm8475_video_cc_pll0_config); + clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &sm8475_video_cc_pll1_config); + } else { + clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config); + clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config); + } /* Keep some clocks always-on */ qcom_branch_set_clk_en(regmap, 0x80e4); /* VIDEO_CC_AHB_CLK */ @@ -445,5 +487,5 @@ static struct platform_driver video_cc_sm8450_driver = { module_platform_driver(video_cc_sm8450_driver); -MODULE_DESCRIPTION("QTI VIDEOCC SM8450 Driver"); +MODULE_DESCRIPTION("QTI VIDEOCC SM8450 / SM8475 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c index 50a443bf79ec..97b8ca0f9181 100644 --- a/drivers/clk/ralink/clk-mtmips.c +++ b/drivers/clk/ralink/clk-mtmips.c @@ -207,6 +207,7 @@ static struct mtmips_clk mt7620_pherip_clks[] = { { CLK_PERIPH("10000b00.spi", "bus") }, { CLK_PERIPH("10000b40.spi", "bus") }, { CLK_PERIPH("10000c00.uartlite", "periph") }, + { CLK_PERIPH("10130000.mmc", "sdhc") }, { CLK_PERIPH("10180000.wmac", "xtal") } }; @@ -220,6 +221,7 @@ static struct mtmips_clk mt76x8_pherip_clks[] = { { CLK_PERIPH("10000c00.uart0", "periph") }, { CLK_PERIPH("10000d00.uart1", "periph") }, { CLK_PERIPH("10000e00.uart2", "periph") }, + { CLK_PERIPH("10130000.mmc", "sdhc") }, { CLK_PERIPH("10300000.wmac", "xtal") } }; @@ -263,16 +265,22 @@ err_clk_unreg: .rate = _rate \ } -static struct mtmips_clk_fixed rt305x_fixed_clocks[] = { - CLK_FIXED("xtal", NULL, 40000000) +static struct mtmips_clk_fixed rt3883_fixed_clocks[] = { + CLK_FIXED("xtal", NULL, 40000000), + CLK_FIXED("periph", "xtal", 40000000) }; static struct mtmips_clk_fixed rt3352_fixed_clocks[] = { CLK_FIXED("periph", "xtal", 40000000) }; +static struct mtmips_clk_fixed mt7620_fixed_clocks[] = { + CLK_FIXED("bbppll", "xtal", 480000000) +}; + static struct mtmips_clk_fixed mt76x8_fixed_clocks[] = { - CLK_FIXED("pcmi2s", "xtal", 480000000), + CLK_FIXED("bbppll", "xtal", 480000000), + CLK_FIXED("pcmi2s", "bbppll", 480000000), CLK_FIXED("periph", "xtal", 40000000) }; @@ -327,6 +335,15 @@ static struct mtmips_clk_factor rt305x_factor_clocks[] = { CLK_FACTOR("bus", "cpu", 1, 3) }; +static struct mtmips_clk_factor mt7620_factor_clocks[] = { + CLK_FACTOR("sdhc", "bbppll", 1, 10) +}; + +static struct mtmips_clk_factor mt76x8_factor_clocks[] = { + CLK_FACTOR("bus", "cpu", 1, 3), + CLK_FACTOR("sdhc", "bbppll", 1, 10) +}; + static int mtmips_register_factor_clocks(struct clk_hw_onecell_data *clk_data, struct mtmips_clk_priv *priv) { @@ -366,6 +383,12 @@ static inline struct mtmips_clk *to_mtmips_clk(struct clk_hw *hw) return container_of(hw, struct mtmips_clk, hw); } +static unsigned long rt2880_xtal_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return 40000000; +} + static unsigned long rt5350_xtal_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -677,10 
+700,12 @@ static unsigned long mt76x8_cpu_recalc_rate(struct clk_hw *hw, } static struct mtmips_clk rt2880_clks_base[] = { + { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) }, { CLK_BASE("cpu", "xtal", rt2880_cpu_recalc_rate) } }; static struct mtmips_clk rt305x_clks_base[] = { + { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) }, { CLK_BASE("cpu", "xtal", rt305x_cpu_recalc_rate) } }; @@ -690,6 +715,7 @@ static struct mtmips_clk rt3352_clks_base[] = { }; static struct mtmips_clk rt3883_clks_base[] = { + { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) }, { CLK_BASE("cpu", "xtal", rt3883_cpu_recalc_rate) }, { CLK_BASE("bus", "cpu", rt3883_bus_recalc_rate) } }; @@ -746,8 +772,8 @@ err_clk_unreg: static const struct mtmips_clk_data rt2880_clk_data = { .clk_base = rt2880_clks_base, .num_clk_base = ARRAY_SIZE(rt2880_clks_base), - .clk_fixed = rt305x_fixed_clocks, - .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks), + .clk_fixed = NULL, + .num_clk_fixed = 0, .clk_factor = rt2880_factor_clocks, .num_clk_factor = ARRAY_SIZE(rt2880_factor_clocks), .clk_periph = rt2880_pherip_clks, @@ -757,8 +783,8 @@ static const struct mtmips_clk_data rt2880_clk_data = { static const struct mtmips_clk_data rt305x_clk_data = { .clk_base = rt305x_clks_base, .num_clk_base = ARRAY_SIZE(rt305x_clks_base), - .clk_fixed = rt305x_fixed_clocks, - .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks), + .clk_fixed = NULL, + .num_clk_fixed = 0, .clk_factor = rt305x_factor_clocks, .num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks), .clk_periph = rt305x_pherip_clks, @@ -779,8 +805,8 @@ static const struct mtmips_clk_data rt3352_clk_data = { static const struct mtmips_clk_data rt3883_clk_data = { .clk_base = rt3883_clks_base, .num_clk_base = ARRAY_SIZE(rt3883_clks_base), - .clk_fixed = rt305x_fixed_clocks, - .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks), + .clk_fixed = rt3883_fixed_clocks, + .num_clk_fixed = ARRAY_SIZE(rt3883_fixed_clocks), .clk_factor = NULL, .num_clk_factor = 0, .clk_periph = rt5350_pherip_clks, @@ -801,10 +827,10 @@ static const struct mtmips_clk_data rt5350_clk_data = { static const struct mtmips_clk_data mt7620_clk_data = { .clk_base = mt7620_clks_base, .num_clk_base = ARRAY_SIZE(mt7620_clks_base), - .clk_fixed = NULL, - .num_clk_fixed = 0, - .clk_factor = NULL, - .num_clk_factor = 0, + .clk_fixed = mt7620_fixed_clocks, + .num_clk_fixed = ARRAY_SIZE(mt7620_fixed_clocks), + .clk_factor = mt7620_factor_clocks, + .num_clk_factor = ARRAY_SIZE(mt7620_factor_clocks), .clk_periph = mt7620_pherip_clks, .num_clk_periph = ARRAY_SIZE(mt7620_pherip_clks), }; @@ -814,8 +840,8 @@ static const struct mtmips_clk_data mt76x8_clk_data = { .num_clk_base = ARRAY_SIZE(mt76x8_clks_base), .clk_fixed = mt76x8_fixed_clocks, .num_clk_fixed = ARRAY_SIZE(mt76x8_fixed_clocks), - .clk_factor = rt305x_factor_clocks, - .num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks), + .clk_factor = mt76x8_factor_clocks, + .num_clk_factor = ARRAY_SIZE(mt76x8_factor_clocks), .clk_periph = mt76x8_pherip_clks, .num_clk_periph = ARRAY_SIZE(mt76x8_pherip_clks), }; diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 76791a1c50ac..ff01f5f0ed20 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -237,6 +237,11 @@ config CLK_RZV2H bool "RZ/V2H(P) family clock support" if COMPILE_TEST select RESET_CONTROLLER +config CLK_RENESAS_VBATTB + tristate "Renesas VBATTB clock controller" + depends on ARCH_RZG2L || COMPILE_TEST + select RESET_CONTROLLER + # Generic config CLK_RENESAS_CPG_MSSR bool "CPG/MSSR clock 
support" if COMPILE_TEST diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index 23d2e26051c8..82efaa835ac7 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_CLK_RZV2H) += rzv2h-cpg.o obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o obj-$(CONFIG_CLK_RENESAS_CPG_MSTP) += clk-mstp.o obj-$(CONFIG_CLK_RENESAS_DIV6) += clk-div6.o +obj-$(CONFIG_CLK_RENESAS_VBATTB) += clk-vbattb.o diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c index 4b1815147f77..f331d8bc9daf 100644 --- a/drivers/clk/renesas/clk-r8a73a4.c +++ b/drivers/clk/renesas/clk-r8a73a4.c @@ -64,7 +64,6 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg, unsigned int mult = 1; unsigned int div = 1; - if (!strcmp(name, "main")) { u32 ckscr = readl(base + CPG_CKSCR); diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c index 797556259370..6ea173f22251 100644 --- a/drivers/clk/renesas/clk-r8a7778.c +++ b/drivers/clk/renesas/clk-r8a7778.c @@ -67,7 +67,6 @@ r8a7778_cpg_register_clock(struct device_node *np, const char *name) return ERR_PTR(-EINVAL); } - static void __init r8a7778_cpg_clocks_init(struct device_node *np) { struct clk_onecell_data *data; diff --git a/drivers/clk/renesas/clk-vbattb.c b/drivers/clk/renesas/clk-vbattb.c new file mode 100644 index 000000000000..ff9d1ead455c --- /dev/null +++ b/drivers/clk/renesas/clk-vbattb.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * VBATTB clock driver + * + * Copyright (C) 2024 Renesas Electronics Corp. + */ + +#include <linux/cleanup.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include <dt-bindings/clock/renesas,r9a08g045-vbattb.h> + +#define VBATTB_BKSCCR 0x1c +#define VBATTB_BKSCCR_SOSEL 6 +#define VBATTB_SOSCCR2 0x24 +#define VBATTB_SOSCCR2_SOSTP2 0 +#define VBATTB_XOSCCR 0x30 +#define VBATTB_XOSCCR_OUTEN 16 +#define VBATTB_XOSCCR_XSEL GENMASK(1, 0) +#define VBATTB_XOSCCR_XSEL_4_PF 0x0 +#define VBATTB_XOSCCR_XSEL_7_PF 0x1 +#define VBATTB_XOSCCR_XSEL_9_PF 0x2 +#define VBATTB_XOSCCR_XSEL_12_5_PF 0x3 + +/** + * struct vbattb_clk - VBATTB clock data structure + * @base: base address + * @lock: lock + */ +struct vbattb_clk { + void __iomem *base; + spinlock_t lock; +}; + +static int vbattb_clk_validate_load_capacitance(u32 *reg_lc, u32 of_lc) +{ + switch (of_lc) { + case 4000: + *reg_lc = VBATTB_XOSCCR_XSEL_4_PF; + break; + case 7000: + *reg_lc = VBATTB_XOSCCR_XSEL_7_PF; + break; + case 9000: + *reg_lc = VBATTB_XOSCCR_XSEL_9_PF; + break; + case 12500: + *reg_lc = VBATTB_XOSCCR_XSEL_12_5_PF; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void vbattb_clk_action(void *data) +{ + struct device *dev = data; + struct reset_control *rstc = dev_get_drvdata(dev); + int ret; + + ret = reset_control_assert(rstc); + if (ret) + dev_err(dev, "Failed to de-assert reset!"); + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + dev_err(dev, "Failed to runtime suspend!"); + + of_clk_del_provider(dev->of_node); +} + +static int vbattb_clk_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct clk_parent_data parent_data = {}; + struct clk_hw_onecell_data *clk_data; + const struct clk_hw *parent_hws[2]; + struct device *dev = &pdev->dev; + struct reset_control *rstc; + 
struct vbattb_clk *vbclk; + u32 of_lc, reg_lc; + struct clk_hw *hw; + /* 4 clocks are exported: VBATTB_XC, VBATTB_XBYP, VBATTB_MUX, VBATTB_VBATTCLK. */ + u8 num_clks = 4; + int ret; + + /* Default to 4pF as this is not needed if an external clock device is connected. */ + of_lc = 4000; + of_property_read_u32(np, "quartz-load-femtofarads", &of_lc); + + ret = vbattb_clk_validate_load_capacitance(&reg_lc, of_lc); + if (ret) + return ret; + + vbclk = devm_kzalloc(dev, sizeof(*vbclk), GFP_KERNEL); + if (!vbclk) + return -ENOMEM; + + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clks), GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + clk_data->num = num_clks; + + vbclk->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(vbclk->base)) + return PTR_ERR(vbclk->base); + + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + + rstc = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(rstc)) + return PTR_ERR(rstc); + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + ret = reset_control_deassert(rstc); + if (ret) { + pm_runtime_put_sync(dev); + return ret; + } + + dev_set_drvdata(dev, rstc); + ret = devm_add_action_or_reset(dev, vbattb_clk_action, dev); + if (ret) + return ret; + + spin_lock_init(&vbclk->lock); + + parent_data.fw_name = "rtx"; + hw = devm_clk_hw_register_gate_parent_data(dev, "xc", &parent_data, 0, + vbclk->base + VBATTB_SOSCCR2, + VBATTB_SOSCCR2_SOSTP2, + CLK_GATE_SET_TO_DISABLE, &vbclk->lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + clk_data->hws[VBATTB_XC] = hw; + + hw = devm_clk_hw_register_fixed_factor_fwname(dev, np, "xbyp", "rtx", 0, 1, 1); + if (IS_ERR(hw)) + return PTR_ERR(hw); + clk_data->hws[VBATTB_XBYP] = hw; + + parent_hws[0] = clk_data->hws[VBATTB_XC]; + parent_hws[1] = clk_data->hws[VBATTB_XBYP]; + hw = devm_clk_hw_register_mux_parent_hws(dev, "mux", parent_hws, 2, 0, + vbclk->base + VBATTB_BKSCCR, + VBATTB_BKSCCR_SOSEL, + 1, 0, &vbclk->lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + clk_data->hws[VBATTB_MUX] = hw; + + /* Set load capacitance before registering the VBATTCLK clock. */ + scoped_guard(spinlock, &vbclk->lock) { + u32 val = readl_relaxed(vbclk->base + VBATTB_XOSCCR); + + val &= ~VBATTB_XOSCCR_XSEL; + val |= reg_lc; + writel_relaxed(val, vbclk->base + VBATTB_XOSCCR); + } + + /* This feeds the RTC counter clock and it needs to stay on.
*/ + hw = devm_clk_hw_register_gate_parent_hw(dev, "vbattclk", hw, CLK_IS_CRITICAL, + vbclk->base + VBATTB_XOSCCR, + VBATTB_XOSCCR_OUTEN, 0, + &vbclk->lock); + + if (IS_ERR(hw)) + return PTR_ERR(hw); + clk_data->hws[VBATTB_VBATTCLK] = hw; + + return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); +} + +static const struct of_device_id vbattb_clk_match[] = { + { .compatible = "renesas,r9a08g045-vbattb" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, vbattb_clk_match); + +static struct platform_driver vbattb_clk_driver = { + .driver = { + .name = "renesas-vbattb-clk", + .of_match_table = vbattb_clk_match, + }, + .probe = vbattb_clk_probe, +}; +module_platform_driver(vbattb_clk_driver); + +MODULE_DESCRIPTION("Renesas VBATTB Clock Driver"); +MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c index 4c8e4c69c1bf..9c7e4094705c 100644 --- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c @@ -266,7 +266,6 @@ static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = { { 2, 128, 1, 192, 1, 32, }, }; - static int __init r8a779a0_cpg_mssr_init(struct device *dev) { const struct rcar_gen4_cpg_pll_config *cpg_pll_config; diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c index e20c048bfa9b..9067e407cbc6 100644 --- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c @@ -37,7 +37,6 @@ enum clk_ids { CLK_PLL5, CLK_PLL6, CLK_PLL1_DIV2, - CLK_PLL2_DIV2, CLK_PLL3_DIV2, CLK_PLL4_DIV2, CLK_PLL4_DIV5, @@ -78,7 +77,6 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = { DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN), DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), - DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1), DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1), DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1), DEF_FIXED(".pll4_div5", CLK_PLL4_DIV5, CLK_PLL4, 5, 1), @@ -101,10 +99,10 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = { DEF_RATE(".oco", CLK_OCO, 32768), /* Core Clock Outputs */ - DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 0), - DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 8), - DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 32), - DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 40), + DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 0), + DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 8), + DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 32), + DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 40), DEF_FIXED("s0d2", R8A779H0_CLK_S0D2, CLK_S0, 2, 1), DEF_FIXED("s0d3", R8A779H0_CLK_S0D3, CLK_S0, 3, 1), DEF_FIXED("s0d4", R8A779H0_CLK_S0D4, CLK_S0, 4, 1), diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c index 1ce40fb51f13..b2ae8cdc4723 100644 --- a/drivers/clk/renesas/r9a08g045-cpg.c +++ b/drivers/clk/renesas/r9a08g045-cpg.c @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/pm_domain.h> #include <dt-bindings/clock/r9a08g045-cpg.h> @@ -266,61 +267,50 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = { /* Keep always-on domain on the first position for 
proper domains registration. */ DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON, DEF_REG_CONF(0, 0), - RZG2L_PD_F_ALWAYS_ON), + GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE), DEF_PD("gic", R9A08G045_PD_GIC, DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)), - RZG2L_PD_F_ALWAYS_ON), + GENPD_FLAG_ALWAYS_ON), DEF_PD("ia55", R9A08G045_PD_IA55, DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)), - RZG2L_PD_F_ALWAYS_ON), + GENPD_FLAG_ALWAYS_ON), DEF_PD("dmac", R9A08G045_PD_DMAC, DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)), - RZG2L_PD_F_ALWAYS_ON), + GENPD_FLAG_ALWAYS_ON), DEF_PD("wdt0", R9A08G045_PD_WDT0, DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)), - RZG2L_PD_F_NONE), + GENPD_FLAG_IRQ_SAFE), DEF_PD("sdhi0", R9A08G045_PD_SDHI0, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)), 0), DEF_PD("sdhi1", R9A08G045_PD_SDHI1, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), 0), DEF_PD("sdhi2", R9A08G045_PD_SDHI2, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), 0), DEF_PD("usb0", R9A08G045_PD_USB0, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), 0), DEF_PD("usb1", R9A08G045_PD_USB1, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)), 0), DEF_PD("usb-phy", R9A08G045_PD_USB_PHY, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)), 0), DEF_PD("eth0", R9A08G045_PD_ETHER0, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)), 0), DEF_PD("eth1", R9A08G045_PD_ETHER1, - DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)), 0), DEF_PD("i2c0", R9A08G045_PD_I2C0, - DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)), 0), DEF_PD("i2c1", R9A08G045_PD_I2C1, - DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)), 0), DEF_PD("i2c2", R9A08G045_PD_I2C2, - DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)), 0), DEF_PD("i2c3", R9A08G045_PD_I2C3, - DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), 0), DEF_PD("scif0", R9A08G045_PD_SCIF0, - DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), - RZG2L_PD_F_NONE), + DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), 0), DEF_PD("vbat", R9A08G045_PD_VBAT, DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)), - RZG2L_PD_F_ALWAYS_ON), + GENPD_FLAG_ALWAYS_ON), + DEF_PD("rtc", R9A08G045_PD_RTC, + DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(7)), 0), }; const struct rzg2l_cpg_info r9a08g045_cpg_info = { diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c index dda9f29dff33..22272279b104 100644 --- a/drivers/clk/renesas/r9a09g011-cpg.c +++ b/drivers/clk/renesas/r9a09g011-cpg.c @@ -98,7 +98,6 @@ static const struct clk_div_table dtable_divd[] = { {0, 0}, }; - static const struct clk_div_table dtable_divw[] = { {0, 6}, {1, 7}, diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c index 3ee32db5c0af..7c4507fd34e6 100644 --- a/drivers/clk/renesas/r9a09g057-cpg.c +++ b/drivers/clk/renesas/r9a09g057-cpg.c @@ -41,6 +41,14 @@ enum 
clk_ids { MOD_CLK_BASE, }; +static const struct clk_div_table dtable_1_8[] = { + {0, 1}, + {1, 2}, + {2, 4}, + {3, 8}, + {0, 0}, +}; + static const struct clk_div_table dtable_2_64[] = { {0, 2}, {1, 4}, @@ -74,10 +82,19 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = { /* Core Clocks */ DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1), + DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55, + CDDIV1_DIVCTL0, dtable_1_8), + DEF_DDIV("ca55_0_coreclk1", R9A09G057_CA55_0_CORE_CLK1, CLK_PLLCA55, + CDDIV1_DIVCTL1, dtable_1_8), + DEF_DDIV("ca55_0_coreclk2", R9A09G057_CA55_0_CORE_CLK2, CLK_PLLCA55, + CDDIV1_DIVCTL2, dtable_1_8), + DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55, + CDDIV1_DIVCTL3, dtable_1_8), DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1), }; static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = { + DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5), DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3), DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4), DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5), @@ -119,6 +136,7 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = { }; static const struct rzv2h_reset r9a09g057_resets[] __initconst = { + DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */ DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */ DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */ DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */ diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c index 42b126ea3e13..a45f8e7e9ab6 100644 --- a/drivers/clk/renesas/rcar-cpg-lib.c +++ b/drivers/clk/renesas/rcar-cpg-lib.c @@ -206,4 +206,3 @@ struct clk * __init cpg_rpcd2_clk_register(const char *name, return clk; } - diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index 20b89eb6c35c..027100e84ee4 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -335,7 +335,6 @@ static u32 cpg_quirks __initdata; #define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */ - static const struct soc_device_attribute cpg_quirks_match[] __initconst = { { .soc_id = "r8a7796", .revision = "ES1.0", diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 1b421b809796..79e7a90c3b1b 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -39,7 +39,6 @@ #define WARN_DEBUG(x) do { } while (0) #endif - /* * Module Standby and Software Reset register offets. 
* @@ -716,7 +715,6 @@ static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv) } #endif /* !CONFIG_RESET_CONTROLLER */ - static const struct of_device_id cpg_mssr_match[] = { #ifdef CONFIG_CLK_R7S9210 { diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 88bf39e8c79c..ddf722ca79eb 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -548,7 +548,7 @@ static unsigned long rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params, unsigned long rate) { - unsigned long foutpostdiv_rate; + unsigned long foutpostdiv_rate, foutvco_rate; params->pl5_intin = rate / MEGA; params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA); @@ -557,10 +557,11 @@ rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params, params->pl5_postdiv2 = 1; params->pl5_spread = 0x16; - foutpostdiv_rate = - EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv * - ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) / - (params->pl5_postdiv1 * params->pl5_postdiv2); + foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA, + (params->pl5_intin << 24) + params->pl5_fracin), + params->pl5_refdiv) >> 24; + foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate, + params->pl5_postdiv1 * params->pl5_postdiv2); return foutpostdiv_rate; } @@ -1680,23 +1681,31 @@ static int rzg2l_cpg_power_off(struct generic_pm_domain *domain) return 0; } -static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on) +static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd) { + bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON); struct dev_power_governor *governor; + int ret; + + if (always_on) + governor = &pm_domain_always_on_gov; + else + governor = &simple_qos_governor; pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; pd->genpd.attach_dev = rzg2l_cpg_attach_dev; pd->genpd.detach_dev = rzg2l_cpg_detach_dev; - if (always_on) { - pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; - governor = &pm_domain_always_on_gov; - } else { - pd->genpd.power_on = rzg2l_cpg_power_on; - pd->genpd.power_off = rzg2l_cpg_power_off; - governor = &simple_qos_governor; - } + pd->genpd.power_on = rzg2l_cpg_power_on; + pd->genpd.power_off = rzg2l_cpg_power_off; + + ret = pm_genpd_init(&pd->genpd, governor, !always_on); + if (ret) + return ret; - return pm_genpd_init(&pd->genpd, governor, !always_on); + if (always_on) + ret = rzg2l_cpg_power_on(&pd->genpd); + + return ret; } static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv) @@ -1711,8 +1720,9 @@ static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv) return -ENOMEM; pd->genpd.name = np->name; + pd->genpd.flags = GENPD_FLAG_ALWAYS_ON; pd->priv = priv; - ret = rzg2l_cpg_pd_setup(pd, true); + ret = rzg2l_cpg_pd_setup(pd); if (ret) return ret; @@ -1777,7 +1787,6 @@ static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv) return ret; for (unsigned int i = 0; i < info->num_pm_domains; i++) { - bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON); struct rzg2l_cpg_pd *pd; pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); @@ -1785,20 +1794,15 @@ static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv) return -ENOMEM; pd->genpd.name = info->pm_domains[i].name; + pd->genpd.flags = info->pm_domains[i].genpd_flags; pd->conf = info->pm_domains[i].conf; pd->id = info->pm_domains[i].id; pd->priv = priv; - ret = rzg2l_cpg_pd_setup(pd, always_on); + ret = rzg2l_cpg_pd_setup(pd); if (ret) return ret; - if 
(always_on) { - ret = rzg2l_cpg_power_on(&pd->genpd); - if (ret) - return ret; - } - domains->domains[i] = &pd->genpd; /* Parent should be on the very first entry of info->pm_domains[]. */ if (!i) { diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index ecfe7e7ea8a1..881a89b5a710 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -270,14 +270,14 @@ struct rzg2l_cpg_pm_domain_conf { * struct rzg2l_cpg_pm_domain_init_data - PM domain init data * @name: PM domain name * @conf: PM domain configuration - * @flags: RZG2L PM domain flags (see RZG2L_PD_F_*) + * @genpd_flags: genpd flags (see GENPD_FLAG_*) * @id: PM domain ID (similar to the ones defined in * include/dt-bindings/clock/<soc-id>-cpg.h) */ struct rzg2l_cpg_pm_domain_init_data { const char * const name; struct rzg2l_cpg_pm_domain_conf conf; - u32 flags; + u32 genpd_flags; u16 id; }; @@ -288,13 +288,9 @@ struct rzg2l_cpg_pm_domain_init_data { .conf = { \ .mstop = (_mstop_conf), \ }, \ - .flags = (_flags), \ + .genpd_flags = (_flags), \ } -/* Power domain flags. */ -#define RZG2L_PD_F_ALWAYS_ON BIT(0) -#define RZG2L_PD_F_NONE (0) - /** * struct rzg2l_cpg_info - SoC-specific CPG Description * diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h index 1bd406c69015..819029c81904 100644 --- a/drivers/clk/renesas/rzv2h-cpg.h +++ b/drivers/clk/renesas/rzv2h-cpg.h @@ -32,8 +32,13 @@ struct ddiv { }) #define CPG_CDDIV0 (0x400) +#define CPG_CDDIV1 (0x404) #define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2) +#define CDDIV1_DIVCTL0 DDIV_PACK(CPG_CDDIV1, 0, 2, 4) +#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5) +#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6) +#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7) /** * Definitions of CPG Core Clocks diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index f1ba48758c78..7a88331a658d 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o +obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos8895.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index abd49edcf707..e11ac67819ef 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -4,7 +4,7 @@ * Author: Padmavathi Venna <padma.v@samsung.com> * * Common Clock Framework support for Audio Subsystem Clock Controller. 
-*/ + */ #include <linux/slab.h> #include <linux/io.h> diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index cd4fec323a42..aec4d18c1f9e 100644 --- a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -260,7 +260,7 @@ static const struct samsung_mux_clock mux_clks[] __initconst = { /* SRC_TOP0 */ MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1), - MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p,SRC_TOP0, 24, 1), + MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p, SRC_TOP0, 24, 1), MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_div_mpll_pre_p, SRC_TOP0, 20, 1), MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_div_mpll_pre_p, SRC_TOP0, 16, 1), MUX(CLK_MOUT_ACLK_266_1, "mout_aclk_266_1", mout_aclk_266_1_p, SRC_TOP0, 14, 1), diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 28945b6b0ee1..16be0c53903c 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -5,7 +5,7 @@ * Author: Thomas Abraham <thomas.ab@samsung.com> * * Common Clock Framework support for all Exynos4 SoCs. -*/ + */ #include <dt-bindings/clock/exynos4.h> #include <linux/slab.h> diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c index a70c2b06a61a..29c5644f0593 100644 --- a/drivers/clk/samsung/clk-exynos4412-isp.c +++ b/drivers/clk/samsung/clk-exynos4412-isp.c @@ -4,7 +4,7 @@ * Author: Marek Szyprowski <m.szyprowski@samsung.com> * * Common Clock Framework support for Exynos4412 ISP module. -*/ + */ #include <dt-bindings/clock/exynos4.h> #include <linux/slab.h> diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index e02e7c013f3d..47e9ac2275ee 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -5,7 +5,7 @@ * Author: Thomas Abraham <thomas.ab@samsung.com> * * Common Clock Framework support for Exynos5250 SoC. -*/ + */ #include <dt-bindings/clock/exynos5250.h> #include <linux/clk-provider.h> diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c index 16da6ef5ca0c..fd0520d204dc 100644 --- a/drivers/clk/samsung/clk-exynos5260.c +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -1458,7 +1458,7 @@ static const struct samsung_fixed_rate_clock fixed_rate_clks[] __initconst = { FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi", NULL, 0, 125000000), FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS, - "phyclk_mipi_dphy_4l_m_txbyte_clkhs" , NULL, + "phyclk_mipi_dphy_4l_m_txbyte_clkhs", NULL, 0, 187500000), FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m", NULL, 0, 24000000), @@ -1629,7 +1629,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = { mout_isp1_media_400_p, MUX_SEL_TOP_ISP10, 4, 1), MUX(TOP_MOUT_ACLK_ISP1_400, "mout_aclk_isp1_400", mout_aclk_isp1_400_p, - MUX_SEL_TOP_ISP10, 8 , 1), + MUX_SEL_TOP_ISP10, 8, 1), MUX(TOP_MOUT_ISP1_MEDIA_266, "mout_isp1_media_266", mout_isp1_media_266_p, MUX_SEL_TOP_ISP10, 16, 1), diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c index 2654077211e7..99b1bb4539fd 100644 --- a/drivers/clk/samsung/clk-exynos5410.c +++ b/drivers/clk/samsung/clk-exynos5410.c @@ -4,7 +4,7 @@ * Author: Tarek Dakhran <t.dakhran@samsung.com> * * Common Clock Framework support for Exynos5410 SoC. 
-*/ + */ #include <dt-bindings/clock/exynos5410.h> diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index c630135c686b..333c52fda17f 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -5,7 +5,7 @@ * Chander Kashyap <k.chander@samsung.com> * * Common Clock Framework support for Exynos5420 SoC. -*/ + */ #include <dt-bindings/clock/exynos5420.h> #include <linux/slab.h> @@ -295,8 +295,8 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = { /* list of all parent clocks */ PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll", "mout_sclk_mpll", "mout_sclk_spll"}; -PNAME(mout_cpu_p) = {"mout_apll" , "mout_mspll_cpu"}; -PNAME(mout_kfc_p) = {"mout_kpll" , "mout_mspll_kfc"}; +PNAME(mout_cpu_p) = {"mout_apll", "mout_mspll_cpu"}; +PNAME(mout_kfc_p) = {"mout_kpll", "mout_mspll_kfc"}; PNAME(mout_apll_p) = {"fin_pll", "fout_apll"}; PNAME(mout_bpll_p) = {"fin_pll", "fout_bpll"}; PNAME(mout_cpll_p) = {"fin_pll", "fout_cpll"}; diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index 4a5d2a914bd6..e6c938effa29 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com> -*/ + */ #include <linux/clk-provider.h> #include <linux/of.h> diff --git a/drivers/clk/samsung/clk-exynos8895.c b/drivers/clk/samsung/clk-exynos8895.c new file mode 100644 index 000000000000..29ec0c4a8635 --- /dev/null +++ b/drivers/clk/samsung/clk-exynos8895.c @@ -0,0 +1,2803 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com> + * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com> + * + * Common Clock Framework support for Exynos8895 SoC. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <dt-bindings/clock/samsung,exynos8895.h> + +#include "clk.h" +#include "clk-exynos-arm64.h" + +/* NOTE: Must be equal to the last clock ID increased by one */ +#define CLKS_NR_TOP (CLK_GOUT_CMU_VPU_BUS + 1) +#define CLKS_NR_FSYS0 (CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK + 1) +#define CLKS_NR_FSYS1 (CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK + 1) +#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_USI03_I_SCLK_USI + 1) +#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK + 1) +#define CLKS_NR_PERIS (CLK_GOUT_PERIS_XIU_P_PERIS_ACLK + 1) + +/* ---- CMU_TOP ------------------------------------------------------------- */ + +/* Register Offset definitions for CMU_TOP (0x15a80000) */ +#define PLL_LOCKTIME_PLL_SHARED0 0x0000 +#define PLL_LOCKTIME_PLL_SHARED1 0x0004 +#define PLL_LOCKTIME_PLL_SHARED2 0x0008 +#define PLL_LOCKTIME_PLL_SHARED3 0x000c +#define PLL_LOCKTIME_PLL_SHARED4 0x0010 +#define PLL_CON0_MUX_CP2AP_MIF_CLK_USER 0x0100 +#define PLL_CON2_MUX_CP2AP_MIF_CLK_USER 0x0108 +#define PLL_CON0_PLL_SHARED0 0x0120 +#define PLL_CON0_PLL_SHARED1 0x0140 +#define PLL_CON0_PLL_SHARED2 0x0160 +#define PLL_CON0_PLL_SHARED3 0x0180 +#define PLL_CON0_PLL_SHARED4 0x01a0 +#define CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX 0x1000 +#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004 +#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1008 +#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS 0x100c +#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C 0x1010 +#define CLK_CON_MUX_MUX_CLKCMU_CAM_BUS 0x1014 +#define CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0 0x1018 +#define CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1 0x101c +#define CLK_CON_MUX_MUX_CLKCMU_CAM_VRA 0x1020 +#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1024 +#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x1028 +#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x102c +#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1030 +#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1038 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x103c +#define CLK_CON_MUX_MUX_CLKCMU_DBG_BUS 0x1040 +#define CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS 0x1044 +#define CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD 0x1048 +#define CLK_CON_MUX_MUX_CLKCMU_DPU_BUS 0x104c +#define CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR 0x1050 +#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1054 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS 0x1058 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC 0x105c +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD 0x1060 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD 0x1064 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30 0x1068 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS 0x106c +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD 0x1070 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE 0x1074 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD 0x1078 +#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x107c +#define CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG 0x1080 +#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1084 +#define CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS 0x1088 +#define CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS 0x108c +#define CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS 0x1090 +#define CLK_CON_MUX_MUX_CLKCMU_IVA_BUS 0x1094 +#define CLK_CON_MUX_MUX_CLKCMU_MFC_BUS 0x1098 +#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10a0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG 0x10a4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00 0x10a8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01 0x10ac +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02 0x10b0 +#define 
CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03 0x10b4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10b8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2 0x10bc +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0 0x10c0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1 0x10c4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT 0x10c8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04 0x10cc +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05 0x10d0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06 0x10d4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07 0x10d8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08 0x10dc +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09 0x10e0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10 0x10e4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11 0x10e8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12 0x10ec +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13 0x10f0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10f4 +#define CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS 0x10f8 +#define CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD 0x10fc +#define CLK_CON_MUX_MUX_CLKCMU_VPU_BUS 0x1100 +#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x1104 +#define CLK_CON_MUX_MUX_CMU_CMUREF 0x1108 +#define CLK_CON_DIV_CLKCMU_ABOX_CPUABOX 0x1800 +#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1804 +#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x1808 +#define CLK_CON_DIV_CLKCMU_BUSC_BUS 0x180c +#define CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C 0x1810 +#define CLK_CON_DIV_CLKCMU_CAM_BUS 0x1814 +#define CLK_CON_DIV_CLKCMU_CAM_TPU0 0x1818 +#define CLK_CON_DIV_CLKCMU_CAM_TPU1 0x181c +#define CLK_CON_DIV_CLKCMU_CAM_VRA 0x1820 +#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1824 +#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1828 +#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x182c +#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1830 +#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1834 +#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838 +#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c +#define CLK_CON_DIV_CLKCMU_DBG_BUS 0x1840 +#define CLK_CON_DIV_CLKCMU_DCAM_BUS 0x1844 +#define CLK_CON_DIV_CLKCMU_DCAM_IMGD 0x1848 +#define CLK_CON_DIV_CLKCMU_DPU_BUS 0x184c +#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x1850 +#define CLK_CON_DIV_CLKCMU_FSYS0_BUS 0x1854 +#define CLK_CON_DIV_CLKCMU_FSYS0_DPGTC 0x1858 +#define CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD 0x185c +#define CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD 0x1860 +#define CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30 0x1864 +#define CLK_CON_DIV_CLKCMU_FSYS1_BUS 0x1868 +#define CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD 0x186c +#define CLK_CON_DIV_CLKCMU_FSYS1_PCIE 0x1870 +#define CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD 0x1874 +#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1878 +#define CLK_CON_DIV_CLKCMU_G2D_JPEG 0x187c +#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1880 +#define CLK_CON_DIV_CLKCMU_HPM 0x1884 +#define CLK_CON_DIV_CLKCMU_IMEM_BUS 0x1888 +#define CLK_CON_DIV_CLKCMU_ISPHQ_BUS 0x188c +#define CLK_CON_DIV_CLKCMU_ISPLP_BUS 0x1890 +#define CLK_CON_DIV_CLKCMU_IVA_BUS 0x1894 +#define CLK_CON_DIV_CLKCMU_MFC_BUS 0x1898 +#define CLK_CON_DIV_CLKCMU_MODEM_SHARED0 0x189c +#define CLK_CON_DIV_CLKCMU_MODEM_SHARED1 0x18a0 +#define CLK_CON_DIV_CLKCMU_OTP 0x18a4 +#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18a8 +#define CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG 0x18ac +#define CLK_CON_DIV_CLKCMU_PERIC0_USI00 0x18b0 +#define CLK_CON_DIV_CLKCMU_PERIC0_USI01 0x18b4 +#define CLK_CON_DIV_CLKCMU_PERIC0_USI02 0x18b8 +#define CLK_CON_DIV_CLKCMU_PERIC0_USI03 0x18bc +#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18c0 +#define CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2 0x18c4 +#define CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0 0x18c8 +#define CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1 0x18cc +#define CLK_CON_DIV_CLKCMU_PERIC1_UART_BT 0x18d0 
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI04 0x18d4 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI05 0x18d8 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI06 0x18dc +#define CLK_CON_DIV_CLKCMU_PERIC1_USI07 0x18e0 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI08 0x18e4 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI09 0x18e8 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI10 0x18ec +#define CLK_CON_DIV_CLKCMU_PERIC1_USI11 0x18f0 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI12 0x18f4 +#define CLK_CON_DIV_CLKCMU_PERIC1_USI13 0x18f8 +#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18fc +#define CLK_CON_DIV_CLKCMU_SRDZ_BUS 0x1900 +#define CLK_CON_DIV_CLKCMU_SRDZ_IMGD 0x1904 +#define CLK_CON_DIV_CLKCMU_VPU_BUS 0x1908 +#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x190c +#define CLK_CON_DIV_DIV_CP2AP_MIF_CLK_DIV2 0x1910 +#define CLK_CON_DIV_DIV_PLL_SHARED0_DIV2 0x1914 +#define CLK_CON_DIV_DIV_PLL_SHARED0_DIV4 0x1918 +#define CLK_CON_DIV_DIV_PLL_SHARED1_DIV2 0x191c +#define CLK_CON_DIV_DIV_PLL_SHARED1_DIV4 0x1920 +#define CLK_CON_DIV_DIV_PLL_SHARED2_DIV2 0x1924 +#define CLK_CON_DIV_DIV_PLL_SHARED3_DIV2 0x1928 +#define CLK_CON_DIV_DIV_PLL_SHARED4_DIV2 0x192c +#define CLK_CON_GAT_CLKCMU_DROOPDETECTOR 0x2000 +#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004 +#define CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX 0x2008 +#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x200c +#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2010 +#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS 0x2014 +#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C 0x2018 +#define CLK_CON_GAT_GATE_CLKCMU_CAM_BUS 0x201c +#define CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0 0x2020 +#define CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1 0x2024 +#define CLK_CON_GAT_GATE_CLKCMU_CAM_VRA 0x2028 +#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x202c +#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2030 +#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2034 +#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2038 +#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x203c +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2040 +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2044 +#define CLK_CON_GAT_GATE_CLKCMU_DBG_BUS 0x2048 +#define CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS 0x204c +#define CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD 0x2050 +#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2054 +#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2058 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS 0x205c +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC 0x2060 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD 0x2064 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD 0x2068 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30 0x206c +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS 0x2070 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD 0x2074 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE 0x2078 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD 0x207c +#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x2080 +#define CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG 0x2084 +#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2088 +#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x208c +#define CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS 0x2090 +#define CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS 0x2094 +#define CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS 0x2098 +#define CLK_CON_GAT_GATE_CLKCMU_IVA_BUS 0x209c +#define CLK_CON_GAT_GATE_CLKCMU_MFC_BUS 0x20a0 +#define CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0 0x20a4 +#define CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1 0x20a8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20ac +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG 0x20b0 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00 0x20b4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01 0x20b8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02 0x20bc +#define 
CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03 0x20c0 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20c4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2 0x20c8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0 0x20cc +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1 0x20d0 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT 0x20d4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04 0x20d8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05 0x20dc +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06 0x20e0 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07 0x20e4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08 0x20e8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09 0x20ec +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10 0x20f0 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11 0x20f4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12 0x20f8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13 0x20fc +#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x2100 +#define CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS 0x2104 +#define CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD 0x2108 +#define CLK_CON_GAT_GATE_CLKCMU_VPU_BUS 0x210c + +static const unsigned long top_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_SHARED0, + PLL_LOCKTIME_PLL_SHARED1, + PLL_LOCKTIME_PLL_SHARED2, + PLL_LOCKTIME_PLL_SHARED3, + PLL_LOCKTIME_PLL_SHARED4, + PLL_CON0_MUX_CP2AP_MIF_CLK_USER, + PLL_CON2_MUX_CP2AP_MIF_CLK_USER, + PLL_CON0_PLL_SHARED0, + PLL_CON0_PLL_SHARED1, + PLL_CON0_PLL_SHARED2, + PLL_CON0_PLL_SHARED3, + PLL_CON0_PLL_SHARED4, + CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX, + CLK_CON_MUX_MUX_CLKCMU_APM_BUS, + CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, + CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, + CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C, + CLK_CON_MUX_MUX_CLKCMU_CAM_BUS, + CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0, + CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1, + CLK_CON_MUX_MUX_CLKCMU_CAM_VRA, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, + CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_DBG_BUS, + CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS, + CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD, + CLK_CON_MUX_MUX_CLKCMU_DPU_BUS, + CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR, + CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD, + CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, + CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG, + CLK_CON_MUX_MUX_CLKCMU_HPM, + CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS, + CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS, + CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS, + CLK_CON_MUX_MUX_CLKCMU_IVA_BUS, + CLK_CON_MUX_MUX_CLKCMU_MFC_BUS, + CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08, + 
CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13, + CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, + CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS, + CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD, + CLK_CON_MUX_MUX_CLKCMU_VPU_BUS, + CLK_CON_MUX_MUX_CLK_CMU_CMUREF, + CLK_CON_MUX_MUX_CMU_CMUREF, + CLK_CON_DIV_CLKCMU_ABOX_CPUABOX, + CLK_CON_DIV_CLKCMU_APM_BUS, + CLK_CON_DIV_CLKCMU_BUS1_BUS, + CLK_CON_DIV_CLKCMU_BUSC_BUS, + CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C, + CLK_CON_DIV_CLKCMU_CAM_BUS, + CLK_CON_DIV_CLKCMU_CAM_TPU0, + CLK_CON_DIV_CLKCMU_CAM_TPU1, + CLK_CON_DIV_CLKCMU_CAM_VRA, + CLK_CON_DIV_CLKCMU_CIS_CLK0, + CLK_CON_DIV_CLKCMU_CIS_CLK1, + CLK_CON_DIV_CLKCMU_CIS_CLK2, + CLK_CON_DIV_CLKCMU_CIS_CLK3, + CLK_CON_DIV_CLKCMU_CORE_BUS, + CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, + CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, + CLK_CON_DIV_CLKCMU_DBG_BUS, + CLK_CON_DIV_CLKCMU_DCAM_BUS, + CLK_CON_DIV_CLKCMU_DCAM_IMGD, + CLK_CON_DIV_CLKCMU_DPU_BUS, + CLK_CON_DIV_CLKCMU_DSP_BUS, + CLK_CON_DIV_CLKCMU_FSYS0_BUS, + CLK_CON_DIV_CLKCMU_FSYS0_DPGTC, + CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD, + CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD, + CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30, + CLK_CON_DIV_CLKCMU_FSYS1_BUS, + CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD, + CLK_CON_DIV_CLKCMU_FSYS1_PCIE, + CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD, + CLK_CON_DIV_CLKCMU_G2D_G2D, + CLK_CON_DIV_CLKCMU_G2D_JPEG, + CLK_CON_DIV_CLKCMU_G3D_SWITCH, + CLK_CON_DIV_CLKCMU_HPM, + CLK_CON_DIV_CLKCMU_IMEM_BUS, + CLK_CON_DIV_CLKCMU_ISPHQ_BUS, + CLK_CON_DIV_CLKCMU_ISPLP_BUS, + CLK_CON_DIV_CLKCMU_IVA_BUS, + CLK_CON_DIV_CLKCMU_MFC_BUS, + CLK_CON_DIV_CLKCMU_MODEM_SHARED0, + CLK_CON_DIV_CLKCMU_MODEM_SHARED1, + CLK_CON_DIV_CLKCMU_OTP, + CLK_CON_DIV_CLKCMU_PERIC0_BUS, + CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG, + CLK_CON_DIV_CLKCMU_PERIC0_USI00, + CLK_CON_DIV_CLKCMU_PERIC0_USI01, + CLK_CON_DIV_CLKCMU_PERIC0_USI02, + CLK_CON_DIV_CLKCMU_PERIC0_USI03, + CLK_CON_DIV_CLKCMU_PERIC1_BUS, + CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2, + CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0, + CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1, + CLK_CON_DIV_CLKCMU_PERIC1_UART_BT, + CLK_CON_DIV_CLKCMU_PERIC1_USI04, + CLK_CON_DIV_CLKCMU_PERIC1_USI05, + CLK_CON_DIV_CLKCMU_PERIC1_USI06, + CLK_CON_DIV_CLKCMU_PERIC1_USI07, + CLK_CON_DIV_CLKCMU_PERIC1_USI08, + CLK_CON_DIV_CLKCMU_PERIC1_USI09, + CLK_CON_DIV_CLKCMU_PERIC1_USI10, + CLK_CON_DIV_CLKCMU_PERIC1_USI11, + CLK_CON_DIV_CLKCMU_PERIC1_USI12, + CLK_CON_DIV_CLKCMU_PERIC1_USI13, + CLK_CON_DIV_CLKCMU_PERIS_BUS, + CLK_CON_DIV_CLKCMU_SRDZ_BUS, + CLK_CON_DIV_CLKCMU_SRDZ_IMGD, + CLK_CON_DIV_CLKCMU_VPU_BUS, + CLK_CON_DIV_DIV_CLK_CMU_CMUREF, + CLK_CON_DIV_DIV_CP2AP_MIF_CLK_DIV2, + CLK_CON_DIV_DIV_PLL_SHARED0_DIV2, + CLK_CON_DIV_DIV_PLL_SHARED0_DIV4, + CLK_CON_DIV_DIV_PLL_SHARED1_DIV2, + CLK_CON_DIV_DIV_PLL_SHARED1_DIV4, + CLK_CON_DIV_DIV_PLL_SHARED2_DIV2, + CLK_CON_DIV_DIV_PLL_SHARED3_DIV2, + CLK_CON_DIV_DIV_PLL_SHARED4_DIV2, + CLK_CON_GAT_CLKCMU_DROOPDETECTOR, + CLK_CON_GAT_CLKCMU_MIF_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX, + CLK_CON_GAT_GATE_CLKCMU_APM_BUS, + CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, + CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, + CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C, + CLK_CON_GAT_GATE_CLKCMU_CAM_BUS, + CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0, + CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1, + CLK_CON_GAT_GATE_CLKCMU_CAM_VRA, + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, + CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, + 
CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_DBG_BUS, + CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS, + CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD, + CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, + CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD, + CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, + CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG, + CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_HPM, + CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS, + CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS, + CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS, + CLK_CON_GAT_GATE_CLKCMU_IVA_BUS, + CLK_CON_GAT_GATE_CLKCMU_MFC_BUS, + CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0, + CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13, + CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS, + CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS, + CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD, + CLK_CON_GAT_GATE_CLKCMU_VPU_BUS, +}; + +static const struct samsung_pll_rate_table pll_shared0_rate_table[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 2132000000U, 328, 4, 0), +}; + +static const struct samsung_pll_rate_table pll_shared1_rate_table[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 1865500000U, 287, 4, 0), +}; + +static const struct samsung_pll_rate_table pll_shared2_rate_table[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 800000000U, 400, 13, 0), +}; + +static const struct samsung_pll_rate_table pll_shared3_rate_table[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 630000000U, 315, 13, 0), +}; + +static const struct samsung_pll_rate_table pll_shared4_rate_table[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 667333333U, 154, 6, 0), +}; + +static const struct samsung_pll_clock top_pll_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + PLL(pll_1051x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0, + pll_shared0_rate_table), + PLL(pll_1051x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1, + pll_shared1_rate_table), + PLL(pll_1052x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED2, PLL_CON0_PLL_SHARED2, + pll_shared2_rate_table), + PLL(pll_1052x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED3, PLL_CON0_PLL_SHARED3, + pll_shared3_rate_table), + PLL(pll_1052x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED4, PLL_CON0_PLL_SHARED4, + 
pll_shared4_rate_table), +}; + +/* List of parent clocks for Muxes in CMU_TOP */ +PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" }; +PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" }; +PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" }; +PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" }; +PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" }; +PNAME(mout_cp2ap_mif_clk_user_p) = { "oscclk" }; +PNAME(mout_cmu_abox_cpuabox_p) = { "dout_cmu_shared0_div2", + "dout_cmu_shared1_div2", + "fout_shared2_pll", + "fout_shared4_pll" }; +PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_bus1_bus_p) = { "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_busc_bus_p) = { "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_cp2ap_mif_clk_div2", + "oscclk", "oscclk", "oscclk" }; +PNAME(mout_cmu_busc_busphsi2c_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_cp2ap_mif_clk_div2", + "oscclk" }; +PNAME(mout_cmu_cam_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_cam_tpu0_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_cam_tpu1_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_cam_vra_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_cis_clk0_p) = { "oscclk", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_cis_clk1_p) = { "oscclk", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_cis_clk2_p) = { "oscclk", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_cis_clk3_p) = { "oscclk", + "dout_cmu_shared2_div2" }; +PNAME(mout_core_bus_p) = { "dout_cmu_shared0_div2", + "dout_cmu_shared1_div2", + "fout_shared2_pll", + "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_cp2ap_mif_clk_div2" }; +PNAME(mout_cmu_cpucl0_switch_p) = { "dout_cmu_shared0_div2", + "dout_cmu_shared1_div2", + "fout_shared2_pll", + "fout_shared4_pll" }; +PNAME(mout_cmu_cpucl1_switch_p) = { "dout_cmu_shared0_div2", + "dout_cmu_shared1_div2", + "fout_shared2_pll", + "fout_shared4_pll" }; +PNAME(mout_cmu_dbg_bus_p) = { "fout_shared2_pll", + "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4" }; +PNAME(mout_cmu_dcam_bus_p) = { "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_dcam_imgd_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_shared0_div2", + "fout_shared3_pll", + "fout_shared4_pll", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "oscclk", "oscclk", "oscclk" }; +PNAME(mout_cmu_droopdetector_p) = { "oscclk", "dout_cmu_shared0_div2", + "dout_cmu_shared1_div2", + "fout_shared2_pll" }; +PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_fsys0_bus_p) = { "dout_cmu_shared1_div2", + "fout_shared2_pll", + "fout_shared4_pll", + "dout_cmu_shared0_div4" }; +PNAME(mout_fsys0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4", + 
"dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_fsys0_mmc_embd_p) = { "oscclk", "fout_shared2_pll", + "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "oscclk", "oscclk", "oscclk" }; +PNAME(mout_fsys0_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_fsys0_usbdrd30_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_fsys1_bus_p) = { "fout_shared2_pll", + "dout_cmu_shared0_div4" }; +PNAME(mout_cmu_fsys1_mmc_card_p) = { "oscclk", "fout_shared2_pll", + "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "oscclk", "oscclk", "oscclk" }; +PNAME(mout_cmu_fsys1_pcie_p) = { "oscclk", "fout_shared2_pll" }; +PNAME(mout_cmu_fsys1_ufs_card_p) = { "oscclk", + "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_g2d_g2d_p) = { "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_g2d_jpeg_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_hpm_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_imem_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_isphq_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_isplp_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_iva_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_mfc_bus_p) = { "fout_shared4_pll", + "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll", + "fout_shared1_pll", + "dout_cmu_shared0_div2", + "dout_cmu_shared1_div2", + "fout_shared2_pll", + "mout_cp2ap_mif_clk_user", + "oscclk", "oscclk" }; +PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric0_usi00_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric0_usi01_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric0_usi02_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric0_usi03_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_peric1_speedy2_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "oscclk" }; +PNAME(mout_cmu_peric1_spi_cam0_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_spi_cam1_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_uart_bt_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; 
+PNAME(mout_cmu_peric1_usi04_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi05_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi06_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi07_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi08_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi09_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi10_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi11_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi12_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peric1_usi13_p) = { "oscclk", "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared2_div2" }; +PNAME(mout_cmu_srdz_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_srdz_imgd_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; +PNAME(mout_cmu_vpu_bus_p) = { "dout_cmu_shared0_div4", + "dout_cmu_shared1_div4", + "dout_cmu_shared2_div2", + "dout_cmu_shared4_div2" }; + +/* + * Register name to clock name mangling strategy used in this file + * + * Replace PLL_CON0_PLL with CLK_MOUT_PLL and mout_pll + * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu + * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu + * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu + * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu + * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu + * + * For gates remove _UID _BLK _IPCLKPORT and _RSTNSYNC + */ + +static const struct samsung_mux_clock top_mux_clks[] __initconst = { + MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p, + PLL_CON0_PLL_SHARED0, 4, 1), + MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p, + PLL_CON0_PLL_SHARED1, 4, 1), + MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p, + PLL_CON0_PLL_SHARED2, 4, 1), + MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p, + PLL_CON0_PLL_SHARED3, 4, 1), + MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p, + PLL_CON0_PLL_SHARED4, 4, 1), + MUX(CLK_MOUT_CP2AP_MIF_CLK_USER, "mout_cp2ap_mif_clk_user", + mout_cp2ap_mif_clk_user_p, PLL_CON0_MUX_CP2AP_MIF_CLK_USER, 4, 1), + MUX(CLK_MOUT_CMU_ABOX_CPUABOX, "mout_cmu_abox_cpuabox", + mout_cmu_abox_cpuabox_p, CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX, + 0, 2), + MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus", mout_cmu_apm_bus_p, + CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1), + MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus", mout_cmu_bus1_bus_p, + CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2), + MUX(CLK_MOUT_CMU_BUSC_BUS, "mout_cmu_busc_bus", mout_cmu_busc_bus_p, + CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, 0, 3), + MUX(CLK_MOUT_CMU_BUSC_BUSPHSI2C, "mout_cmu_busc_busphsi2c", + mout_cmu_busc_busphsi2c_p, 
CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C, + 0, 2), + MUX(CLK_MOUT_CMU_CAM_BUS, "mout_cmu_cam_bus", mout_cmu_cam_bus_p, + CLK_CON_MUX_MUX_CLKCMU_CAM_BUS, 0, 2), + MUX(CLK_MOUT_CMU_CAM_TPU0, "mout_cmu_cam_tpu0", mout_cmu_cam_tpu0_p, + CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0, 0, 2), + MUX(CLK_MOUT_CMU_CAM_TPU1, "mout_cmu_cam_tpu1", mout_cmu_cam_tpu1_p, + CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1, 0, 2), + MUX(CLK_MOUT_CMU_CAM_VRA, "mout_cmu_cam_vra", mout_cmu_cam_vra_p, + CLK_CON_MUX_MUX_CLKCMU_CAM_VRA, 0, 2), + MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0", mout_cmu_cis_clk0_p, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1), + MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1", mout_cmu_cis_clk1_p, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1), + MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2", mout_cmu_cis_clk2_p, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1), + MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3", mout_cmu_cis_clk3_p, + CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1), + MUX(CLK_MOUT_CMU_CORE_BUS, "mout_core_bus", mout_core_bus_p, + CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3), + MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch", + mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + 0, 2), + MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch", + mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, + 0, 2), + MUX(CLK_MOUT_CMU_DBG_BUS, "mout_cmu_dbg_bus", mout_cmu_dbg_bus_p, + CLK_CON_MUX_MUX_CLKCMU_DBG_BUS, 0, 2), + MUX(CLK_MOUT_CMU_DCAM_BUS, "mout_cmu_dcam_bus", mout_cmu_dcam_bus_p, + CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS, 0, 2), + MUX(CLK_MOUT_CMU_DCAM_IMGD, "mout_cmu_dcam_imgd", mout_cmu_dcam_imgd_p, + CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD, 0, 2), + MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus", mout_cmu_dpu_bus_p, + CLK_CON_MUX_MUX_CLKCMU_DPU_BUS, 0, 3), + MUX(CLK_MOUT_CMU_DROOPDETECTOR, "mout_cmu_droopdetector", + mout_cmu_droopdetector_p, CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR, + 0, 2), + MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus", mout_cmu_dsp_bus_p, + CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2), + MUX(CLK_MOUT_CMU_FSYS0_BUS, "mout_fsys0_bus", mout_fsys0_bus_p, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, 0, 2), + MUX(CLK_MOUT_CMU_FSYS0_DPGTC, "mout_fsys0_dpgtc", mout_fsys0_dpgtc_p, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC, 0, 2), + MUX(CLK_MOUT_CMU_FSYS0_MMC_EMBD, "mout_fsys0_mmc_embd", + mout_fsys0_mmc_embd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD, + 0, 3), + MUX(CLK_MOUT_CMU_FSYS0_UFS_EMBD, "mout_fsys0_ufs_embd", + mout_fsys0_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD, + 0, 2), + MUX(CLK_MOUT_CMU_FSYS0_USBDRD30, "mout_fsys0_usbdrd30", + mout_fsys0_usbdrd30_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30, + 0, 2), + MUX(CLK_MOUT_CMU_FSYS1_BUS, "mout_cmu_fsys1_bus", mout_cmu_fsys1_bus_p, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, 0, 1), + MUX(CLK_MOUT_CMU_FSYS1_MMC_CARD, "mout_cmu_fsys1_mmc_card", + mout_cmu_fsys1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD, + 0, 3), + MUX(CLK_MOUT_CMU_FSYS1_PCIE, "mout_cmu_fsys1_pcie", + mout_cmu_fsys1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE, 0, 1), + MUX(CLK_MOUT_CMU_FSYS1_UFS_CARD, "mout_cmu_fsys1_ufs_card", + mout_cmu_fsys1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD, + 0, 2), + MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d", mout_cmu_g2d_g2d_p, + CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2), + MUX(CLK_MOUT_CMU_G2D_JPEG, "mout_cmu_g2d_jpeg", mout_cmu_g2d_jpeg_p, + CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG, 0, 2), + MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm", mout_cmu_hpm_p, + CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2), + MUX(CLK_MOUT_CMU_IMEM_BUS, "mout_cmu_imem_bus", mout_cmu_imem_bus_p, + 
CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS, 0, 2), + MUX(CLK_MOUT_CMU_ISPHQ_BUS, "mout_cmu_isphq_bus", mout_cmu_isphq_bus_p, + CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS, 0, 2), + MUX(CLK_MOUT_CMU_ISPLP_BUS, "mout_cmu_isplp_bus", mout_cmu_isplp_bus_p, + CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS, 0, 2), + MUX(CLK_MOUT_CMU_IVA_BUS, "mout_cmu_iva_bus", mout_cmu_iva_bus_p, + CLK_CON_MUX_MUX_CLKCMU_IVA_BUS, 0, 2), + MUX(CLK_MOUT_CMU_MFC_BUS, "mout_cmu_mfc_bus", mout_cmu_mfc_bus_p, + CLK_CON_MUX_MUX_CLKCMU_MFC_BUS, 0, 2), + MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch", + mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3), + MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus", + mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1), + MUX(CLK_MOUT_CMU_PERIC0_UART_DBG, "mout_cmu_peric0_uart_dbg", + mout_cmu_peric0_uart_dbg_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC0_USI00, "mout_cmu_peric0_usi00", + mout_cmu_peric0_usi00_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC0_USI01, "mout_cmu_peric0_usi01", + mout_cmu_peric0_usi01_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC0_USI02, "mout_cmu_peric0_usi02", + mout_cmu_peric0_usi02_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC0_USI03, "mout_cmu_peric0_usi03", + mout_cmu_peric0_usi03_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus", + mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1), + MUX(CLK_MOUT_CMU_PERIC1_SPEEDY2, "mout_cmu_peric1_speedy2", + mout_cmu_peric1_speedy2_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_SPI_CAM0, "mout_cmu_peric1_spi_cam0", + mout_cmu_peric1_spi_cam0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_SPI_CAM1, "mout_cmu_peric1_spi_cam1", + mout_cmu_peric1_spi_cam1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_UART_BT, "mout_cmu_peric1_uart_bt", + mout_cmu_peric1_uart_bt_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI04, "mout_cmu_peric1_usi04", + mout_cmu_peric1_usi04_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI05, "mout_cmu_peric1_usi05", + mout_cmu_peric1_usi05_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI06, "mout_cmu_peric1_usi06", + mout_cmu_peric1_usi06_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI07, "mout_cmu_peric1_usi07", + mout_cmu_peric1_usi07_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI08, "mout_cmu_peric1_usi08", + mout_cmu_peric1_usi08_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI09, "mout_cmu_peric1_usi09", + mout_cmu_peric1_usi09_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI10, "mout_cmu_peric1_usi10", + mout_cmu_peric1_usi10_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI11, "mout_cmu_peric1_usi11", + mout_cmu_peric1_usi11_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI12, "mout_cmu_peric1_usi12", + mout_cmu_peric1_usi12_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12, + 0, 2), + MUX(CLK_MOUT_CMU_PERIC1_USI13, "mout_cmu_peric1_usi13", + mout_cmu_peric1_usi13_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13, + 0, 2), + MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus", mout_cmu_peris_bus_p, + CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1), + MUX(CLK_MOUT_CMU_SRDZ_BUS, 
"mout_cmu_srdz_bus", mout_cmu_srdz_bus_p, + CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS, 0, 2), + MUX(CLK_MOUT_CMU_SRDZ_IMGD, "mout_cmu_srdz_imgd", mout_cmu_srdz_imgd_p, + CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD, 0, 2), + MUX(CLK_MOUT_CMU_VPU_BUS, "mout_cmu_vpu_bus", mout_cmu_vpu_bus_p, + CLK_CON_MUX_MUX_CLKCMU_VPU_BUS, 0, 2), +}; + +static const struct samsung_div_clock top_div_clks[] __initconst = { + DIV(CLK_DOUT_CMU_ABOX_CPUABOX, "dout_cmu_cmu_abox_cpuabox", + "gout_cmu_abox_cpuabox", CLK_CON_DIV_CLKCMU_ABOX_CPUABOX, 0, 3), + DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus", + CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3), + DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus", + CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4), + DIV(CLK_DOUT_CMU_BUSC_BUS, "dout_cmu_busc_bus", + "gout_cmu_clkcmu_busc_bus", CLK_CON_DIV_CLKCMU_BUSC_BUS, 0, 4), + DIV(CLK_DOUT_CMU_BUSC_BUSPHSI2C, "dout_cmu_busc_busphsi2c", + "gout_cmu_busc_busphsi2c", CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C, + 0, 4), + DIV(CLK_DOUT_CMU_CAM_BUS, "dout_cmu_cam_bus", "gout_cmu_cam_bus", + CLK_CON_DIV_CLKCMU_CAM_BUS, 0, 4), + DIV(CLK_DOUT_CMU_CAM_TPU0, "dout_cmu_cam_tpu0", "gout_cmu_cam_tpu0", + CLK_CON_DIV_CLKCMU_CAM_TPU0, 0, 4), + DIV(CLK_DOUT_CMU_CAM_TPU1, "dout_cmu_cam_tpu1", "gout_cmu_cam_tpu1", + CLK_CON_DIV_CLKCMU_CAM_TPU1, 0, 4), + DIV(CLK_DOUT_CMU_CAM_VRA, "dout_cmu_cam_vra", "gout_cmu_cam_vra", + CLK_CON_DIV_CLKCMU_CAM_VRA, 0, 4), + DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0", + CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5), + DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1", + CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5), + DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2", + CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5), + DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3", + CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5), + DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus", + CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4), + DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch", + "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3), + DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch", + "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3), + DIV(CLK_DOUT_CMU_DBG_BUS, "dout_cmu_dbg_bus", "gout_cmu_dbg_bus", + CLK_CON_DIV_CLKCMU_DBG_BUS, 0, 4), + DIV(CLK_DOUT_CMU_DCAM_BUS, "dout_cmu_dcam_bus", "gout_cmu_dcam_bus", + CLK_CON_DIV_CLKCMU_DCAM_BUS, 0, 4), + DIV(CLK_DOUT_CMU_DCAM_IMGD, "dout_cmu_dcam_imgd", "gout_cmu_dcam_imgd", + CLK_CON_DIV_CLKCMU_DCAM_IMGD, 0, 4), + DIV(CLK_DOUT_CMU_DPU_BUS, "dout_cmu_dpu_bus", "gout_cmu_dpu_bus", + CLK_CON_DIV_CLKCMU_DPU_BUS, 0, 4), + DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus", + CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4), + DIV(CLK_DOUT_CMU_FSYS0_BUS, "dout_cmu_fsys0_bus", "gout_cmu_fsys0_bus", + CLK_CON_DIV_CLKCMU_FSYS0_BUS, 0, 4), + DIV(CLK_DOUT_CMU_FSYS0_DPGTC, "dout_cmu_fsys0_dpgtc", + "gout_cmu_fsys0_dpgtc", CLK_CON_DIV_CLKCMU_FSYS0_DPGTC, 0, 3), + DIV(CLK_DOUT_CMU_FSYS0_MMC_EMBD, "dout_cmu_fsys0_mmc_embd", + "gout_cmu_fsys0_mmc_embd", CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD, + 0, 9), + DIV(CLK_DOUT_CMU_FSYS0_UFS_EMBD, "dout_cmu_fsys0_ufs_embd", + "gout_cmu_fsys0_ufs_embd", CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD, + 0, 3), + DIV(CLK_DOUT_CMU_FSYS0_USBDRD30, "dout_cmu_fsys0_usbdrd30", + "gout_cmu_fsys0_usbdrd30", CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30, + 0, 4), + DIV(CLK_DOUT_CMU_FSYS1_BUS, "dout_cmu_fsys1_bus", "gout_cmu_fsys1_bus", + CLK_CON_DIV_CLKCMU_FSYS1_BUS, 0, 4), + DIV(CLK_DOUT_CMU_FSYS1_MMC_CARD, 
"dout_cmu_fsys1_mmc_card", + "gout_cmu_fsys1_mmc_card", CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD, + 0, 9), + DIV(CLK_DOUT_CMU_FSYS1_UFS_CARD, "dout_cmu_fsys1_ufs_card", + "gout_cmu_fsys1_ufs_card", CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD, + 0, 4), + DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d", + CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4), + DIV(CLK_DOUT_CMU_G2D_JPEG, "dout_cmu_g2d_jpeg", "gout_cmu_g2d_jpeg", + CLK_CON_DIV_CLKCMU_G2D_JPEG, 0, 4), + DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch", + "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3), + DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm", + CLK_CON_DIV_CLKCMU_HPM, 0, 2), + DIV(CLK_DOUT_CMU_IMEM_BUS, "dout_cmu_imem_bus", "gout_cmu_imem_bus", + CLK_CON_DIV_CLKCMU_IMEM_BUS, 0, 4), + DIV(CLK_DOUT_CMU_ISPHQ_BUS, "dout_cmu_isphq_bus", "gout_cmu_isphq_bus", + CLK_CON_DIV_CLKCMU_ISPHQ_BUS, 0, 4), + DIV(CLK_DOUT_CMU_ISPLP_BUS, "dout_cmu_isplp_bus", "gout_cmu_isplp_bus", + CLK_CON_DIV_CLKCMU_ISPLP_BUS, 0, 4), + DIV(CLK_DOUT_CMU_IVA_BUS, "dout_cmu_iva_bus", "gout_cmu_iva_bus", + CLK_CON_DIV_CLKCMU_IVA_BUS, 0, 4), + DIV(CLK_DOUT_CMU_MFC_BUS, "dout_cmu_mfc_bus", "gout_cmu_mfc_bus", + CLK_CON_DIV_CLKCMU_MFC_BUS, 0, 4), + DIV(CLK_DOUT_CMU_MODEM_SHARED0, "dout_cmu_modem_shared0", + "gout_cmu_modem_shared0", CLK_CON_DIV_CLKCMU_MODEM_SHARED0, 0, 3), + DIV(CLK_DOUT_CMU_MODEM_SHARED1, "dout_cmu_modem_shared1", + "gout_cmu_modem_shared1", CLK_CON_DIV_CLKCMU_MODEM_SHARED1, 0, 3), + DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus", + "gout_cmu_peric0_bus", CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4), + DIV(CLK_DOUT_CMU_PERIC0_UART_DBG, "dout_cmu_peric0_uart_dbg", + "gout_cmu_peric0_uart_dbg", CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG, + 0, 4), + DIV(CLK_DOUT_CMU_PERIC0_USI00, "dout_cmu_peric0_usi00", + "gout_cmu_peric0_usi00", CLK_CON_DIV_CLKCMU_PERIC0_USI00, 0, 4), + DIV(CLK_DOUT_CMU_PERIC0_USI01, "dout_cmu_peric0_usi01", + "gout_cmu_peric0_usi01", CLK_CON_DIV_CLKCMU_PERIC0_USI01, 0, 4), + DIV(CLK_DOUT_CMU_PERIC0_USI02, "dout_cmu_peric0_usi02", + "gout_cmu_peric0_usi02", CLK_CON_DIV_CLKCMU_PERIC0_USI02, 0, 4), + DIV(CLK_DOUT_CMU_PERIC0_USI03, "dout_cmu_peric0_usi03", + "gout_cmu_peric0_usi03", CLK_CON_DIV_CLKCMU_PERIC0_USI03, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus", + "gout_cmu_peric1_bus", CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_SPEEDY2, "dout_cmu_peric1_speedy2", + "gout_cmu_peric1_speedy2", CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2, + 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_SPI_CAM0, "dout_cmu_peric1_spi_cam0", + "gout_cmu_peric1_spi_cam0", CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0, + 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_SPI_CAM1, "dout_cmu_peric1_spi_cam1", + "gout_cmu_peric1_spi_cam1", CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1, + 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_UART_BT, "dout_cmu_peric1_uart_bt", + "gout_cmu_peric1_uart_bt", CLK_CON_DIV_CLKCMU_PERIC1_UART_BT, + 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI04, "dout_cmu_peric1_usi04", + "gout_cmu_peric1_usi04", CLK_CON_DIV_CLKCMU_PERIC1_USI04, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI05, "dout_cmu_peric1_usi05", + "gout_cmu_peric1_usi05", CLK_CON_DIV_CLKCMU_PERIC1_USI05, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI06, "dout_cmu_peric1_usi06", + "gout_cmu_peric1_usi06", CLK_CON_DIV_CLKCMU_PERIC1_USI06, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI07, "dout_cmu_peric1_usi07", + "gout_cmu_peric1_usi07", CLK_CON_DIV_CLKCMU_PERIC1_USI07, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI08, "dout_cmu_peric1_usi08", + "gout_cmu_peric1_usi08", CLK_CON_DIV_CLKCMU_PERIC1_USI08, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI09, 
"dout_cmu_peric1_usi09", + "gout_cmu_peric1_usi09", CLK_CON_DIV_CLKCMU_PERIC1_USI09, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI10, "dout_cmu_peric1_usi10", + "gout_cmu_peric1_usi10", CLK_CON_DIV_CLKCMU_PERIC1_USI10, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI11, "dout_cmu_peric1_usi11", + "gout_cmu_peric1_usi11", CLK_CON_DIV_CLKCMU_PERIC1_USI11, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI12, "dout_cmu_peric1_usi12", + "gout_cmu_peric1_usi12", CLK_CON_DIV_CLKCMU_PERIC1_USI12, 0, 4), + DIV(CLK_DOUT_CMU_PERIC1_USI13, "dout_cmu_peric1_usi13", + "gout_cmu_peric1_usi13", CLK_CON_DIV_CLKCMU_PERIC1_USI13, 0, 4), + DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus", + CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4), + DIV(CLK_DOUT_CMU_SRDZ_BUS, "dout_cmu_srdz_bus", "gout_cmu_srdz_bus", + CLK_CON_DIV_CLKCMU_SRDZ_BUS, 0, 4), + DIV(CLK_DOUT_CMU_SRDZ_IMGD, "dout_cmu_srdz_imgd", "gout_cmu_srdz_imgd", + CLK_CON_DIV_CLKCMU_SRDZ_IMGD, 0, 4), + DIV(CLK_DOUT_CMU_VPU_BUS, "dout_cmu_vpu_bus", "gout_cmu_vpu_bus", + CLK_CON_DIV_CLKCMU_VPU_BUS, 0, 4), +}; + +static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = { + FFACTOR(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2", + "mout_pll_shared0", 1, 2, 0), + FFACTOR(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4", + "mout_pll_shared0", 1, 4, 0), + FFACTOR(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2", + "mout_pll_shared1", 1, 2, 0), + FFACTOR(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4", + "mout_pll_shared1", 1, 4, 0), + FFACTOR(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2", + "mout_pll_shared2", 1, 2, 0), + FFACTOR(CLK_DOUT_CMU_SHARED3_DIV2, "dout_cmu_shared3_div2", + "mout_pll_shared3", 1, 2, 0), + FFACTOR(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2", + "mout_pll_shared4", 1, 2, 0), + FFACTOR(CLK_DOUT_CMU_FSYS1_PCIE, "dout_cmu_fsys1_pcie", + "gout_cmu_fsys1_pcie", 1, 8, 0), + FFACTOR(CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2, "dout_cmu_cp2ap_mif_clk_div2", + "mout_cp2ap_mif_clk_user", 1, 2, 0), + FFACTOR(CLK_DOUT_CMU_CMU_OTP, "dout_cmu_cmu_otp", "oscclk", 1, 8, 0), +}; + +static const struct samsung_gate_clock top_gate_clks[] __initconst = { + GATE(CLK_GOUT_CMU_DROOPDETECTOR, "gout_droopdetector", + "mout_cmu_droopdetector", CLK_CON_GAT_CLKCMU_DROOPDETECTOR, + 21, 0, 0), + GATE(CLK_GOUT_CMU_MIF_SWITCH, "gout_cmu_mif_switch", + "mout_cmu_mif_switch", CLK_CON_GAT_CLKCMU_MIF_SWITCH, 21, 0, 0), + GATE(CLK_GOUT_CMU_ABOX_CPUABOX, "gout_cmu_abox_cpuabox", + "mout_cmu_abox_cpuabox", CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX, + 21, 0, 0), + GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus", + CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus", + CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_BUSC_BUS, "gout_cmu_busc_bus", "mout_cmu_busc_bus", + CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_BUSC_BUSPHSI2C, "gout_cmu_busc_busphsi2c", + "mout_cmu_busc_busphsi2c", CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_CAM_BUS, "gout_cmu_cam_bus", "mout_cmu_cam_bus", + CLK_CON_GAT_GATE_CLKCMU_CAM_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_CAM_TPU0, "gout_cmu_cam_tpu0", "mout_cmu_cam_tpu0", + CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0, 21, 0, 0), + GATE(CLK_GOUT_CMU_CAM_TPU1, "gout_cmu_cam_tpu1", "mout_cmu_cam_tpu1", + CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1, 21, 0, 0), + GATE(CLK_GOUT_CMU_CAM_VRA, "gout_cmu_cam_vra", "mout_cmu_cam_vra", + CLK_CON_GAT_GATE_CLKCMU_CAM_VRA, 21, 
0, 0), + GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0", + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0), + GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1", + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0), + GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2", + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0), + GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3", + CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0), + GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_core_bus", + CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch", + "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch", + "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_DBG_BUS, "gout_cmu_dbg_bus", "mout_cmu_dbg_bus", + CLK_CON_GAT_GATE_CLKCMU_DBG_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_DCAM_BUS, "gout_cmu_dcam_bus", "mout_cmu_dcam_bus", + CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_DCAM_IMGD, "gout_cmu_dcam_imgd", + "mout_cmu_dcam_imgd", CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD, + 21, 0, 0), + GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_bus", + CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus", + CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CMU_FSYS0_BUS, "gout_cmu_fsys0_bus", "mout_fsys0_bus", + CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS0_DPGTC, "gout_cmu_fsys0_dpgtc", + "mout_fsys0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS0_MMC_EMBD, "gout_cmu_fsys0_mmc_embd", + "mout_fsys0_mmc_embd", CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS0_UFS_EMBD, "gout_cmu_fsys0_ufs_embd", + "mout_fsys0_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS0_USBDRD30, "gout_cmu_fsys0_usbdrd30", + "mout_fsys0_usbdrd30", CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS1_BUS, "gout_cmu_fsys1_bus", + "mout_cmu_fsys1_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS1_MMC_CARD, "gout_cmu_fsys1_mmc_card", + "mout_cmu_fsys1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS1_PCIE, "gout_cmu_fsys1_pcie", + "mout_cmu_fsys1_pcie", CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE, + 21, 0, 0), + GATE(CLK_GOUT_CMU_FSYS1_UFS_CARD, "gout_cmu_fsys1_ufs_card", + "mout_cmu_fsys1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD, + 21, 0, 0), + GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d", + CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0), + GATE(CLK_GOUT_CMU_G2D_JPEG, "gout_cmu_g2d_jpeg", "mout_cmu_g2d_jpeg", + CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG, 21, 0, 0), + GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch", + "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, 21, 0, 0), + GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm", + CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0), + GATE(CLK_GOUT_CMU_IMEM_BUS, "gout_cmu_imem_bus", "mout_cmu_imem_bus", + CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_ISPHQ_BUS, "gout_cmu_isphq_bus", + "mout_cmu_isphq_bus", CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS, + 21, 0, 0), + GATE(CLK_GOUT_CMU_ISPLP_BUS, "gout_cmu_isplp_bus", + "mout_cmu_isplp_bus", 
CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS, + 21, 0, 0), + GATE(CLK_GOUT_CMU_IVA_BUS, "gout_cmu_iva_bus", "mout_cmu_iva_bus", + CLK_CON_GAT_GATE_CLKCMU_IVA_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_MFC_BUS, "gout_cmu_mfc_bus", "mout_cmu_mfc_bus", + CLK_CON_GAT_GATE_CLKCMU_MFC_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_MODEM_SHARED0, "gout_cmu_modem_shared0", + "dout_cmu_shared0_div2", CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0, + 21, 0, 0), + GATE(CLK_GOUT_CMU_MODEM_SHARED1, "gout_cmu_modem_shared1", + "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus", + "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC0_UART_DBG, "gout_cmu_peric0_uart_dbg", + "mout_cmu_peric0_uart_dbg", + CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG, 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC0_USI00, "gout_cmu_peric0_usi00", + "mout_cmu_peric0_usi00", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC0_USI01, "gout_cmu_peric0_usi01", + "mout_cmu_peric0_usi01", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC0_USI02, "gout_cmu_peric0_usi02", + "mout_cmu_peric0_usi02", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC0_USI03, "gout_cmu_peric0_usi03", + "mout_cmu_peric0_usi03", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus", + "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_SPEEDY2, "gout_cmu_peric1_speedy2", + "mout_cmu_peric1_speedy2", CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_SPI_CAM0, "gout_cmu_peric1_spi_cam0", + "mout_cmu_peric1_spi_cam0", + CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0, 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_SPI_CAM1, "gout_cmu_peric1_spi_cam1", + "mout_cmu_peric1_spi_cam1", + CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1, 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_UART_BT, "gout_cmu_peric1_uart_bt", + "mout_cmu_peric1_uart_bt", CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI04, "gout_cmu_peric1_usi04", + "mout_cmu_peric1_usi04", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI05, "gout_cmu_peric1_usi05", + "mout_cmu_peric1_usi05", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI06, "gout_cmu_peric1_usi06", + "mout_cmu_peric1_usi06", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI07, "gout_cmu_peric1_usi07", + "mout_cmu_peric1_usi07", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI08, "gout_cmu_peric1_usi08", + "mout_cmu_peric1_usi08", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI09, "gout_cmu_peric1_usi09", + "mout_cmu_peric1_usi09", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI10, "gout_cmu_peric1_usi10", + "mout_cmu_peric1_usi10", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI11, "gout_cmu_peric1_usi11", + "mout_cmu_peric1_usi11", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI12, "gout_cmu_peric1_usi12", + "mout_cmu_peric1_usi12", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIC1_USI13, "gout_cmu_peric1_usi13", + "mout_cmu_peric1_usi13", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13, + 21, 0, 0), + GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus", + "mout_cmu_peris_bus", 
CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS, + 21, 0, 0), + GATE(CLK_GOUT_CMU_SRDZ_BUS, "gout_cmu_srdz_bus", "mout_cmu_srdz_bus", + CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS, 21, 0, 0), + GATE(CLK_GOUT_CMU_SRDZ_IMGD, "gout_cmu_srdz_imgd", + "mout_cmu_srdz_imgd", CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD, + 21, 0, 0), + GATE(CLK_GOUT_CMU_VPU_BUS, "gout_cmu_vpu_bus", "mout_cmu_vpu_bus", + CLK_CON_GAT_GATE_CLKCMU_VPU_BUS, 21, 0, 0), +}; + +static const struct samsung_cmu_info top_cmu_info __initconst = { + .pll_clks = top_pll_clks, + .nr_pll_clks = ARRAY_SIZE(top_pll_clks), + .mux_clks = top_mux_clks, + .nr_mux_clks = ARRAY_SIZE(top_mux_clks), + .div_clks = top_div_clks, + .nr_div_clks = ARRAY_SIZE(top_div_clks), + .fixed_factor_clks = top_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks), + .gate_clks = top_gate_clks, + .nr_gate_clks = ARRAY_SIZE(top_gate_clks), + .nr_clk_ids = CLKS_NR_TOP, + .clk_regs = top_clk_regs, + .nr_clk_regs = ARRAY_SIZE(top_clk_regs), +}; + +static void __init exynos8895_cmu_top_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &top_cmu_info); +} + +/* Register CMU_TOP early, as it's a dependency for other early domains */ +CLK_OF_DECLARE(exynos8895_cmu_top, "samsung,exynos8895-cmu-top", + exynos8895_cmu_top_init); + +/* ---- CMU_PERIS ----------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIS (0x10010000) */ +#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0100 +#define PLL_CON2_MUX_CLKCMU_PERIS_BUS_USER 0x0108 +#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000 +#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x2000 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM 0x2010 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS 0x2014 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK 0x2018 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK 0x2020 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK 0x2024 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK 0x2028 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK 0x2038 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK 0x203c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2040 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x2044 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK 0x2048 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK 0x204c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK 0x2050 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK 0x2054 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK 0x205c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK 0x2060 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK 0x2064 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK 0x2068 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK 0x206c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK 0x2070 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK 0x2074 +#define 
CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK 0x2078 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK 0x207c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK 0x2080 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK 0x2084 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x2088 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK 0x208c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK 0x2090 + +static const unsigned long peris_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, + PLL_CON2_MUX_CLKCMU_PERIS_BUS_USER, + CLK_CON_MUX_MUX_CLK_PERIS_GIC, + CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK, +}; + +/* List of parent clocks for Muxes in CMU_PERIS */ +PNAME(mout_peris_bus_user_p) = { "oscclk", "dout_cmu_peris_bus" }; +PNAME(mout_peris_gic_p) = { "oscclk", "mout_peris_bus_user" }; + +static const struct samsung_mux_clock peris_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user", + mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, 4, 1), + MUX(CLK_MOUT_PERIS_GIC, "mout_peris_gic", + mout_peris_gic_p, CLK_CON_MUX_MUX_CLK_PERIS_GIC, 0, 5), +}; + +static const struct samsung_gate_clock peris_gate_clks[] __initconst = { + GATE(CLK_GOUT_PERIS_CMU_PERIS_PCLK, "gout_peris_cmu_peris_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM, + "gout_peris_ad_axi_p_peris_aclkm", "mout_peris_gic", + 
CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS, + "gout_peris_ad_axi_p_peris_aclks", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK, + "gout_peris_axi2apb_perisp0_aclk", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK, + "gout_peris_axi2apb_perisp1_aclk", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_BUSIF_TMU_PCLK, "gout_peris_busif_tmu_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK, + 21, 0, 0), + /* GIC (interrupt controller) clock must be always running */ + GATE(CLK_GOUT_PERIS_GIC_CLK, "gout_peris_gic_clk", + "mout_peris_gic", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK, + "gout_peris_lhm_axi_p_peris_i_clk", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIS_MCT_PCLK, "gout_peris_mct_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK, "gout_peris_otp_con_bira_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_OTP_CON_TOP_PCLK, "gout_peris_otp_con_top_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_PMU_PERIS_PCLK, "gout_peris_pmu_peris_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK, + "gout_peris_rstnsync_clk_peris_busp_clk", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK, + "gout_peris_rstnsync_clk_peris_gic_clk", "mout_peris_gic", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_SYSREG_PERIS_PCLK, "gout_peris_sysreg_peris_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC00_PCLK, "gout_peris_tzpc00_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC01_PCLK, "gout_peris_tzpc01_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC02_PCLK, "gout_peris_tzpc02_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC03_PCLK, "gout_peris_tzpc03_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC04_PCLK, "gout_peris_tzpc04_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC05_PCLK, "gout_peris_tzpc05_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC06_PCLK, "gout_peris_tzpc06_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK, 21, 0, 0), + 
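	/*
	 * A consumer-side sketch, for illustration only (hypothetical driver,
	 * not part of this table): gates declared with a flags value of 0,
	 * such as the TZPC PCLKs here, are gated and ungated on demand through
	 * the common clock API; CLK_IS_CRITICAL gates are enabled at
	 * registration and never turned off by the framework, while
	 * CLK_IGNORE_UNUSED gates are merely skipped by the late-init
	 * unused-clock sweep.
	 *
	 *	pclk = devm_clk_get(&pdev->dev, NULL);
	 *	if (IS_ERR(pclk))
	 *		return PTR_ERR(pclk);
	 *	ret = clk_prepare_enable(pclk);
	 */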
GATE(CLK_GOUT_PERIS_TZPC07_PCLK, "gout_peris_tzpc07_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC08_PCLK, "gout_peris_tzpc08_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC09_PCLK, "gout_peris_tzpc09_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC10_PCLK, "gout_peris_tzpc10_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC11_PCLK, "gout_peris_tzpc11_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC12_PCLK, "gout_peris_tzpc12_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC13_PCLK, "gout_peris_tzpc13_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC14_PCLK, "gout_peris_tzpc14_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_TZPC15_PCLK, "gout_peris_tzpc15_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK, "gout_peris_wdt_cluster0_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK, "gout_peris_wdt_cluster1_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIS_XIU_P_PERIS_ACLK, "gout_peris_xiu_p_peris_aclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info peris_cmu_info __initconst = { + .mux_clks = peris_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peris_mux_clks), + .gate_clks = peris_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peris_gate_clks), + .nr_clk_ids = CLKS_NR_PERIS, + .clk_regs = peris_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peris_clk_regs), + .clk_name = "bus", +}; + +static void __init exynos8895_cmu_peris_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &peris_cmu_info); +} + +/* Register CMU_PERIS early, as it's needed for MCT timer */ +CLK_OF_DECLARE(exynos8895_cmu_peris, "samsung,exynos8895-cmu-peris", + exynos8895_cmu_peris_init); + +/* ---- CMU_FSYS0 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_FSYS0 (0x11000000) */ +#define PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER 0x0100 +#define PLL_CON2_MUX_CLKCMU_FSYS0_BUS_USER 0x0108 +#define PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER 0x0120 +#define PLL_CON2_MUX_CLKCMU_FSYS0_DPGTC_USER 0x0128 +#define PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER 0x0140 +#define PLL_CON2_MUX_CLKCMU_FSYS0_MMC_EMBD_USER 0x0148 +#define PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER 0x0160 +#define PLL_CON2_MUX_CLKCMU_FSYS0_UFS_EMBD_USER 0x0168 +#define PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER 0x0180 +#define PLL_CON2_MUX_CLKCMU_FSYS0_USBDRD30_USER 0x0188 +#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x2000 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK 0x2010 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK 0x2014 +#define 
CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK 0x2018 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK 0x2020 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK 0x2024 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK 0x2038 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK 0x203c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK 0x2040 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK 0x2044 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK 0x2048 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK 0x204c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK 0x2050 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN 0x2054 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK 0x205c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK 0x2060 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK 0x2064 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK 0x2068 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x206c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x2070 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x2074 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK 0x2078 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK 0x207c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK 0x2080 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK 0x2084 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK 0x2088 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK 0x208c +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK 0x2090 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK 0x2094 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK 0x2098 +#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK 0x209c + +static const unsigned long fsys0_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER, + PLL_CON2_MUX_CLKCMU_FSYS0_BUS_USER, + PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER, + PLL_CON2_MUX_CLKCMU_FSYS0_DPGTC_USER, + PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER, + PLL_CON2_MUX_CLKCMU_FSYS0_MMC_EMBD_USER, + PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER, + PLL_CON2_MUX_CLKCMU_FSYS0_UFS_EMBD_USER, + PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER, + PLL_CON2_MUX_CLKCMU_FSYS0_USBDRD30_USER, + CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK, + 
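	/*
	 * A note on this table, for illustration (behaviour of the shared
	 * Samsung CMU helpers, assumed rather than shown in this diff): the
	 * offsets gathered in a *_clk_regs array are the registers the driver
	 * saves before system suspend and writes back on resume, roughly:
	 *
	 *	for (i = 0; i < nr_clk_regs; i++)
	 *		rd[i].value = readl(reg_base + clk_regs[i]);
	 */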
CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK, +}; + +/* List of parent clocks for Muxes in CMU_FSYS0 */ +PNAME(mout_fsys0_bus_user_p) = { "oscclk", "dout_cmu_fsys0_bus" }; +PNAME(mout_fsys0_dpgtc_user_p) = { "oscclk", "dout_cmu_fsys0_dpgtc" }; +PNAME(mout_fsys0_mmc_embd_user_p) = { "oscclk", + "dout_cmu_fsys0_mmc_embd" }; +PNAME(mout_fsys0_ufs_embd_user_p) = { "oscclk", + "dout_cmu_fsys0_ufs_embd" }; +PNAME(mout_fsys0_usbdrd30_user_p) = { "oscclk", + "dout_cmu_fsys0_usbdrd30" }; + +static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = { + MUX(CLK_MOUT_FSYS0_BUS_USER, "mout_fsys0_bus_user", + mout_fsys0_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER, 4, 1), + MUX(CLK_MOUT_FSYS0_DPGTC_USER, "mout_fsys0_dpgtc_user", + mout_fsys0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER, + 4, 1), + MUX_F(CLK_MOUT_FSYS0_MMC_EMBD_USER, "mout_fsys0_mmc_embd_user", + mout_fsys0_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER, + 4, 1, CLK_SET_RATE_PARENT, 0), + MUX(CLK_MOUT_FSYS0_UFS_EMBD_USER, "mout_fsys0_ufs_embd_user", + mout_fsys0_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER, + 4, 1), + MUX(CLK_MOUT_FSYS0_USBDRD30_USER, "mout_fsys0_usbdrd30_user", + mout_fsys0_usbdrd30_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER, + 4, 1), +}; + +static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = { + /* Disabling this clock makes the system hang. Mark the clock as critical. 
*/ + GATE(CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK, + "gout_fsys0_fsys0_cmu_fsys0_pclk", "mout_fsys0_bus_user", + CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK, "gout_fsys0_ahbbr_fsys0_hclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK, + "gout_fsys0_axi2ahb_fsys0_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK, + "gout_fsys0_axi2ahb_usb_fsys0_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK, + "gout_fsys0_axi2apb_fsys0_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK, "gout_fsys0_btm_fsys0_i_aclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK, "gout_fsys0_btm_fsys0_i_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK, + "gout_fsys0_dp_link_i_gtc_ext_clk", "mout_fsys0_dpgtc_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_DP_LINK_I_PCLK, "gout_fsys0_dp_link_i_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_ETR_MIU_I_ACLK, "gout_fsys0_etr_miu_i_aclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS0_ETR_MIU_I_PCLK, "gout_fsys0_etr_miu_i_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK, "gout_fsys0_gpio_fsys0_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK, + "gout_fsys0_lhm_axi_d_usbtv_i_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK, + "gout_fsys0_lhm_axi_g_etr_i_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK, + "gout_fsys0_lhm_axi_p_fsys0_i_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK, + "gout_fsys0_lhs_acel_d_fsys0_i_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK, "gout_fsys0_mmc_embd_i_aclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN, "gout_fsys0_mmc_embd_sdclkin", + "mout_fsys0_mmc_embd_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN, + 21, CLK_SET_RATE_PARENT, 0), + GATE(CLK_GOUT_FSYS0_PMU_FSYS0_PCLK, "gout_fsys0_pmu_fsys0_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS0_BCM_FSYS0_ACLK, "gout_fsys0_bcm_fsys0_aclk", + "mout_fsys0_bus_user", + 
CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS0_BCM_FSYS0_PCLK, "gout_fsys0_bcm_fsys0_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK, + "gout_fsys0_rstnsync_clk_fsys0_bus_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK, "gout_fsys0_sysreg_fsys0_pclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK, "gout_fsys0_ufs_embd_i_aclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO, + "gout_fsys0_ufs_embd_i_clk_unipro", "mout_fsys0_ufs_embd_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK, + "gout_fsys0_ufs_embd_i_fmp_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK, + "gout_fsys0_usbtv_i_usb30drd_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK, + "gout_fsys0_usbtv_i_usb30drd_ref_clk", "mout_fsys0_usbdrd30_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK, + "gout_fsys0_usbtv_i_usb30drd_suspend_clk", + "mout_fsys0_usbdrd30_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK, + "gout_fsys0_usbtv_i_usbtvh_ahb_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK, + "gout_fsys0_usbtv_i_usbtvh_core_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK, + "gout_fsys0_usbtv_i_usbtvh_xiu_clk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK, + "gout_fsys0_us_d_fsys0_usb_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK, "gout_fsys0_xiu_d_fsys0_aclk", + "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK, + "gout_fsys0_xiu_d_fsys0_usb_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK, + "gout_fsys0_xiu_p_fsys0_aclk", "mout_fsys0_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info fsys0_cmu_info __initconst = { + .mux_clks = fsys0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks), + .gate_clks = fsys0_gate_clks, + .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks), + .nr_clk_ids = CLKS_NR_FSYS0, + .clk_regs = fsys0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs), + .clk_name = "bus", +}; + +/* ---- CMU_FSYS1 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_FSYS1 
(0x11400000) */ +#define PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER 0x0100 +#define PLL_CON2_MUX_CLKCMU_FSYS1_BUS_USER 0x0108 +#define PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0120 +#define PLL_CON2_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0128 +#define PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER 0x0140 +#define PLL_CON2_MUX_CLKCMU_FSYS1_PCIE_USER 0x0148 +#define PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER 0x0160 +#define PLL_CON2_MUX_CLKCMU_FSYS1_UFS_CARD_USER 0x0168 +#define CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN 0x2000 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM 0x2004 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK 0x2008 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK 0x200c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK 0x2010 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK 0x2014 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK 0x2018 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2024 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK 0x2028 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2038 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0 0x203c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1 0x2040 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 0x2044 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0 0x2048 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1 0x204c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2050 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 0x2054 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 0x2058 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0 0x205c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1 0x2060 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK 0x2068 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK 0x206c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK 0x2070 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK 0x2074 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK 0x207c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK 0x2080 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK 0x2084 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK 0x2088 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK 0x2090 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK 0x2094 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK 0x2098 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK 0x209c +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO 0x20a0 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK 0x20a4 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK 0x20a8 +#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK 0x20ac + +static 
const unsigned long fsys1_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER, + PLL_CON2_MUX_CLKCMU_FSYS1_BUS_USER, + PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER, + PLL_CON2_MUX_CLKCMU_FSYS1_MMC_CARD_USER, + PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER, + PLL_CON2_MUX_CLKCMU_FSYS1_PCIE_USER, + PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER, + PLL_CON2_MUX_CLKCMU_FSYS1_UFS_CARD_USER, + CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK, +}; + +/* List of parent clocks for Muxes in CMU_FSYS1 */ +PNAME(mout_fsys1_bus_user_p) = { "oscclk", "dout_cmu_fsys1_bus" }; +PNAME(mout_fsys1_mmc_card_user_p) = { "oscclk", + "dout_cmu_fsys1_mmc_card" }; +PNAME(mout_fsys1_pcie_user_p) = { "oscclk", "dout_cmu_fsys1_pcie" }; +PNAME(mout_fsys1_ufs_card_user_p) = { "oscclk", + "dout_cmu_fsys1_ufs_card" }; + +static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = { + MUX(CLK_MOUT_FSYS1_BUS_USER, "mout_fsys1_bus_user", + mout_fsys1_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER, 4, 1), + MUX_F(CLK_MOUT_FSYS1_MMC_CARD_USER, 
"mout_fsys1_mmc_card_user", + mout_fsys1_mmc_card_user_p, + PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER, + 4, 1, CLK_SET_RATE_PARENT, 0), + MUX(CLK_MOUT_FSYS1_PCIE_USER, "mout_fsys1_pcie_user", + mout_fsys1_pcie_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER, 4, 1), + MUX(CLK_MOUT_FSYS1_UFS_CARD_USER, "mout_fsys1_ufs_card_user", + mout_fsys1_ufs_card_user_p, + PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER, 4, 1), +}; + +static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = { + GATE(CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN, + "gout_clk_blk_fsys1_pcie_phy_ref_clk_in", "mout_fsys1_pcie_user", + CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM, "gout_fsys1_adm_ahb_sss_hclkm", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK, "gout_fsys1_ahbbr_fsys1_hclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK, + "gout_fsys1_axi2ahb_fsys1_aclk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK, + "gout_fsys1_axi2apb_fsys1p0_aclk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK, + "gout_fsys1_axi2apb_fsys1p1_aclk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK, "gout_fsys1_btm_fsys1_i_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK, "gout_fsys1_btm_fsys1_i_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK, + "gout_fsys1_fsys1_cmu_fsys1_pclk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK, "gout_fsys1_gpio_fsys1_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK, + "gout_fsys1_lhm_axi_p_fsys1_i_clk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK, + "gout_fsys1_lhs_acel_d_fsys1_i_clk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_FSYS1_MMC_CARD_I_ACLK, "gout_fsys1_mmc_card_i_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN, "gout_fsys1_mmc_card_sdclkin", + "mout_fsys1_mmc_card_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN, + 21, CLK_SET_RATE_PARENT, 0), + GATE(CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0, "gout_fsys1_pcie_dbi_aclk_0", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1, "gout_fsys1_pcie_dbi_aclk_1", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK, + 
"gout_fsys1_pcie_ieee1500_wrapper_for_pcie_phy_lc_x2_inst_0_i_scl_apb_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0, + "gout_fsys1_pcie_mstr_aclk_0", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1, + "gout_fsys1_pcie_mstr_aclk_1", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + "gout_fsys1_pcie_pcie_sub_ctrl_inst_0_i_driver_apb_clk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK, + "gout_fsys1_pcie_pcie_sub_ctrl_inst_1_i_driver_apb_clk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL, + "gout_fsys1_pcie_pipe2_digital_x2_wrap_inst_0_i_apb_pclk_scl", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0, "gout_fsys1_pcie_slv_aclk_0", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1, "gout_fsys1_pcie_slv_aclk_1", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_PMU_FSYS1_PCLK, "gout_fsys1_pmu_fsys1_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_BCM_FSYS1_ACLK, "gout_fsys1_bcm_fsys1_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_BCM_FSYS1_PCLK, "gout_fsys1_bcm_fsys1_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK, + "gout_fsys1_rstnsync_clk_fsys1_bus_clk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_RTIC_I_ACLK, "gout_fsys1_rtic_i_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS1_RTIC_I_PCLK, "gout_fsys1_rtic_i_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS1_SSS_I_ACLK, "gout_fsys1_sss_i_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS1_SSS_I_PCLK, "gout_fsys1_sss_i_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK, "gout_fsys1_sysreg_fsys1_pclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK, "gout_fsys1_toe_wifi0_i_clk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK, "gout_fsys1_toe_wifi1_i_clk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_UFS_CARD_I_ACLK, "gout_fsys1_ufs_card_i_aclk", + 
"mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO, + "gout_fsys1_ufs_card_i_clk_unipro", "mout_fsys1_ufs_card_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK, + "gout_fsys1_ufs_card_i_fmp_clk", "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK, + 21, 0, 0), + GATE(CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK, "gout_fsys1_xiu_d_fsys1_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK, "gout_fsys1_xiu_p_fsys1_aclk", + "mout_fsys1_bus_user", + CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info fsys1_cmu_info __initconst = { + .mux_clks = fsys1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks), + .gate_clks = fsys1_gate_clks, + .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks), + .nr_clk_ids = CLKS_NR_FSYS1, + .clk_regs = fsys1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs), + .clk_name = "bus", +}; + +/* ---- CMU_PERIC0 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIC0 (0x10400000) */ +#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0100 +#define PLL_CON2_MUX_CLKCMU_PERIC0_BUS_USER 0x0108 +#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER 0x0120 +#define PLL_CON2_MUX_CLKCMU_PERIC0_UART_DBG_USER 0x0128 +#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER 0x0140 +#define PLL_CON2_MUX_CLKCMU_PERIC0_USI00_USER 0x0148 +#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER 0x0160 +#define PLL_CON2_MUX_CLKCMU_PERIC0_USI01_USER 0x0168 +#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER 0x0180 +#define PLL_CON2_MUX_CLKCMU_PERIC0_USI02_USER 0x0188 +#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER 0x01a0 +#define PLL_CON2_MUX_CLKCMU_PERIC0_USI03_USER 0x01a8 +#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2000 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK 0x2014 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2018 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK 0x2020 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0 0x2028 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK 0x2038 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK 0x203c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK 0x2040 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI 0x2044 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK 0x2048 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI 0x204c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK 0x2050 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI 0x2054 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI 0x205c + +static const unsigned long peric0_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, + 
PLL_CON2_MUX_CLKCMU_PERIC0_BUS_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER, + PLL_CON2_MUX_CLKCMU_PERIC0_UART_DBG_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER, + PLL_CON2_MUX_CLKCMU_PERIC0_USI00_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER, + PLL_CON2_MUX_CLKCMU_PERIC0_USI01_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER, + PLL_CON2_MUX_CLKCMU_PERIC0_USI02_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER, + PLL_CON2_MUX_CLKCMU_PERIC0_USI03_USER, + CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI, +}; + +/* List of parent clocks for Muxes in CMU_PERIC0 */ +PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" }; +PNAME(mout_peric0_uart_dbg_user_p) = { "oscclk", + "dout_cmu_peric0_uart_dbg" }; +PNAME(mout_peric0_usi00_user_p) = { "oscclk", + "dout_cmu_peric0_usi00" }; +PNAME(mout_peric0_usi01_user_p) = { "oscclk", + "dout_cmu_peric0_usi01" }; +PNAME(mout_peric0_usi02_user_p) = { "oscclk", + "dout_cmu_peric0_usi02" }; +PNAME(mout_peric0_usi03_user_p) = { "oscclk", + "dout_cmu_peric0_usi03" }; + +static const struct samsung_mux_clock peric0_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user", + mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, 4, 1), + MUX(CLK_MOUT_PERIC0_UART_DBG_USER, "mout_peric0_uart_dbg_user", + mout_peric0_uart_dbg_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER, 4, 1), + MUX(CLK_MOUT_PERIC0_USI00_USER, "mout_peric0_usi00_user", + mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER, + 4, 1), + MUX(CLK_MOUT_PERIC0_USI01_USER, "mout_peric0_usi01_user", + mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER, + 4, 1), + MUX(CLK_MOUT_PERIC0_USI02_USER, "mout_peric0_usi02_user", + mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER, + 4, 1), + MUX(CLK_MOUT_PERIC0_USI03_USER, "mout_peric0_usi03_user", + mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER, + 4, 1), +}; + +static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { + GATE(CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK, + "gout_peric0_peric0_cmu_peric0_pclk", "mout_peric0_bus_user", + CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK, + "gout_peric0_axi2apb_peric0_aclk", "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), +
GATE(CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK, "gout_peric0_gpio_peric0_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK, + "gout_peric0_lhm_axi_p_peric0_i_clk", "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIC0_PMU_PERIC0_PCLK, "gout_peric0_pmu_peric0_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PWM_I_PCLK_S0, "gout_peric0_pwm_i_pclk_s0", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK, + "gout_peric0_rstnsync_clk_peric0_busp_clk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK, "gout_peric0_speedy2_tsp_clk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK, + "gout_peric0_sysreg_peric0_pclk", "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK, + "gout_peric0_uart_dbg_ext_uclk", "mout_peric0_uart_dbg_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_UART_DBG_PCLK, "gout_peric0_uart_dbg_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI00_I_PCLK, "gout_peric0_usi00_i_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI00_I_SCLK_USI, "gout_peric0_usi00_i_sclk_usi", + "mout_peric0_usi00_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI01_I_PCLK, "gout_peric0_usi01_i_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI01_I_SCLK_USI, "gout_peric0_usi01_i_sclk_usi", + "mout_peric0_usi01_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI02_I_PCLK, "gout_peric0_usi02_i_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI02_I_SCLK_USI, "gout_peric0_usi02_i_sclk_usi", + "mout_peric0_usi02_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI03_I_PCLK, "gout_peric0_usi03_i_pclk", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC0_USI03_I_SCLK_USI, "gout_peric0_usi03_i_sclk_usi", + "mout_peric0_usi03_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), +}; + +static const struct samsung_cmu_info peric0_cmu_info __initconst = { + .mux_clks = peric0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks), + .gate_clks = peric0_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks), + .nr_clk_ids = CLKS_NR_PERIC0, + .clk_regs = peric0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs), + .clk_name = "bus", +}; + +/* ---- CMU_PERIC1 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIC1 (0x10800000) */ +#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0100 
+#define PLL_CON2_MUX_CLKCMU_PERIC1_BUS_USER 0x0108 +#define PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER 0x0120 +#define PLL_CON2_MUX_CLKCMU_PERIC1_SPEEDY2_USER 0x0128 +#define PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER 0x0140 +#define PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM0_USER 0x0148 +#define PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER 0x0160 +#define PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM1_USER 0x0168 +#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0180 +#define PLL_CON2_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0188 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER 0x01a0 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI04_USER 0x01a8 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER 0x01c0 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI05_USER 0x01c8 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER 0x01e0 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI06_USER 0x01e8 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER 0x0200 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI07_USER 0x0208 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER 0x0220 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI08_USER 0x0228 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER 0x0240 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI09_USER 0x0248 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER 0x0260 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI10_USER 0x0268 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER 0x0280 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI11_USER 0x0288 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER 0x02a0 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI12_USER 0x02a8 +#define PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER 0x02c0 +#define PLL_CON2_MUX_CLKCMU_PERIC1_USI13_USER 0x02c8 +#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2000 +#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK 0x200c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK 0x2020 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK 0x2024 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2028 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK 0x2038 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x203c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK 0x2040 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x2044 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK 0x2048 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK 0x204c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK 0x2050 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK 0x2054 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK 0x205c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK 0x2060 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK 0x2064 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK 0x2068 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK 0x206c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK 0x2070 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK 0x2074 +#define 
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2078 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK 0x207c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK 0x2080 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK 0x2084 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI 0x2088 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK 0x208c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI 0x2090 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK 0x2094 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI 0x2098 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK 0x209c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI 0x20a0 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK 0x20a4 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI 0x20a8 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK 0x20ac +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI 0x20b0 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK 0x20b4 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI 0x20b8 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK 0x20bc +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI 0x20c0 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK 0x20c4 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI 0x20c8 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK 0x20cc +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI 0x20d0 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x20d4 + +static const unsigned long peric1_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_BUS_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_SPEEDY2_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM0_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM1_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_UART_BT_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI04_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI05_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI06_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI07_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI08_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI09_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI10_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI11_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI12_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER, + PLL_CON2_MUX_CLKCMU_PERIC1_USI13_USER, + CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK, + CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK, + 
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK, +}; + +/* List of parent clocks for Muxes in CMU_PERIC1 */ +PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" }; +PNAME(mout_peric1_speedy2_user_p) = { "oscclk", + "dout_cmu_peric1_speedy2" }; +PNAME(mout_peric1_spi_cam0_user_p) = { "oscclk", + "dout_cmu_peric1_spi_cam0" }; +PNAME(mout_peric1_spi_cam1_user_p) = { "oscclk", + "dout_cmu_peric1_spi_cam1" }; +PNAME(mout_peric1_uart_bt_user_p) = { "oscclk", + "dout_cmu_peric1_uart_bt" }; +PNAME(mout_peric1_usi04_user_p) = { "oscclk", + "dout_cmu_peric1_usi04" }; +PNAME(mout_peric1_usi05_user_p) = { "oscclk", + "dout_cmu_peric1_usi05" }; +PNAME(mout_peric1_usi06_user_p) = { "oscclk", + "dout_cmu_peric1_usi06" }; +PNAME(mout_peric1_usi07_user_p) = { "oscclk", + "dout_cmu_peric1_usi07" }; +PNAME(mout_peric1_usi08_user_p) = { "oscclk", + "dout_cmu_peric1_usi08" }; +PNAME(mout_peric1_usi09_user_p) = { "oscclk", + "dout_cmu_peric1_usi09" }; +PNAME(mout_peric1_usi10_user_p) = { "oscclk", + 
"dout_cmu_peric1_usi10" }; +PNAME(mout_peric1_usi11_user_p) = { "oscclk", + "dout_cmu_peric1_usi11" }; +PNAME(mout_peric1_usi12_user_p) = { "oscclk", + "dout_cmu_peric1_usi12" }; +PNAME(mout_peric1_usi13_user_p) = { "oscclk", + "dout_cmu_peric1_usi13" }; + +static const struct samsung_mux_clock peric1_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user", + mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_SPEEDY2_USER, "mout_peric1_speedy2_user", + mout_peric1_speedy2_user_p, + PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_SPI_CAM0_USER, "mout_peric1_spi_cam0_user", + mout_peric1_spi_cam0_user_p, + PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_SPI_CAM1_USER, "mout_peric1_spi_cam1_user", + mout_peric1_spi_cam1_user_p, + PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user", + mout_peric1_uart_bt_user_p, + PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_USI04_USER, "mout_peric1_usi04_user", + mout_peric1_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI05_USER, "mout_peric1_usi05_user", + mout_peric1_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI06_USER, "mout_peric1_usi06_user", + mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI07_USER, "mout_peric1_usi07_user", + mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI08_USER, "mout_peric1_usi08_user", + mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI09_USER, "mout_peric1_usi09_user", + mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI10_USER, "mout_peric1_usi10_user", + mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI11_USER, "mout_peric1_usi11_user", + mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI12_USER, "mout_peric1_usi12_user", + mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER, + 4, 1), + MUX(CLK_MOUT_PERIC1_USI13_USER, "mout_peric1_usi13_user", + mout_peric1_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER, + 4, 1), +}; + +static const struct samsung_gate_clock peric1_gate_clks[] __initconst = { + GATE(CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK, + "gout_peric1_peric1_cmu_peric1_pclk", "mout_peric1_bus_user", + CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK, + "gout_peric1_rstnsync_clk_peric1_speedy2_clk", + "mout_peric1_speedy2_user", + CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK, + "gout_peric1_axi2apb_peric1p0_aclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK, + "gout_peric1_axi2apb_peric1p1_aclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK, + "gout_peric1_axi2apb_peric1p2_aclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK, + 21, CLK_IS_CRITICAL, 0), + 
GATE(CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK, + "gout_peric1_gpio_peric1_pclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK, "gout_peric1_hsi2c_cam0_ipclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK, + "gout_peric1_hsi2c_cam1_ipclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK, + "gout_peric1_hsi2c_cam2_ipclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK, "gout_peric1_hsi2c_cam3_ipclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK, + "gout_peric1_lhm_axi_p_peric1_i_clk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_PERIC1_PMU_PERIC1_PCLK, "gout_peric1_pmu_peric1_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK, + "gout_peric1_rstnsync_clk_peric1_busp_clk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK, "gout_peric1_speedy2_ddi1_clk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK, + "gout_peric1_speedy2_ddi1_sclk", "mout_peric1_speedy2_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK, "gout_peric1_speedy2_ddi2_clk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK, + "gout_peric1_speedy2_ddi2_sclk", "mout_peric1_speedy2_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK, "gout_peric1_speedy2_ddi_clk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK, "gout_peric1_speedy2_ddi_sclk", + "mout_peric1_speedy2_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK, "gout_peric1_speedy2_tsp1_clk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK, "gout_peric1_speedy2_tsp2_clk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPI_CAM0_PCLK, "gout_peric1_spi_cam0_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK, + "gout_peric1_spi_cam0_spi_ext_clk", "mout_peric1_spi_cam0_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPI_CAM1_PCLK, "gout_peric1_spi_cam1_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK, + "gout_peric1_spi_cam1_spi_ext_clk", "mout_peric1_spi_cam1_user", + 
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK, + "gout_peric1_sysreg_peric1_pclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_UART_BT_EXT_UCLK, "gout_peric1_uart_bt_ext_uclk", + "mout_peric1_uart_bt_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_UART_BT_PCLK, "gout_peric1_uart_bt_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI04_I_PCLK, "gout_peric1_usi04_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI04_I_SCLK_USI, "gout_peric1_usi04_i_sclk_usi", + "mout_peric1_usi04_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI05_I_PCLK, "gout_peric1_usi05_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI05_I_SCLK_USI, "gout_peric1_usi05_i_sclk_usi", + "mout_peric1_usi05_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI06_I_PCLK, "gout_peric1_usi06_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI06_I_SCLK_USI, "gout_peric1_usi06_i_sclk_usi", + "mout_peric1_usi06_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI07_I_PCLK, "gout_peric1_usi07_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI07_I_SCLK_USI, "gout_peric1_usi07_i_sclk_usi", + "mout_peric1_usi07_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI08_I_PCLK, "gout_peric1_usi08_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI08_I_SCLK_USI, "gout_peric1_usi08_i_sclk_usi", + "mout_peric1_usi08_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI09_I_PCLK, "gout_peric1_usi09_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI09_I_SCLK_USI, "gout_peric1_usi09_i_sclk_usi", + "mout_peric1_usi09_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI10_I_PCLK, "gout_peric1_usi10_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI10_I_SCLK_USI, "gout_peric1_usi10_i_sclk_usi", + "mout_peric1_usi10_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI11_I_PCLK, "gout_peric1_usi11_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI11_I_SCLK_USI, "gout_peric1_usi11_i_sclk_usi", + "mout_peric1_usi11_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI12_I_PCLK, "gout_peric1_usi12_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI12_I_SCLK_USI, "gout_peric1_usi12_i_sclk_usi", + "mout_peric1_usi12_user", + 
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI13_I_PCLK, "gout_peric1_usi13_i_pclk", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_PERIC1_USI13_I_SCLK_USI, "gout_peric1_usi13_i_sclk_usi", + "mout_peric1_usi13_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK, + "gout_peric1_xiu_p_peric1_aclk", "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info peric1_cmu_info __initconst = { + .mux_clks = peric1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks), + .gate_clks = peric1_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks), + .nr_clk_ids = CLKS_NR_PERIC1, + .clk_regs = peric1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs), + .clk_name = "bus", +}; + +static int __init exynos8895_cmu_probe(struct platform_device *pdev) +{ + const struct samsung_cmu_info *info; + struct device *dev = &pdev->dev; + + info = of_device_get_match_data(dev); + exynos_arm64_register_cmu(dev, dev->of_node, info); + + return 0; +} + +static const struct of_device_id exynos8895_cmu_of_match[] = { + { + .compatible = "samsung,exynos8895-cmu-fsys0", + .data = &fsys0_cmu_info, + }, { + .compatible = "samsung,exynos8895-cmu-fsys1", + .data = &fsys1_cmu_info, + }, { + .compatible = "samsung,exynos8895-cmu-peric0", + .data = &peric0_cmu_info, + }, { + .compatible = "samsung,exynos8895-cmu-peric1", + .data = &peric1_cmu_info, + }, + { } +}; + +static struct platform_driver exynos8895_cmu_driver __refdata = { + .driver = { + .name = "exynos8895-cmu", + .of_match_table = exynos8895_cmu_of_match, + .suppress_bind_attrs = true, + }, + .probe = exynos8895_cmu_probe, +}; + +static int __init exynos8895_cmu_init(void) +{ + return platform_driver_register(&exynos8895_cmu_driver); +} +core_initcall(exynos8895_cmu_init); diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c index f60f0a0c598d..2a8bfd5d9abc 100644 --- a/drivers/clk/samsung/clk-exynosautov920.c +++ b/drivers/clk/samsung/clk-exynosautov920.c @@ -19,6 +19,10 @@ /* NOTE: Must be equal to the last clock ID increased by one */ #define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1) #define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1) +#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1) +#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1) +#define CLKS_NR_HSI0 (CLK_DOUT_HSI0_PCIE_APB + 1) +#define CLKS_NR_HSI1 (CLK_MOUT_HSI1_USBDRD + 1) /* ---- CMU_TOP ------------------------------------------------------------ */ @@ -974,6 +978,8 @@ static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initcon "mout_shared5_pll", 1, 3, 0), FFACTOR(DOUT_SHARED5_DIV4, "dout_shared5_div4", "mout_shared5_pll", 1, 4, 0), + FFACTOR(DOUT_TCXO_DIV2, "dout_tcxo_div2", + "oscclk", 1, 2, 0), }; static const struct samsung_cmu_info top_cmu_info __initconst = { @@ -1139,6 +1145,277 @@ static const struct samsung_cmu_info peric0_cmu_info __initconst = { .clk_name = "noc", }; +/* ---- CMU_PERIC1 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIC1 (0x10C00000) */ +#define PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER 0x600 +#define PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER 0x610 +#define CLK_CON_MUX_MUX_CLK_PERIC1_I3C 0x1000 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI 0x1004 +#define 
CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI 0x1008 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI 0x100c +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI 0x1010 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI 0x1014 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI 0x1018 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI 0x101c +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI 0x1020 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI 0x1024 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C 0x1028 +#define CLK_CON_DIV_DIV_CLK_PERIC1_I3C 0x1800 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1804 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1808 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x180c +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x1810 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI 0x1814 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI 0x1818 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI 0x181c +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x1828 + +static const unsigned long peric1_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER, + CLK_CON_MUX_MUX_CLK_PERIC1_I3C, + CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, + CLK_CON_DIV_DIV_CLK_PERIC1_I3C, + CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, +}; + +/* List of parent clocks for Muxes in CMU_PERIC1 */ +PNAME(mout_peric1_ip_user_p) = { "oscclk", "dout_clkcmu_peric1_ip" }; +PNAME(mout_peric1_noc_user_p) = { "oscclk", "dout_clkcmu_peric1_noc" }; +PNAME(mout_peric1_usi_p) = { "oscclk", "mout_peric1_ip_user" }; + +static const struct samsung_mux_clock peric1_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIC1_IP_USER, "mout_peric1_ip_user", + mout_peric1_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_NOC_USER, "mout_peric1_noc_user", + mout_peric1_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER, 4, 1), + /* USI09 ~ USI17 */ + MUX(CLK_MOUT_PERIC1_USI09_USI, "mout_peric1_usi09_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI10_USI, "mout_peric1_usi10_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI11_USI, "mout_peric1_usi11_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI12_USI, "mout_peric1_usi12_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI13_USI, "mout_peric1_usi13_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI14_USI, "mout_peric1_usi14_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI15_USI, "mout_peric1_usi15_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI, 0, 1), + 
MUX(CLK_MOUT_PERIC1_USI16_USI, "mout_peric1_usi16_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI17_USI, "mout_peric1_usi17_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI, 0, 1), + /* USI_I2C */ + MUX(CLK_MOUT_PERIC1_USI_I2C, "mout_peric1_usi_i2c", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, 0, 1), + /* USI_I3C */ + MUX(CLK_MOUT_PERIC1_I3C, "mout_peric1_i3c", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_I3C, 0, 1), +}; + +static const struct samsung_div_clock peric1_div_clks[] __initconst = { + /* USI09 ~ USI17 */ + DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi", + "mout_peric1_usi09_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi", + "mout_peric1_usi10_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi", + "mout_peric1_usi11_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi", + "mout_peric1_usi12_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI13_USI, "dout_peric1_usi13_usi", + "mout_peric1_usi13_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI14_USI, "dout_peric1_usi14_usi", + "mout_peric1_usi14_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI15_USI, "dout_peric1_usi15_usi", + "mout_peric1_usi15_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi", + "mout_peric1_usi16_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi", + "mout_peric1_usi17_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI, + 0, 4), + /* USI_I2C */ + DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c", + "mout_peric1_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, 0, 4), + /* USI_I3C */ + DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c", + "mout_peric1_i3c", CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4), +}; + +static const struct samsung_cmu_info peric1_cmu_info __initconst = { + .mux_clks = peric1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks), + .div_clks = peric1_div_clks, + .nr_div_clks = ARRAY_SIZE(peric1_div_clks), + .nr_clk_ids = CLKS_NR_PERIC1, + .clk_regs = peric1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs), + .clk_name = "noc", +}; + +/* ---- CMU_MISC --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_MISC (0x10020000) */ +#define PLL_CON0_MUX_CLKCMU_MISC_NOC_USER 0x600 +#define CLK_CON_MUX_MUX_CLK_MISC_GIC 0x1000 +#define CLK_CON_DIV_CLKCMU_OTP 0x1800 +#define CLK_CON_DIV_DIV_CLK_MISC_NOCP 0x1804 +#define CLK_CON_DIV_DIV_CLK_MISC_OSC_DIV2 0x1808 + +static const unsigned long misc_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_MISC_NOC_USER, + CLK_CON_MUX_MUX_CLK_MISC_GIC, + CLK_CON_DIV_CLKCMU_OTP, + CLK_CON_DIV_DIV_CLK_MISC_NOCP, + CLK_CON_DIV_DIV_CLK_MISC_OSC_DIV2, +}; + +/* List of parent clocks for Muxes in CMU_MISC */ +PNAME(mout_misc_noc_user_p) = { "oscclk", "dout_clkcmu_misc_noc" }; +PNAME(mout_misc_gic_p) = { "dout_misc_nocp", "oscclk" }; + +static const struct samsung_mux_clock misc_mux_clks[] __initconst = { + MUX(CLK_MOUT_MISC_NOC_USER, "mout_misc_noc_user", + mout_misc_noc_user_p, PLL_CON0_MUX_CLKCMU_MISC_NOC_USER, 4, 1), + MUX(CLK_MOUT_MISC_GIC, "mout_misc_gic", + mout_misc_gic_p, CLK_CON_MUX_MUX_CLK_MISC_GIC, 0, 1), +}; + +static const struct samsung_div_clock misc_div_clks[] 
__initconst = { + DIV(CLK_DOUT_MISC_NOCP, "dout_misc_nocp", + "mout_misc_noc_user", CLK_CON_DIV_DIV_CLK_MISC_NOCP, + 0, 3), +}; + +static const struct samsung_fixed_factor_clock misc_fixed_factor_clks[] __initconst = { + FFACTOR(CLK_DOUT_MISC_OTP, "dout_misc_otp", + "oscclk", 1, 10, 0), + FFACTOR(CLK_DOUT_MISC_OSC_DIV2, "dout_misc_osc_div2", + "oscclk", 1, 2, 0), +}; + +static const struct samsung_cmu_info misc_cmu_info __initconst = { + .mux_clks = misc_mux_clks, + .nr_mux_clks = ARRAY_SIZE(misc_mux_clks), + .div_clks = misc_div_clks, + .nr_div_clks = ARRAY_SIZE(misc_div_clks), + .fixed_factor_clks = misc_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(misc_fixed_factor_clks), + .nr_clk_ids = CLKS_NR_MISC, + .clk_regs = misc_clk_regs, + .nr_clk_regs = ARRAY_SIZE(misc_clk_regs), + .clk_name = "noc", +}; + +/* ---- CMU_HSI0 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_HSI0 (0x16000000) */ +#define PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER 0x600 +#define CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB 0x1800 + +static const unsigned long hsi0_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER, + CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB, +}; + +/* List of parent clocks for Muxes in CMU_HSI0 */ +PNAME(mout_hsi0_noc_user_p) = { "oscclk", "dout_clkcmu_hsi0_noc" }; + +static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = { + MUX(CLK_MOUT_HSI0_NOC_USER, "mout_hsi0_noc_user", + mout_hsi0_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER, 4, 1), +}; + +static const struct samsung_div_clock hsi0_div_clks[] __initconst = { + DIV(CLK_DOUT_HSI0_PCIE_APB, "dout_hsi0_pcie_apb", + "mout_hsi0_noc_user", CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB, + 0, 4), +}; + +static const struct samsung_cmu_info hsi0_cmu_info __initconst = { + .mux_clks = hsi0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks), + .div_clks = hsi0_div_clks, + .nr_div_clks = ARRAY_SIZE(hsi0_div_clks), + .nr_clk_ids = CLKS_NR_HSI0, + .clk_regs = hsi0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs), + .clk_name = "noc", +}; + +/* ---- CMU_HSI1 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_HSI1 (0x16400000) */ +#define PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER 0x600 +#define PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER 0x610 +#define PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER 0x620 +#define CLK_CON_MUX_MUX_CLK_HSI1_USBDRD 0x1000 + +static const unsigned long hsi1_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER, + PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER, + PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER, + CLK_CON_MUX_MUX_CLK_HSI1_USBDRD, +}; + +/* List of parent clocks for Muxes in CMU_HSI1 */ +PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"}; +PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" }; +PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" }; +PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" }; + +static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = { + MUX(CLK_MOUT_HSI1_MMC_CARD_USER, "mout_hsi1_mmc_card_user", + mout_hsi1_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER, 4, 1), + MUX(CLK_MOUT_HSI1_NOC_USER, "mout_hsi1_noc_user", + mout_hsi1_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER, 4, 1), + MUX(CLK_MOUT_HSI1_USBDRD_USER, "mout_hsi1_usbdrd_user", + mout_hsi1_usbdrd_user_p, PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER, 4, 1), + MUX(CLK_MOUT_HSI1_USBDRD, "mout_hsi1_usbdrd", + mout_hsi1_usbdrd_p, CLK_CON_MUX_MUX_CLK_HSI1_USBDRD, 4, 1), +}; 
+ +static const struct samsung_cmu_info hsi1_cmu_info __initconst = { + .mux_clks = hsi1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(hsi1_mux_clks), + .nr_clk_ids = CLKS_NR_HSI1, + .clk_regs = hsi1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(hsi1_clk_regs), + .clk_name = "noc", +}; + static int __init exynosautov920_cmu_probe(struct platform_device *pdev) { const struct samsung_cmu_info *info; @@ -1154,6 +1431,18 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = { { .compatible = "samsung,exynosautov920-cmu-peric0", .data = &peric0_cmu_info, + }, { + .compatible = "samsung,exynosautov920-cmu-peric1", + .data = &peric1_cmu_info, + }, { + .compatible = "samsung,exynosautov920-cmu-misc", + .data = &misc_cmu_info, + }, { + .compatible = "samsung,exynosautov920-cmu-hsi0", + .data = &hsi0_cmu_info, + }, { + .compatible = "samsung,exynosautov920-cmu-hsi1", + .data = &hsi1_cmu_info, }, { } }; diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c index 6f984cfcd33c..9a6006c298c2 100644 --- a/drivers/clk/samsung/clk-fsd.c +++ b/drivers/clk/samsung/clk-fsd.c @@ -82,6 +82,15 @@ #define GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU 0x205c #define GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK 0x2060 +/* NOTE: Must be equal to the last clock ID increased by one */ +#define CLKS_NR_CMU (GAT_CMU_FSYS0_SHARED0DIV4 + 1) +#define CLKS_NR_PERIC (PERIC_DOUT_RGMII_CLK + 1) +#define CLKS_NR_FSYS0 (FSYS0_DOUT_FSYS0_PERIBUS_GRP + 1) +#define CLKS_NR_FSYS1 (PCIE_LINK1_IPCLKPORT_SLV_ACLK + 1) +#define CLKS_NR_IMEM (IMEM_TMU_GT_IPCLKPORT_I_CLK_TS + 1) +#define CLKS_NR_MFC (MFC_MFC_IPCLKPORT_ACLK + 1) +#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_ACLK + 1) + static const unsigned long cmu_clk_regs[] __initconst = { PLL_LOCKTIME_PLL_SHARED0, PLL_LOCKTIME_PLL_SHARED1, @@ -300,7 +309,7 @@ static const struct samsung_cmu_info cmu_cmu_info __initconst = { .nr_div_clks = ARRAY_SIZE(cmu_div_clks), .gate_clks = cmu_gate_clks, .nr_gate_clks = ARRAY_SIZE(cmu_gate_clks), - .nr_clk_ids = CMU_NR_CLK, + .nr_clk_ids = CLKS_NR_CMU, .clk_regs = cmu_clk_regs, .nr_clk_regs = ARRAY_SIZE(cmu_clk_regs), }; @@ -665,7 +674,7 @@ static const struct samsung_cmu_info peric_cmu_info __initconst = { .nr_gate_clks = ARRAY_SIZE(peric_gate_clks), .fixed_clks = peric_fixed_clks, .nr_fixed_clks = ARRAY_SIZE(peric_fixed_clks), - .nr_clk_ids = PERIC_NR_CLK, + .nr_clk_ids = CLKS_NR_PERIC, .clk_regs = peric_clk_regs, .nr_clk_regs = ARRAY_SIZE(peric_clk_regs), .clk_name = "dout_cmu_pll_shared0_div4", @@ -964,7 +973,7 @@ static const struct samsung_cmu_info fsys0_cmu_info __initconst = { .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks), .fixed_clks = fsys0_fixed_clks, .nr_fixed_clks = ARRAY_SIZE(fsys0_fixed_clks), - .nr_clk_ids = FSYS0_NR_CLK, + .nr_clk_ids = CLKS_NR_FSYS0, .clk_regs = fsys0_clk_regs, .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs), .clk_name = "dout_cmu_fsys0_shared1div4", @@ -1136,7 +1145,7 @@ static const struct samsung_cmu_info fsys1_cmu_info __initconst = { .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks), .fixed_clks = fsys1_fixed_clks, .nr_fixed_clks = ARRAY_SIZE(fsys1_fixed_clks), - .nr_clk_ids = FSYS1_NR_CLK, + .nr_clk_ids = CLKS_NR_FSYS1, .clk_regs = fsys1_clk_regs, .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs), .clk_name = "dout_cmu_fsys1_shared0div4", @@ -1413,7 +1422,7 @@ static const struct samsung_cmu_info imem_cmu_info __initconst = { .nr_div_clks = ARRAY_SIZE(imem_div_clks), .gate_clks = imem_gate_clks, .nr_gate_clks = ARRAY_SIZE(imem_gate_clks), - .nr_clk_ids = IMEM_NR_CLK, + .nr_clk_ids = CLKS_NR_IMEM, .clk_regs = 
imem_clk_regs, .nr_clk_regs = ARRAY_SIZE(imem_clk_regs), }; @@ -1538,7 +1547,7 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = { .nr_div_clks = ARRAY_SIZE(mfc_div_clks), .gate_clks = mfc_gate_clks, .nr_gate_clks = ARRAY_SIZE(mfc_gate_clks), - .nr_clk_ids = MFC_NR_CLK, + .nr_clk_ids = CLKS_NR_MFC, .clk_regs = mfc_clk_regs, .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs), }; @@ -1742,7 +1751,7 @@ static const struct samsung_cmu_info cam_csi_cmu_info __initconst = { .nr_div_clks = ARRAY_SIZE(cam_csi_div_clks), .gate_clks = cam_csi_gate_clks, .nr_gate_clks = ARRAY_SIZE(cam_csi_gate_clks), - .nr_clk_ids = CAM_CSI_NR_CLK, + .nr_clk_ids = CLKS_NR_CAM_CSI, .clk_regs = cam_csi_clk_regs, .nr_clk_regs = ARRAY_SIZE(cam_csi_clk_regs), }; diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c index 85098c61c15e..86b39edba122 100644 --- a/drivers/clk/samsung/clk-gs101.c +++ b/drivers/clk/samsung/clk-gs101.c @@ -2775,11 +2775,11 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = { GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK, "gout_hsi2_qe_ufs_embd_hsi2_aclk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK, - 21, 0, 0), + 21, CLK_IS_CRITICAL, 0), GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK, "gout_hsi2_qe_ufs_embd_hsi2_pclk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK, - 21, 0, 0), + 21, CLK_IS_CRITICAL, 0), GATE(CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK, "gout_hsi2_clk_hsi2_bus_clk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK, @@ -2806,7 +2806,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = { GATE(CLK_GOUT_HSI2_SYSREG_HSI2_PCLK, "gout_hsi2_sysreg_hsi2_pclk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK, - 21, 0, 0), + 21, CLK_IS_CRITICAL, 0), GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK, "gout_hsi2_uasc_pcie_gen4a_dbi_1_aclk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK, @@ -2842,7 +2842,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = { GATE(CLK_GOUT_HSI2_UFS_EMBD_I_ACLK, "gout_hsi2_ufs_embd_i_aclk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK, - 21, 0, 0), + 21, CLK_IS_CRITICAL, 0), GATE(CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO, "gout_hsi2_ufs_embd_i_clk_unipro", "mout_hsi2_ufs_embd_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO, @@ -2850,7 +2850,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = { GATE(CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK, "gout_hsi2_ufs_embd_i_fmp_clk", "mout_hsi2_bus_user", CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK, - 21, 0, 0), + 21, CLK_IS_CRITICAL, 0), /* TODO: should have a driver for this */ GATE(CLK_GOUT_HSI2_XIU_D_HSI2_ACLK, "gout_hsi2_xiu_d_hsi2_aclk", "mout_hsi2_bus_user", diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index cca3e630922c..be6b51694919 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -1370,6 +1370,8 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, break; case pll_1417x: case pll_1418x: + case pll_1051x: + case pll_1052x: case pll_0818x: case pll_0822x: case pll_0516x: diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h index 3481941ba07a..858ab367eb65 100644 --- a/drivers/clk/samsung/clk-pll.h +++ b/drivers/clk/samsung/clk-pll.h @@ -43,6 +43,8 @@ enum samsung_pll_type 
{ pll_0517x, pll_0518x, pll_531x, + pll_1051x, + pll_1052x, }; #define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \ diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index d27a1f73f077..e2ec8fe32e39 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -3,7 +3,7 @@ * Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com> * * Common Clock Framework support for all S3C64xx SoCs. -*/ + */ #include <linux/slab.h> #include <linux/clk-provider.h> diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c index b31c00ea331f..d19a3d9fd452 100644 --- a/drivers/clk/samsung/clk-s5pv210-audss.c +++ b/drivers/clk/samsung/clk-s5pv210-audss.c @@ -8,7 +8,7 @@ * Author: Padmavathi Venna <padma.v@samsung.com> * * Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs. -*/ + */ #include <linux/io.h> #include <linux/clk.h> diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index afa5760ed3a1..283c523763e6 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -6,7 +6,7 @@ * * This file includes utility functions to register clocks to common * clock framework for Samsung platforms. -*/ + */ #include <linux/slab.h> #include <linux/clkdev.h> diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c index ff9deeef509b..1537f4f05860 100644 --- a/drivers/clk/sophgo/clk-sg2042-pll.c +++ b/drivers/clk/sophgo/clk-sg2042-pll.c @@ -153,7 +153,7 @@ static unsigned long sg2042_pll_recalc_rate(unsigned int reg_value, sg2042_pll_ctrl_decode(reg_value, &ctrl_table); - numerator = parent_rate * ctrl_table.fbdiv; + numerator = (u64)parent_rate * ctrl_table.fbdiv; denominator = ctrl_table.refdiv * ctrl_table.postdiv1 * ctrl_table.postdiv2; do_div(numerator, denominator); return numerator; diff --git a/drivers/clk/starfive/clk-starfive-jh7110-pll.c b/drivers/clk/starfive/clk-starfive-jh7110-pll.c index 3598390e8fd0..56dc58a04f8a 100644 --- a/drivers/clk/starfive/clk-starfive-jh7110-pll.c +++ b/drivers/clk/starfive/clk-starfive-jh7110-pll.c @@ -453,7 +453,7 @@ static struct clk_hw *jh7110_pll_get(struct of_phandle_args *clkspec, void *data return ERR_PTR(-EINVAL); } -static int jh7110_pll_probe(struct platform_device *pdev) +static int __init jh7110_pll_probe(struct platform_device *pdev) { struct jh7110_pll_priv *priv; unsigned int idx; diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c index de36e21d3eaf..4084714adb15 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c @@ -91,7 +91,7 @@ static struct clk_hw_onecell_data sun20i_d1_r_hw_clks = { }, }; -static struct ccu_reset_map sun20i_d1_r_ccu_resets[] = { +static const struct ccu_reset_map sun20i_d1_r_ccu_resets[] = { [RST_BUS_R_TIMER] = { 0x11c, BIT(16) }, [RST_BUS_R_TWD] = { 0x12c, BIT(16) }, [RST_BUS_R_PPU] = { 0x1ac, BIT(16) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index 9b5cfac2ee70..c80ac2dfbb60 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -1232,7 +1232,7 @@ static struct clk_hw_onecell_data sun20i_d1_hw_clks = { }, }; -static struct ccu_reset_map sun20i_d1_ccu_resets[] = { +static const struct ccu_reset_map sun20i_d1_ccu_resets[] = { [RST_MBUS] = { 0x540, BIT(30) }, [RST_BUS_DE] = { 0x60c, BIT(16) }, [RST_BUS_DI] = { 0x62c, BIT(16) }, @@ -1371,7 +1371,7 @@ static int sun20i_d1_ccu_probe(struct 
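The sg2042 PLL hunk above fixes an integer-overflow hazard: when parent_rate and fbdiv are both 32-bit, their product is computed in 32-bit arithmetic and wraps before being widened into the u64 numerator. Casting one operand to u64 forces the whole multiplication into 64 bits. A freestanding sketch of the failure mode (demo values, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t parent_rate = 25000000;	/* 25 MHz reference */
	uint32_t fbdiv = 200;			/* feedback divider */

	uint64_t wrong = parent_rate * fbdiv;		/* 32-bit multiply wraps */
	uint64_t right = (uint64_t)parent_rate * fbdiv;	/* 64-bit multiply */

	printf("wrong: %llu\n", (unsigned long long)wrong);	/* 705032704 */
	printf("right: %llu\n", (unsigned long long)right);	/* 5000000000 */
	return 0;
}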
platform_device *pdev) /* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */ val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG); - val &= ~BIT(1) | BIT(0); + val &= ~(BIT(1) | BIT(0)); writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG); /* Force fanout-27M factor N to 0. */ diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c index d1a1683baff4..54c794c50828 100644 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c @@ -1382,7 +1382,7 @@ static struct clk_hw_onecell_data sun7i_a20_hw_clks = { .num = CLK_NUMBER_SUN7I, }; -static struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = { +static const struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_PHY2] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c index 2c791761a646..cdd9721f9e7d 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c @@ -166,7 +166,7 @@ static struct clk_hw_onecell_data sun50i_a100_r_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun50i_a100_r_ccu_resets[] = { +static const struct ccu_reset_map sun50i_a100_r_ccu_resets[] = { [RST_R_APB1_TIMER] = { 0x11c, BIT(16) }, [RST_R_APB1_BUS_PWM] = { 0x13c, BIT(16) }, [RST_R_APB1_PPU] = { 0x17c, BIT(16) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c index bbaa82978716..1b6a49bc7184 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c @@ -1061,7 +1061,7 @@ static struct clk_hw_onecell_data sun50i_a100_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun50i_a100_ccu_resets[] = { +static const struct ccu_reset_map sun50i_a100_ccu_resets[] = { [RST_MBUS] = { 0x540, BIT(30) }, [RST_BUS_DE] = { 0x60c, BIT(16) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index c255dba2c96d..82d7dcbca1cc 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -858,7 +858,7 @@ static struct clk_hw_onecell_data sun50i_a64_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun50i_a64_ccu_resets[] = { +static const struct ccu_reset_map sun50i_a64_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_HSIC] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index c72815841111..d0ce2779c550 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -179,7 +179,7 @@ static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = { +static const struct ccu_reset_map sun50i_h6_r_ccu_resets[] = { [RST_R_APB1_TIMER] = { 0x11c, BIT(16) }, [RST_R_APB1_TWD] = { 0x12c, BIT(16) }, [RST_R_APB1_PWM] = { 0x13c, BIT(16) }, @@ -190,7 +190,7 @@ static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = { [RST_R_APB1_W1] = { 0x1ec, BIT(16) }, }; -static struct ccu_reset_map sun50i_h616_r_ccu_resets[] = { +static const struct ccu_reset_map sun50i_h616_r_ccu_resets[] = { [RST_R_APB1_TWD] = { 0x12c, BIT(16) }, [RST_R_APB2_I2C] = { 0x19c, BIT(16) }, [RST_R_APB2_RSB] = { 0x1bc, BIT(16) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index a20b621ad8f1..bd6fc3df911d 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ 
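The sun20i-d1 probe fix at the top of this hunk is an operator-precedence bug: ~ binds tighter than |, so ~BIT(1) | BIT(0) evaluates to a mask that clears only bit 1, the | BIT(0) being absorbed into the already-set low bits. Parenthesizing first and complementing second clears both bits, matching the "m1 = 0, m0 = 0" comment. A freestanding demonstration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint32_t val = 0xff;

	/* ~BIT(1) | BIT(0) == 0xfffffffd: bit 0 survives the mask */
	printf("buggy: 0x%x\n", val & (~BIT(1) | BIT(0)));	/* 0xfd */
	/* intended: clear bits 1 and 0 */
	printf("fixed: 0x%x\n", val & ~(BIT(1) | BIT(0)));	/* 0xfc */
	return 0;
}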
b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -1076,7 +1076,7 @@ static struct clk_hw_onecell_data sun50i_h6_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun50i_h6_ccu_resets[] = { +static const struct ccu_reset_map sun50i_h6_ccu_resets[] = { [RST_MBUS] = { 0x540, BIT(30) }, [RST_BUS_DE] = { 0x60c, BIT(16) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c index 84e406ddf9d1..b001d0c03534 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c @@ -216,19 +216,29 @@ static struct ccu_nkmp pll_de_clk = { }; /* - * TODO: Determine SDM settings for the audio PLL. The manual suggests - * PLL_FACTOR_N=16, PLL_POST_DIV_P=2, OUTPUT_DIV=2, pattern=0xe000c49b - * for 24.576 MHz, and PLL_FACTOR_N=22, PLL_POST_DIV_P=3, OUTPUT_DIV=2, - * pattern=0xe001288c for 22.5792 MHz. - * This clashes with our fixed PLL_POST_DIV_P. + * Sigma-delta modulation settings table obtained from the vendor SDK driver. + * There are additional M0 and M1 divider bits not modelled here, so they are + * forced to fixed values in the probe routine. Sigma-delta modulation provides + * a fractional-N divider in the PLL, which helps reach those specific + * frequencies with less error. */ +static struct ccu_sdm_setting pll_audio_sdm_table[] = { + { .rate = 90316800, .pattern = 0xc001288d, .m = 3, .n = 22 }, + { .rate = 98304000, .pattern = 0xc001eb85, .m = 5, .n = 40 }, +}; + #define SUN50I_H616_PLL_AUDIO_REG 0x078 static struct ccu_nm pll_audio_hs_clk = { .enable = BIT(31), .lock = BIT(28), .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), - .m = _SUNXI_CCU_DIV(1, 1), /* input divider */ + .m = _SUNXI_CCU_DIV(16, 6), + .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table, + BIT(24), 0x178, BIT(31)), + .fixed_post_div = 2, .common = { + .features = CCU_FEATURE_FIXED_POSTDIV | + CCU_FEATURE_SIGMA_DELTA_MOD, .reg = 0x078, .hw.init = CLK_HW_INIT("pll-audio-hs", "osc24M", &ccu_nm_ops, @@ -685,18 +695,20 @@ static const struct clk_hw *clk_parent_pll_audio[] = { }; /* - * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200 - * rates can be set exactly in conjunction with sigma-delta modulation. + * The PLL_AUDIO_4X clock defaults to 24.5714 MHz according to the manual, with + * a final divider of 1. The 2X and 1X clocks use 2 and 4 respectively. The 1X + * clock is set to either 24576000 or 22579200 for 48 kHz and 44.1 kHz (and + * multiples). */ static CLK_FIXED_FACTOR_HWS(pll_audio_1x_clk, "pll-audio-1x", clk_parent_pll_audio, - 96, 1, CLK_SET_RATE_PARENT); + 4, 1, CLK_SET_RATE_PARENT); static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", clk_parent_pll_audio, - 48, 1, CLK_SET_RATE_PARENT); + 2, 1, CLK_SET_RATE_PARENT); static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", clk_parent_pll_audio, - 24, 1, CLK_SET_RATE_PARENT); + 1, 1, CLK_SET_RATE_PARENT); static const struct clk_hw *pll_periph0_parents[] = { &pll_periph0_clk.common.hw @@ -990,7 +1002,7 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun50i_h616_ccu_resets[] = { +static const struct ccu_reset_map sun50i_h616_ccu_resets[] = { [RST_MBUS] = { 0x540, BIT(30) }, [RST_BUS_DE] = { 0x60c, BIT(16) }, @@ -1136,12 +1148,14 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev) } /* - * Force the post-divider of pll-audio to 12 and the output divider - * of it to 2, so 24576000 and 22579200 rates can be set exactly.
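To see why the H616 fixed-factor dividers above shrink from 96/48/24 to 4/2/1: pll-audio-hs now produces the table rates directly, e.g. 24 MHz x 40.96 / 5 / 2 = 98.304 MHz, with the sigma-delta pattern supplying the fractional 0.96 of N. Dividing by the new 1X factor of 4 then lands exactly on the audio base rates. A freestanding check of the arithmetic, using the two rates from pll_audio_sdm_table:

#include <stdio.h>

int main(void)
{
	/* pll-audio-hs rates from the SDM table, in Hz */
	unsigned long hs[] = { 98304000, 90316800 };

	for (int i = 0; i < 2; i++) {
		unsigned long r1x = hs[i] / 4;	/* pll-audio-1x factor */

		/* 24576000 = 512 x 48000, 22579200 = 512 x 44100 */
		printf("hs %lu -> 1x %lu = 512 x %lu\n",
		       hs[i], r1x, r1x / 512);
	}
	return 0;
}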
+ * Set the output-divider for the pll-audio clocks (M0) to 2 and the + * input divider (M1) to 1 as recommended by the manual when using + * SDM. */ val = readl(reg + SUN50I_H616_PLL_AUDIO_REG); - val &= ~(GENMASK(21, 16) | BIT(0)); - writel(val | (11 << 16) | BIT(0), reg + SUN50I_H616_PLL_AUDIO_REG); + val &= ~BIT(1); + val |= BIT(0); + writel(val, reg + SUN50I_H616_PLL_AUDIO_REG); /* * First clock parent (osc32K) is unusable for CEC. But since there diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 1f4bc0e773a7..c9bf1fdb8a8a 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -731,7 +731,7 @@ static struct clk_hw_onecell_data sun5i_a10s_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun5i_a10s_ccu_resets[] = { +static const struct ccu_reset_map sun5i_a10s_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index e8b8d2dd7f2c..c2ad1209633e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -1146,7 +1146,7 @@ static struct clk_hw_onecell_data sun6i_a31_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun6i_a31_ccu_resets[] = { +static const struct ccu_reset_map sun6i_a31_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_PHY2] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c index 87e23d16ed0f..724b202863a8 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c @@ -356,7 +356,7 @@ int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg) const char *fw_name; /* ext-osc32k was the only input clock in the old binding. */ - fw_name = of_property_read_bool(dev->of_node, "clock-names") + fw_name = of_property_present(dev->of_node, "clock-names") ? 
"ext-osc32k" : NULL; ext_osc32k_clk = devm_clk_get_optional(dev, fw_name); if (IS_ERR(ext_osc32k_clk)) diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c index 6c2a08f722a8..9433dbac038e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c @@ -668,7 +668,7 @@ static struct clk_hw_onecell_data sun8i_a23_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun8i_a23_ccu_resets[] = { +static const struct ccu_reset_map sun8i_a23_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_HSIC] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 5e0bc08a9ce3..1ffc5ab9bc3c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -712,7 +712,7 @@ static struct clk_hw_onecell_data sun8i_a33_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun8i_a33_ccu_resets[] = { +static const struct ccu_reset_map sun8i_a33_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_HSIC] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index cb4c6b16c467..a51fb2c10c94 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -797,7 +797,7 @@ static struct clk_hw_onecell_data sun8i_a83t_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun8i_a83t_ccu_resets[] = { +static const struct ccu_reset_map sun8i_a83t_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_HSIC] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c index 7683ea08d8e3..a742f83746d1 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c @@ -146,7 +146,7 @@ static struct clk_hw_onecell_data sun50i_a64_de2_hw_clks = { .num = CLK_NUMBER_WITH_ROT, }; -static struct ccu_reset_map sun8i_a83t_de2_resets[] = { +static const struct ccu_reset_map sun8i_a83t_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, /* * Mixer1 reset line is shared with wb, so only RST_WB is @@ -156,7 +156,7 @@ static struct ccu_reset_map sun8i_a83t_de2_resets[] = { [RST_ROT] = { 0x08, BIT(3) }, }; -static struct ccu_reset_map sun8i_h3_de2_resets[] = { +static const struct ccu_reset_map sun8i_h3_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, /* * Mixer1 reset line is shared with wb, so only RST_WB is @@ -166,14 +166,14 @@ static struct ccu_reset_map sun8i_h3_de2_resets[] = { [RST_WB] = { 0x08, BIT(2) }, }; -static struct ccu_reset_map sun50i_a64_de2_resets[] = { +static const struct ccu_reset_map sun50i_a64_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, [RST_MIXER1] = { 0x08, BIT(1) }, [RST_WB] = { 0x08, BIT(2) }, [RST_ROT] = { 0x08, BIT(3) }, }; -static struct ccu_reset_map sun50i_h5_de2_resets[] = { +static const struct ccu_reset_map sun50i_h5_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, [RST_MIXER1] = { 0x08, BIT(1) }, [RST_WB] = { 0x08, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 13e57db2f8d5..74da5d27af72 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -876,7 +876,7 @@ static struct clk_hw_onecell_data sun50i_h5_hw_clks = { .num = CLK_NUMBER_H5, }; -static struct ccu_reset_map sun8i_h3_ccu_resets[] = { +static const struct 
ccu_reset_map sun8i_h3_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_PHY2] = { 0x0cc, BIT(2) }, @@ -939,7 +939,7 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = { [RST_BUS_SCR0] = { 0x2d8, BIT(20) }, }; -static struct ccu_reset_map sun50i_h5_ccu_resets[] = { +static const struct ccu_reset_map sun50i_h5_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_PHY2] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index da6569334d68..2b3e094a32cb 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -178,7 +178,7 @@ static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = { +static const struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = { [RST_APB0_IR] = { 0xb0, BIT(1) }, [RST_APB0_TIMER] = { 0xb0, BIT(2) }, [RST_APB0_RSB] = { 0xb0, BIT(3) }, @@ -186,14 +186,14 @@ static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = { [RST_APB0_I2C] = { 0xb0, BIT(6) }, }; -static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = { +static const struct ccu_reset_map sun8i_h3_r_ccu_resets[] = { [RST_APB0_IR] = { 0xb0, BIT(1) }, [RST_APB0_TIMER] = { 0xb0, BIT(2) }, [RST_APB0_UART] = { 0xb0, BIT(4) }, [RST_APB0_I2C] = { 0xb0, BIT(6) }, }; -static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = { +static const struct ccu_reset_map sun50i_a64_r_ccu_resets[] = { [RST_APB0_IR] = { 0xb0, BIT(1) }, [RST_APB0_TIMER] = { 0xb0, BIT(2) }, [RST_APB0_RSB] = { 0xb0, BIT(3) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 2f51ceab8016..a374aeeca3f4 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -1162,7 +1162,7 @@ static struct clk_hw_onecell_data sun8i_r40_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun8i_r40_ccu_resets[] = { +static const struct ccu_reset_map sun8i_r40_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_USB_PHY1] = { 0x0cc, BIT(1) }, [RST_USB_PHY2] = { 0x0cc, BIT(2) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index d24c0d8dfee4..00d04f7ad94d 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -644,7 +644,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { .num = CLK_I2S0 + 1, }; -static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { +static const struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_MBUS] = { 0x0fc, BIT(31) }, @@ -679,7 +679,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_UART2] = { 0x2d8, BIT(18) }, }; -static struct ccu_reset_map sun8i_v3_ccu_resets[] = { +static const struct ccu_reset_map sun8i_v3_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_MBUS] = { 0x0fc, BIT(31) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c index 0975ac58949f..d561c15f5122 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c @@ -177,7 +177,7 @@ static struct clk_hw_onecell_data sun9i_a80_de_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun9i_a80_de_resets[] = { +static const struct ccu_reset_map sun9i_a80_de_resets[] = { [RST_FE0] = { 0x0c, BIT(0) }, [RST_FE1] = { 0x0c, BIT(1) }, [RST_FE2] = { 0x0c, BIT(2) }, diff --git 
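The sun6i-rtc change a few hunks above swaps of_property_read_bool() for of_property_present(): the driver only tests whether "clock-names" exists to distinguish old and new bindings, and of_property_present() is the API that states that intent, leaving of_property_read_bool() for properties that genuinely encode a boolean. The idiom, sketched in kernel context:

/* presence test: which binding are we dealing with? */
if (of_property_present(dev->of_node, "clock-names"))
	fw_name = "ext-osc32k";	/* new binding: named input clock */
else
	fw_name = NULL;		/* old binding: single unnamed clock */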
a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c index e5527c8cc64f..9e2b8d47fc54 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c @@ -68,7 +68,7 @@ static struct clk_hw_onecell_data sun9i_a80_usb_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun9i_a80_usb_resets[] = { +static const struct ccu_reset_map sun9i_a80_usb_resets[] = { [RST_USB0_HCI] = { 0x0, BIT(17) }, [RST_USB1_HCI] = { 0x0, BIT(18) }, [RST_USB2_HCI] = { 0x0, BIT(19) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c index 756dd8fca6b0..5da9a16b4ec7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c @@ -1108,7 +1108,7 @@ static struct clk_hw_onecell_data sun9i_a80_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map sun9i_a80_ccu_resets[] = { +static const struct ccu_reset_map sun9i_a80_ccu_resets[] = { /* AHB0 reset controls */ [RST_BUS_FD] = { 0x5a0, BIT(0) }, [RST_BUS_VE] = { 0x5a0, BIT(1) }, diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c index 52f1a04269f8..fb37c0fc4fde 100644 --- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c +++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c @@ -477,7 +477,7 @@ static struct clk_hw_onecell_data suniv_hw_clks = { .num = CLK_NUMBER, }; -static struct ccu_reset_map suniv_ccu_resets[] = { +static const struct ccu_reset_map suniv_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, [RST_BUS_DMA] = { 0x2c0, BIT(6) }, diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h index 329734f8cf42..dd330426a6e5 100644 --- a/drivers/clk/sunxi-ng/ccu_common.h +++ b/drivers/clk/sunxi-ng/ccu_common.h @@ -50,7 +50,7 @@ struct sunxi_ccu_desc { struct clk_hw_onecell_data *hw_clks; - struct ccu_reset_map *resets; + const struct ccu_reset_map *resets; unsigned long num_resets; }; diff --git a/drivers/clk/sunxi-ng/ccu_reset.h b/drivers/clk/sunxi-ng/ccu_reset.h index e9b973cae4af..941276a8ec2e 100644 --- a/drivers/clk/sunxi-ng/ccu_reset.h +++ b/drivers/clk/sunxi-ng/ccu_reset.h @@ -17,7 +17,7 @@ struct ccu_reset_map { struct ccu_reset { void __iomem *base; - struct ccu_reset_map *reset_map; + const struct ccu_reset_map *reset_map; spinlock_t *lock; struct reset_controller_dev rcdev; diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 7bfba0afd778..b2323cb8eddc 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -174,7 +174,7 @@ static int tegra_bpmp_clk_determine_rate(struct clk_hw *hw, unsigned long rate; int err; - rate = min(max(rate_req->rate, rate_req->min_rate), rate_req->max_rate); + rate = clamp(rate_req->rate, rate_req->min_rate, rate_req->max_rate); memset(&request, 0, sizeof(request)); request.rate = min_t(u64, rate, S64_MAX); diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c index 7a0269bdfbb3..b2233d3ff9a9 100644 --- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c +++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c @@ -17,6 +17,7 @@ #include <linux/of.h> #include <linux/math64.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/err.h> #include <linux/iopoll.h> @@ -121,26 +122,24 @@ enum clk_wzrd_int_clks { /** * struct clk_wzrd - Clock wizard private data structure * - * @clk_data: Clock data * @nb: Notifier block * @base: Memory base * @clk_in1: Handle to input clock 'clk_in1' * @axi_clk: Handle to input clock 
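The tegra-bpmp change above is a readability substitution: clamp(v, lo, hi) is defined to give the same result as min(max(v, lo), hi), and recent kernels additionally diagnose constant limits in the wrong order at compile time. A freestanding equivalent showing the three cases:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define clamp(v, lo, hi) min(max(v, lo), hi)	/* same as min(max()) */

int main(void)
{
	printf("%d\n", clamp(5, 10, 20));	/* 10: raised to the floor */
	printf("%d\n", clamp(15, 10, 20));	/* 15: already in range */
	printf("%d\n", clamp(25, 10, 20));	/* 20: cut to the ceiling */
	return 0;
}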
's_axi_aclk' * @clks_internal: Internal clocks - * @clkout: Output clocks * @speed_grade: Speed grade of the device * @suspended: Flag indicating power state of the device + * @clk_data: Output clock data */ struct clk_wzrd { - struct clk_onecell_data clk_data; struct notifier_block nb; void __iomem *base; struct clk *clk_in1; struct clk *axi_clk; - struct clk *clks_internal[wzrd_clk_int_max]; - struct clk *clkout[WZRD_NUM_OUTPUTS]; + struct clk_hw *clks_internal[wzrd_clk_int_max]; unsigned int speed_grade; bool suspended; + struct clk_hw_onecell_data clk_data; }; /** @@ -765,7 +764,7 @@ static const struct clk_ops clk_wzrd_clk_divider_ops_f = { .recalc_rate = clk_wzrd_recalc_ratef, }; -static struct clk *clk_wzrd_register_divf(struct device *dev, +static struct clk_hw *clk_wzrd_register_divf(struct device *dev, const char *name, const char *parent_name, unsigned long flags, @@ -805,10 +804,10 @@ static struct clk *clk_wzrd_register_divf(struct device *dev, if (ret) return ERR_PTR(ret); - return hw->clk; + return hw; } -static struct clk *clk_wzrd_ver_register_divider(struct device *dev, +static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, @@ -852,10 +851,10 @@ static struct clk *clk_wzrd_ver_register_divider(struct device *dev, if (ret) return ERR_PTR(ret); - return hw->clk; + return hw; } -static struct clk *clk_wzrd_register_divider(struct device *dev, +static struct clk_hw *clk_wzrd_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, @@ -898,7 +897,7 @@ static struct clk *clk_wzrd_register_divider(struct device *dev, if (ret) return ERR_PTR(ret); - return hw->clk; + return hw; } static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event, @@ -963,81 +962,30 @@ static const struct versal_clk_data versal_data = { .is_versal = true, }; -static int clk_wzrd_probe(struct platform_device *pdev) +static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs) { const char *clkout_name, *clk_name, *clk_mul_name; + struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev); u32 regl, regh, edge, regld, reghd, edged, div; - struct device_node *np = pdev->dev.of_node; const struct versal_clk_data *data; - struct clk_wzrd *clk_wzrd; unsigned long flags = 0; + bool is_versal = false; void __iomem *ctrl_reg; u32 reg, reg_f, mult; - bool is_versal = false; - unsigned long rate; - int nr_outputs; - int i, ret; - - clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL); - if (!clk_wzrd) - return -ENOMEM; - platform_set_drvdata(pdev, clk_wzrd); - - clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(clk_wzrd->base)) - return PTR_ERR(clk_wzrd->base); - - ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade); - if (!ret) { - if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) { - dev_warn(&pdev->dev, "invalid speed grade '%d'\n", - clk_wzrd->speed_grade); - clk_wzrd->speed_grade = 0; - } - } - - clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1"); - if (IS_ERR(clk_wzrd->clk_in1)) - return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1), - "clk_in1 not found\n"); - - clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); - if (IS_ERR(clk_wzrd->axi_clk)) - return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk), - "s_axi_aclk not found\n"); - ret = clk_prepare_enable(clk_wzrd->axi_clk); - if (ret) { - dev_err(&pdev->dev, "enabling s_axi_aclk failed\n"); - return ret; - } - 
rate = clk_get_rate(clk_wzrd->axi_clk); - if (rate > WZRD_ACLK_MAX_FREQ) { - dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", - rate); - ret = -EINVAL; - goto err_disable_clk; - } + int i; - data = device_get_match_data(&pdev->dev); + data = device_get_match_data(dev); if (data) is_versal = data->is_versal; - ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs); - if (ret || nr_outputs > WZRD_NUM_OUTPUTS) { - ret = -EINVAL; - goto err_disable_clk; - } - - clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_out0", dev_name(&pdev->dev)); - if (!clkout_name) { - ret = -ENOMEM; - goto err_disable_clk; - } + clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev)); + if (!clkout_name) + return -ENOMEM; if (is_versal) { if (nr_outputs == 1) { - clk_wzrd->clkout[0] = clk_wzrd_ver_register_divider - (&pdev->dev, clkout_name, + clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider + (dev, clkout_name, __clk_get_name(clk_wzrd->clk_in1), 0, clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3), WZRD_CLKOUT_DIVIDE_SHIFT, @@ -1045,7 +993,7 @@ static int clk_wzrd_probe(struct platform_device *pdev) CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, DIV_ALL, &clkwzrd_lock); - goto out; + return 0; } /* register multiplier */ edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) & @@ -1069,8 +1017,8 @@ static int clk_wzrd_probe(struct platform_device *pdev) div = 64; } else { if (nr_outputs == 1) { - clk_wzrd->clkout[0] = clk_wzrd_register_divider - (&pdev->dev, clkout_name, + clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider + (dev, clkout_name, __clk_get_name(clk_wzrd->clk_in1), 0, clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3), WZRD_CLKOUT_DIVIDE_SHIFT, @@ -1078,7 +1026,7 @@ static int clk_wzrd_probe(struct platform_device *pdev) CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, DIV_ALL, &clkwzrd_lock); - goto out; + return 0; } reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)); reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK; @@ -1089,26 +1037,21 @@ static int clk_wzrd_probe(struct platform_device *pdev) mult = (reg * 1000) + reg_f; div = 1000; } - clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul", dev_name(&pdev->dev)); - if (!clk_name) { - ret = -ENOMEM; - goto err_disable_clk; - } - clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor - (&pdev->dev, clk_name, + clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev)); + if (!clk_name) + return -ENOMEM; + clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor + (dev, clk_name, __clk_get_name(clk_wzrd->clk_in1), 0, mult, div); if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) { - dev_err(&pdev->dev, "unable to register fixed-factor clock\n"); - ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]); - goto err_disable_clk; + dev_err(dev, "unable to register fixed-factor clock\n"); + return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]); } - clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev)); - if (!clk_name) { - ret = -ENOMEM; - goto err_rm_int_clk; - } + clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev)); + if (!clk_name) + return -ENOMEM; if (is_versal) { edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) & @@ -1121,36 +1064,31 @@ static int clk_wzrd_probe(struct platform_device *pdev) if (!div) div = 1; - clk_mul_name = __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]); + clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]); 
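The clock-wizard refactor running through these hunks converts every clk_register_*() call to its devm_clk_hw_register_*() counterpart, tying clock lifetime to the struct device so that the hand-rolled error unwinding and, further down, the whole .remove callback can be deleted. The shape of the conversion, sketched on a fixed-factor clock with illustrative names:

/* before: caller owns the clock and must clk_unregister() on every
 * error path and again in .remove */
clk = clk_register_fixed_factor(dev, name, parent, 0, mult, div);

/* after: unregistration happens automatically on unbind */
hw = devm_clk_hw_register_fixed_factor(dev, name, parent, 0, mult, div);
if (IS_ERR(hw))
	return PTR_ERR(hw);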
clk_wzrd->clks_internal[wzrd_clk_mul_div] = - clk_register_fixed_factor(&pdev->dev, clk_name, - clk_mul_name, 0, 1, div); + devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div); } else { ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0); - clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider - (&pdev->dev, clk_name, - __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]), + clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider + (dev, clk_name, + clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]), flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock); } if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) { - dev_err(&pdev->dev, "unable to register divider clock\n"); - ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]); - goto err_rm_int_clk; + dev_err(dev, "unable to register divider clock\n"); + return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]); } /* register div per output */ for (i = nr_outputs - 1; i >= 0 ; i--) { - clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "%s_out%d", dev_name(&pdev->dev), i); - if (!clkout_name) { - ret = -ENOMEM; - goto err_rm_int_clk; - } + clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i); + if (!clkout_name) + return -ENOMEM; if (is_versal) { - clk_wzrd->clkout[i] = clk_wzrd_ver_register_divider - (&pdev->dev, + clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider + (dev, clkout_name, clk_name, 0, clk_wzrd->base, (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8), @@ -1161,84 +1099,108 @@ static int clk_wzrd_probe(struct platform_device *pdev) DIV_O, &clkwzrd_lock); } else { if (!i) - clk_wzrd->clkout[i] = clk_wzrd_register_divf - (&pdev->dev, clkout_name, clk_name, flags, clk_wzrd->base, + clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf + (dev, clkout_name, clk_name, flags, clk_wzrd->base, (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12), WZRD_CLKOUT_DIVIDE_SHIFT, WZRD_CLKOUT_DIVIDE_WIDTH, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, DIV_O, &clkwzrd_lock); else - clk_wzrd->clkout[i] = clk_wzrd_register_divider - (&pdev->dev, clkout_name, clk_name, 0, clk_wzrd->base, + clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider + (dev, clkout_name, clk_name, 0, clk_wzrd->base, (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12), WZRD_CLKOUT_DIVIDE_SHIFT, WZRD_CLKOUT_DIVIDE_WIDTH, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, DIV_O, &clkwzrd_lock); } - if (IS_ERR(clk_wzrd->clkout[i])) { - int j; - - for (j = i + 1; j < nr_outputs; j++) - clk_unregister(clk_wzrd->clkout[j]); - dev_err(&pdev->dev, - "unable to register divider clock\n"); - ret = PTR_ERR(clk_wzrd->clkout[i]); - goto err_rm_int_clks; + if (IS_ERR(clk_wzrd->clk_data.hws[i])) { + dev_err(dev, "unable to register divider clock\n"); + return PTR_ERR(clk_wzrd->clk_data.hws[i]); } } -out: - clk_wzrd->clk_data.clks = clk_wzrd->clkout; - clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data); + return 0; +} - if (clk_wzrd->speed_grade) { - clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier; +static int clk_wzrd_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct clk_wzrd *clk_wzrd; + unsigned long rate; + int nr_outputs; + int ret; - ret = clk_notifier_register(clk_wzrd->clk_in1, - &clk_wzrd->nb); - if (ret) - dev_warn(&pdev->dev, - "unable to register clock notifier\n"); + ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs); + if (ret || nr_outputs > 
WZRD_NUM_OUTPUTS) + return -EINVAL; - ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb); - if (ret) - dev_warn(&pdev->dev, - "unable to register clock notifier\n"); - } + clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs), + GFP_KERNEL); + if (!clk_wzrd) + return -ENOMEM; + platform_set_drvdata(pdev, clk_wzrd); - return 0; + clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(clk_wzrd->base)) + return PTR_ERR(clk_wzrd->base); -err_rm_int_clks: - clk_unregister(clk_wzrd->clks_internal[1]); -err_rm_int_clk: - clk_unregister(clk_wzrd->clks_internal[0]); -err_disable_clk: - clk_disable_unprepare(clk_wzrd->axi_clk); + clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk"); + if (IS_ERR(clk_wzrd->axi_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk), + "s_axi_aclk not found\n"); + rate = clk_get_rate(clk_wzrd->axi_clk); + if (rate > WZRD_ACLK_MAX_FREQ) { + dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate); + return -EINVAL; + } - return ret; -} + if (!of_property_present(np, "xlnx,static-config")) { + ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade); + if (!ret) { + if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) { + dev_warn(&pdev->dev, "invalid speed grade '%d'\n", + clk_wzrd->speed_grade); + clk_wzrd->speed_grade = 0; + } + } -static void clk_wzrd_remove(struct platform_device *pdev) -{ - int i; - struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev); + clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1"); + if (IS_ERR(clk_wzrd->clk_in1)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1), + "clk_in1 not found\n"); + + ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs); + if (ret) + return ret; + + clk_wzrd->clk_data.num = nr_outputs; + ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get, + &clk_wzrd->clk_data); + if (ret) { + dev_err(&pdev->dev, "unable to register clock provider\n"); + return ret; + } - of_clk_del_provider(pdev->dev.of_node); + if (clk_wzrd->speed_grade) { + clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier; - for (i = 0; i < WZRD_NUM_OUTPUTS; i++) - clk_unregister(clk_wzrd->clkout[i]); - for (i = 0; i < wzrd_clk_int_max; i++) - clk_unregister(clk_wzrd->clks_internal[i]); + ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1, + &clk_wzrd->nb); + if (ret) + dev_warn(&pdev->dev, + "unable to register clock notifier\n"); - if (clk_wzrd->speed_grade) { - clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb); - clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb); + ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk, + &clk_wzrd->nb); + if (ret) + dev_warn(&pdev->dev, + "unable to register clock notifier\n"); + } } - clk_disable_unprepare(clk_wzrd->axi_clk); + return 0; } static const struct of_device_id clk_wzrd_ids[] = { @@ -1257,7 +1219,6 @@ static struct platform_driver clk_wzrd_driver = { .pm = &clk_wzrd_dev_pm_ops, }, .probe = clk_wzrd_probe, - .remove = clk_wzrd_remove, }; module_platform_driver(clk_wzrd_driver); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2561b215432a..92a83a9bb2e1 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -217,6 +217,20 @@ config CPUFREQ_DT If in doubt, say N. 
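The rewritten probe above also turns the output-clock array into a trailing flexible array (clk_data.hws[]) and sizes the allocation with struct_size(), which computes the header plus n elements instead of an open-coded multiplication. A freestanding sketch of the pattern, with a stand-in struct and a simplified macro that omits the kernel version's overflow checking:

#include <stdlib.h>

struct table {
	size_t num;
	int hws[];	/* flexible array member, must be last */
};

/* simplified stand-in for the kernel's struct_size() helper */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof(*(p)->member) * (size_t)(n))

static struct table *table_alloc(size_t n)
{
	struct table *t = calloc(1, struct_size(t, hws, n));

	if (t)
		t->num = n;
	return t;
}

int main(void)
{
	struct table *t = table_alloc(4);	/* header plus 4 entries */

	free(t);
	return 0;
}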
+config CPUFREQ_VIRT + tristate "Virtual cpufreq driver" + depends on GENERIC_ARCH_TOPOLOGY + help + This adds a virtualized cpufreq driver for guest kernels that + read and write an MMIO region of a virtualized cpufreq device to + communicate with the host. It sends performance requests to the host, + which uses them as a hint to schedule vCPU threads and select CPU + frequency. If a VM does not support a virtualized FIE such as AMUs, + it updates the frequency scaling factor by polling the host CPU frequency + to enable accurate Per-Entity Load Tracking for tasks running in the guest. + + If in doubt, say N. + config CPUFREQ_DT_PLATDEV tristate "Generic DT based cpufreq platdev driver" depends on OF diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index 58151ca56695..eb678fa5260a 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc @@ -17,13 +17,6 @@ config CPU_FREQ_CBE_PMI frequencies. Using PMI, the processor will not only be able to run at lower speed, but also at lower core voltage. -config CPU_FREQ_MAPLE - bool "Support for Maple 970FX Evaluation Board" - depends on PPC_MAPLE - help - This adds support for frequency switching on Maple 970FX - Evaluation Board and compatible boards (IBM JS2x blades). - config CPU_FREQ_PMAC bool "Support for Apple PowerBooks" depends on ADB_PMU && PPC32 diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 0f184031dd12..d35a28dd9463 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o +obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o # Traces CFLAGS_amd-pstate-trace.o := -I$(src) @@ -92,7 +93,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o -obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 5d03a295a085..2fd0f6be6fa3 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -474,8 +474,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv) rc = brcm_avs_get_pmap(priv, NULL); magic = readl(priv->base + AVS_MBOX_MAGIC); - return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) || - (rc != -EINVAL)); + return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) && + (rc != -EINVAL); } static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 2b8708475ac7..bd8f75accfa0 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -36,33 +36,15 @@ static LIST_HEAD(cpu_data_list); static bool boost_supported; -struct cppc_workaround_oem_info { - char oem_id[ACPI_OEM_ID_SIZE + 1]; - char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; - u32 oem_revision; -}; - -static struct cppc_workaround_oem_info wa_info[] = { - { - .oem_id = "HISI ", - .oem_table_id = "HIP07 ", - .oem_revision = 0, - }, { - .oem_id = "HISI ", - .oem_table_id = "HIP08 ", - .oem_revision = 0, - } -}; - static struct cpufreq_driver
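The brcmstb change above fixes a boolean tautology: (rc != -ENOTSUPP) || (rc != -EINVAL) is true for every rc, since no value equals both constants at once, so the firmware-loaded test had degenerated to the magic-number comparison alone. De Morgan gives the intended conjunction. A freestanding demonstration (kernel ENOTSUPP is 524):

#include <stdio.h>

#define ENOTSUPP 524
#define EINVAL 22

int main(void)
{
	for (int rc = -600; rc <= 0; rc++) {
		int buggy = (rc != -ENOTSUPP) || (rc != -EINVAL);
		int fixed = (rc != -ENOTSUPP) && (rc != -EINVAL);

		if (!buggy)
			printf("buggy rejects %d\n", rc);	/* never prints */
		if (!fixed)
			printf("fixed rejects %d\n", rc);	/* -524, -22 */
	}
	return 0;
}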
cppc_cpufreq_driver; +#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE static enum { FIE_UNSET = -1, FIE_ENABLED, FIE_DISABLED } fie_disabled = FIE_UNSET; -#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE module_param(fie_disabled, int, 0444); MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)"); @@ -78,7 +60,6 @@ struct cppc_freq_invariance { static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv); static struct kthread_worker *kworker_fie; -static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu); static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, struct cppc_perf_fb_ctrs *fb_ctrs_t0, struct cppc_perf_fb_ctrs *fb_ctrs_t1); @@ -118,6 +99,9 @@ static void cppc_scale_freq_workfn(struct kthread_work *work) perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs, &fb_ctrs); + if (!perf) + return; + cppc_fi->prev_perf_fb_ctrs = fb_ctrs; perf <<= SCHED_CAPACITY_SHIFT; @@ -420,6 +404,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev, struct cppc_cpudata *cpu_data; policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return -EINVAL; + cpu_data = policy->driver_data; perf_caps = &cpu_data->perf_caps; max_cap = arch_scale_cpu_capacity(cpu_dev->id); @@ -487,6 +474,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz, int step; policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return -EINVAL; + cpu_data = policy->driver_data; perf_caps = &cpu_data->perf_caps; max_cap = arch_scale_cpu_capacity(cpu_dev->id); @@ -724,13 +714,31 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, delta_delivered = get_delta(fb_ctrs_t1->delivered, fb_ctrs_t0->delivered); - /* Check to avoid divide-by zero and invalid delivered_perf */ + /* + * Avoid divide-by-zero and unchanged feedback counters. + * Leave it for callers to handle. + */ if (!delta_reference || !delta_delivered) - return cpu_data->perf_ctrls.desired_perf; + return 0; return (reference_perf * delta_delivered) / delta_reference; } +static int cppc_get_perf_ctrs_sample(int cpu, + struct cppc_perf_fb_ctrs *fb_ctrs_t0, + struct cppc_perf_fb_ctrs *fb_ctrs_t1) +{ + int ret; + + ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0); + if (ret) + return ret; + + udelay(2); /* 2usec delay between sampling */ + + return cppc_get_perf_ctrs(cpu, fb_ctrs_t1); +} + static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) { struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; @@ -746,18 +754,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) cpufreq_cpu_put(policy); - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); - if (ret) - return 0; - - udelay(2); /* 2usec delay between sampling */ - - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1); - if (ret) - return 0; + ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1); + if (ret) { + if (ret == -EFAULT) + /* One of the associated CPPC regs is 0. */ + goto out_invalid_counters; + else + return 0; + } delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0, &fb_ctrs_t1); + if (!delivered_perf) + goto out_invalid_counters; + + return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf); + +out_invalid_counters: + /* + * Feedback counters could be unchanged or 0 when a cpu enters a + * low-power idle state, e.g. clock-gated or power-gated. + * Use desired perf for reflecting frequency. Get the latest register + * value first as some platforms may update the actual delivered perf + * there; if that fails, resort to the cached desired perf.
+ */ + if (cppc_get_desired_perf(cpu, &delivered_perf)) + delivered_perf = cpu_data->perf_ctrls.desired_perf; return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf); } @@ -812,57 +834,6 @@ static struct cpufreq_driver cppc_cpufreq_driver = { .name = "cppc_cpufreq", }; -/* - * HISI platform does not support delivered performance counter and - * reference performance counter. It can calculate the performance using the - * platform specific mechanism. We reuse the desired performance register to - * store the real performance calculated by the platform. - */ -static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct cppc_cpudata *cpu_data; - u64 desired_perf; - int ret; - - if (!policy) - return -ENODEV; - - cpu_data = policy->driver_data; - - cpufreq_cpu_put(policy); - - ret = cppc_get_desired_perf(cpu, &desired_perf); - if (ret < 0) - return -EIO; - - return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf); -} - -static void cppc_check_hisi_workaround(void) -{ - struct acpi_table_header *tbl; - acpi_status status = AE_OK; - int i; - - status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); - if (ACPI_FAILURE(status) || !tbl) - return; - - for (i = 0; i < ARRAY_SIZE(wa_info); i++) { - if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && - !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && - wa_info[i].oem_revision == tbl->oem_revision) { - /* Overwrite the get() callback */ - cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate; - fie_disabled = FIE_DISABLED; - break; - } - } - - acpi_put_table(tbl); -} - static int __init cppc_cpufreq_init(void) { int ret; @@ -870,7 +841,6 @@ static int __init cppc_cpufreq_init(void) if (!acpi_cpc_valid()) return -ENODEV; - cppc_check_hisi_workaround(); cppc_freq_invariance_init(); populate_efficiency_class(); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 18942bfe9c95..2a3e8bd317c9 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,7 @@ static const struct of_device_id allowlist[] __initconst = { * platforms using "operating-points-v2" property. 
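For reference, the arithmetic behind cppc_perf_from_fbctrs() above: the delivered counter advances with the actual core clock and the reference counter at a fixed known rate, so the delivered perf scales the reference perf by the ratio of the two deltas. Returning 0 for frozen counters, instead of the cached desired perf, lets the caller distinguish "no data" from a real measurement and apply the fallback explicitly. A freestanding sketch with made-up counter values:

#include <stdint.h>
#include <stdio.h>

static uint64_t perf_from_fbctrs(uint64_t ref_perf,
				 uint64_t d_ref, uint64_t d_delivered)
{
	/* frozen counters: report "no data" rather than guessing */
	if (!d_ref || !d_delivered)
		return 0;
	return ref_perf * d_delivered / d_ref;
}

int main(void)
{
	/* core ran at 80% of the reference rate over the window */
	printf("%llu\n", (unsigned long long)
	       perf_from_fbctrs(100, 1000000, 800000));	/* 80 */
	/* idle window: caller falls back to desired perf */
	printf("%llu\n", (unsigned long long)
	       perf_from_fbctrs(100, 0, 0));		/* 0 */
	return 0;
}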
*/ static const struct of_device_id blocklist[] __initconst = { + { .compatible = "allwinner,sun50i-a100" }, { .compatible = "allwinner,sun50i-h6", }, { .compatible = "allwinner,sun50i-h616", }, { .compatible = "allwinner,sun50i-h618", }, diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 6a8e97896d38..ed1a6dbad638 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -148,7 +148,9 @@ static int __init cpufreq_init(void) ret = cpufreq_register_driver(&loongson2_cpufreq_driver); - if (!ret && !nowait) { + if (ret) { + platform_driver_unregister(&platform_driver); + } else if (!nowait) { saved_cpu_wait = cpu_wait; cpu_wait = loongson2_cpu_wait; } diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c index 61ebebf69455..bd34bf0fafa5 100644 --- a/drivers/cpufreq/loongson3_cpufreq.c +++ b/drivers/cpufreq/loongson3_cpufreq.c @@ -346,8 +346,11 @@ static int loongson3_cpufreq_probe(struct platform_device *pdev) { int i, ret; - for (i = 0; i < MAX_PACKAGES; i++) - devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]); + for (i = 0; i < MAX_PACKAGES; i++) { + ret = devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]); + if (ret) + return ret; + } ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0); if (ret <= 0) diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c deleted file mode 100644 index 690da85c4865..000000000000 --- a/drivers/cpufreq/maple-cpufreq.c +++ /dev/null @@ -1,242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2011 Dmitry Eremin-Solenikov - * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> - * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de> - * - * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, - * that is iMac G5 and latest single CPU desktop. - */ - -#undef DEBUG - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/completion.h> -#include <linux/mutex.h> -#include <linux/time.h> -#include <linux/of.h> - -#define DBG(fmt...) 
pr_debug(fmt) - -/* see 970FX user manual */ - -#define SCOM_PCR 0x0aa001 /* PCR scom addr */ - -#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ -#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ -#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ -#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ -#define PCR_SPEED_MASK 0x000e0000U /* speed mask */ -#define PCR_SPEED_SHIFT 17 -#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ -#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ -#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ -#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ -#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ -#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ - -#define SCOM_PSR 0x408001 /* PSR scom addr */ -/* warning: PSR is a 64 bits register */ -#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ -#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ -#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */ -#define PSR_CUR_SPEED_SHIFT (56) - -/* - * The G5 only supports two frequencies (Quarter speed is not supported) - */ -#define CPUFREQ_HIGH 0 -#define CPUFREQ_LOW 1 - -static struct cpufreq_frequency_table maple_cpu_freqs[] = { - {0, CPUFREQ_HIGH, 0}, - {0, CPUFREQ_LOW, 0}, - {0, 0, CPUFREQ_TABLE_END}, -}; - -/* Power mode data is an array of the 32 bits PCR values to use for - * the various frequencies, retrieved from the device-tree - */ -static int maple_pmode_cur; - -static const u32 *maple_pmode_data; -static int maple_pmode_max; - -/* - * SCOM based frequency switching for 970FX rev3 - */ -static int maple_scom_switch_freq(int speed_mode) -{ - unsigned long flags; - int to; - - local_irq_save(flags); - - /* Clear PCR high */ - scom970_write(SCOM_PCR, 0); - /* Clear PCR low */ - scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); - /* Set PCR low */ - scom970_write(SCOM_PCR, PCR_HILO_SELECT | - maple_pmode_data[speed_mode]); - - /* Wait for completion */ - for (to = 0; to < 10; to++) { - unsigned long psr = scom970_read(SCOM_PSR); - - if ((psr & PSR_CMD_RECEIVED) == 0 && - (((psr >> PSR_CUR_SPEED_SHIFT) ^ - (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) - == 0) - break; - if (psr & PSR_CMD_COMPLETED) - break; - udelay(100); - } - - local_irq_restore(flags); - - maple_pmode_cur = speed_mode; - ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul; - - return 0; -} - -static int maple_scom_query_freq(void) -{ - unsigned long psr = scom970_read(SCOM_PSR); - int i; - - for (i = 0; i <= maple_pmode_max; i++) - if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ - (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) - break; - return i; -} - -/* - * Common interface to the cpufreq core - */ - -static int maple_cpufreq_target(struct cpufreq_policy *policy, - unsigned int index) -{ - return maple_scom_switch_freq(index); -} - -static unsigned int maple_cpufreq_get_speed(unsigned int cpu) -{ - return maple_cpu_freqs[maple_pmode_cur].frequency; -} - -static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - cpufreq_generic_init(policy, maple_cpu_freqs, 12000); - return 0; -} - -static struct cpufreq_driver maple_cpufreq_driver = { - .name = "maple", - .flags = CPUFREQ_CONST_LOOPS, - .init = maple_cpufreq_cpu_init, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = maple_cpufreq_target, - .get = maple_cpufreq_get_speed, - .attr = cpufreq_generic_attr, -}; - -static int __init maple_cpufreq_init(void) 
-{ - struct device_node *cpunode; - unsigned int psize; - unsigned long max_freq; - const u32 *valp; - u32 pvr_hi; - int rc = -ENODEV; - - /* - * Behave here like powermac driver which checks machine compatibility - * to ease merging of two drivers in future. - */ - if (!of_machine_is_compatible("Momentum,Maple") && - !of_machine_is_compatible("Momentum,Apache")) - return 0; - - /* Get first CPU node */ - cpunode = of_cpu_device_node_get(0); - if (cpunode == NULL) { - pr_err("Can't find any CPU 0 node\n"); - goto bail_noprops; - } - - /* Check 970FX for now */ - /* we actually don't care on which CPU to access PVR */ - pvr_hi = PVR_VER(mfspr(SPRN_PVR)); - if (pvr_hi != 0x3c && pvr_hi != 0x44) { - pr_err("Unsupported CPU version (%x)\n", pvr_hi); - goto bail_noprops; - } - - /* Look for the powertune data in the device-tree */ - /* - * On Maple this property is provided by PIBS in dual-processor config, - * not provided by PIBS in CPU0 config and also not provided by SLOF, - * so YMMV - */ - maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize); - if (!maple_pmode_data) { - DBG("No power-mode-data !\n"); - goto bail_noprops; - } - maple_pmode_max = psize / sizeof(u32) - 1; - - /* - * From what I see, clock-frequency is always the maximal frequency. - * The current driver can not slew sysclk yet, so we really only deal - * with powertune steps for now. We also only implement full freq and - * half freq in this version. So far, I haven't yet seen a machine - * supporting anything else. - */ - valp = of_get_property(cpunode, "clock-frequency", NULL); - if (!valp) - goto bail_noprops; - max_freq = (*valp)/1000; - maple_cpu_freqs[0].frequency = max_freq; - maple_cpu_freqs[1].frequency = max_freq/2; - - /* Force apply current frequency to make sure everything is in - * sync (voltage is right for example). Firmware may leave us with - * a strange setting ... 
- */ - msleep(10); - maple_pmode_cur = -1; - maple_scom_switch_freq(maple_scom_query_freq()); - - pr_info("Registering Maple CPU frequency driver\n"); - pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", - maple_cpu_freqs[1].frequency/1000, - maple_cpu_freqs[0].frequency/1000, - maple_cpu_freqs[maple_pmode_cur].frequency/1000); - - rc = cpufreq_register_driver(&maple_cpufreq_driver); - -bail_noprops: - of_node_put(cpunode); - - return rc; -} - -module_init(maple_cpufreq_init); - - -MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index f7db5f4ad306..9252ebd60373 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -62,7 +62,7 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, policy = cpufreq_cpu_get_raw(cpu_dev->id); if (!policy) - return 0; + return -EINVAL; data = policy->driver_data; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 5892c73e129d..07d6f9a9b7c8 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -287,7 +287,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) ret = cpufreq_enable_boost_support(); if (ret) { dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); - goto out_free_opp; + goto out_free_table; } else { scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; scmi_cpufreq_driver.boost_enabled = true; @@ -296,6 +296,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) return 0; +out_free_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); out_free_opp: dev_pm_opp_remove_all_dynamic(cpu_dev); diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 352e1a69a85e..17d6a149f580 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -22,6 +22,9 @@ #define NVMEM_MASK 0x7 #define NVMEM_SHIFT 5 +#define SUN50I_A100_NVMEM_MASK 0xf +#define SUN50I_A100_NVMEM_SHIFT 12 + static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev; struct sunxi_cpufreq_data { @@ -45,6 +48,23 @@ static u32 sun50i_h6_efuse_xlate(u32 speedbin) return 0; } +static u32 sun50i_a100_efuse_xlate(u32 speedbin) +{ + u32 efuse_value; + + efuse_value = (speedbin >> SUN50I_A100_NVMEM_SHIFT) & + SUN50I_A100_NVMEM_MASK; + + switch (efuse_value) { + case 0b100: + return 2; + case 0b010: + return 1; + default: + return 0; + } +} + static int get_soc_id_revision(void) { #ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY @@ -108,6 +128,10 @@ static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = { .efuse_xlate = sun50i_h6_efuse_xlate, }; +static struct sunxi_cpufreq_data sun50i_a100_cpufreq_data = { + .efuse_xlate = sun50i_a100_efuse_xlate, +}; + static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = { .efuse_xlate = sun50i_h616_efuse_xlate, }; @@ -116,6 +140,9 @@ static const struct of_device_id cpu_opp_match_list[] = { { .compatible = "allwinner,sun50i-h6-operating-points", .data = &sun50i_h6_cpufreq_data, }, + { .compatible = "allwinner,sun50i-a100-operating-points", + .data = &sun50i_a100_cpufreq_data, + }, { .compatible = "allwinner,sun50i-h616-operating-points", .data = &sun50i_h616_cpufreq_data, }, @@ -291,6 +318,7 @@ static struct platform_driver sun50i_cpufreq_driver = { static const struct of_device_id sun50i_cpufreq_match_list[] = { { .compatible = "allwinner,sun50i-h6" }, + { .compatible = 
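The scmi fix above adds the unwind step that was missing once the frequency table had been allocated: a failure in cpufreq_enable_boost_support() now frees the table first and then falls through to the OPP cleanup, undoing allocations in the reverse order they were made. The general shape of the idiom, as a freestanding sketch:

#include <stdio.h>

static int alloc_a(void) { return 0; }
static int alloc_b(void) { return 0; }		/* depends on a */
static int alloc_c(void) { return -1; }		/* force the error path */
static void free_a(void) { puts("undo a"); }
static void free_b(void) { puts("undo b"); }

static int init(void)
{
	int ret;

	ret = alloc_a();
	if (ret)
		return ret;

	ret = alloc_b();
	if (ret)
		goto err_free_a;

	ret = alloc_c();
	if (ret)
		goto err_free_b;

	return 0;

err_free_b:	/* labels unwind in reverse order and fall through */
	free_b();
err_free_a:
	free_a();
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;
}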
"allwinner,sun50i-a100" }, { .compatible = "allwinner,sun50i-h616" }, { .compatible = "allwinner,sun50i-h618" }, { .compatible = "allwinner,sun50i-h700" }, diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index ba621ce1cdda..5a5147277cd0 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -93,6 +93,8 @@ struct ti_cpufreq_soc_data { bool multi_regulator; /* Backward compatibility hack: Might have missing syscon */ #define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1 +/* Backward compatibility hack: new syscon size is 1 register wide */ +#define TI_QUIRK_SYSCON_IS_SINGLE_REG 0x2 u8 quirks; }; @@ -316,8 +318,8 @@ static struct ti_cpufreq_soc_data am625_soc_data = { .efuse_offset = 0x0018, .efuse_mask = 0x07c0, .efuse_shift = 0x6, - .rev_offset = 0x0014, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_IS_SINGLE_REG, }; static struct ti_cpufreq_soc_data am62a7_soc_data = { @@ -325,7 +327,6 @@ static struct ti_cpufreq_soc_data am62a7_soc_data = { .efuse_offset = 0x0, .efuse_mask = 0x07c0, .efuse_shift = 0x6, - .rev_offset = 0x0014, .multi_regulator = false, }; @@ -334,7 +335,6 @@ static struct ti_cpufreq_soc_data am62p5_soc_data = { .efuse_offset = 0x0, .efuse_mask = 0x07c0, .efuse_shift = 0x6, - .rev_offset = 0x0014, .multi_regulator = false, }; @@ -354,6 +354,10 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data, ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset, &efuse); + + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_IS_SINGLE_REG && ret == -EIO) + ret = regmap_read(opp_data->syscon, 0x0, &efuse); + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c new file mode 100644 index 000000000000..a050b3a6737f --- /dev/null +++ b/drivers/cpufreq/virtual-cpufreq.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Google LLC + */ + +#include <linux/arch_topology.h> +#include <linux/cpufreq.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* + * CPU0..CPUn + * +-------------+-------------------------------+--------+-------+ + * | Register | Description | Offset | Len | + * +-------------+-------------------------------+--------+-------+ + * | cur_perf | read this register to get | 0x0 | 0x4 | + * | | the current perf (integer val | | | + * | | representing perf relative to | | | + * | | max performance) | | | + * | | that vCPU is running at | | | + * +-------------+-------------------------------+--------+-------+ + * | set_perf | write to this register to set | 0x4 | 0x4 | + * | | perf value of the vCPU | | | + * +-------------+-------------------------------+--------+-------+ + * | perftbl_len | number of entries in perf | 0x8 | 0x4 | + * | | table. A single entry in the | | | + * | | perf table denotes no table | | | + * | | and the entry contains | | | + * | | the maximum perf value | | | + * | | that this vCPU supports. | | | + * | | The guest can request any | | | + * | | value between 1 and max perf | | | + * | | when perftbls are not used. 
| | | + * +---------------------------------------------+--------+-------+ + * | perftbl_sel | write to this register to | 0xc | 0x4 | + * | | select perf table entry to | | | + * | | read from | | | + * +---------------------------------------------+--------+-------+ + * | perftbl_rd | read this register to get | 0x10 | 0x4 | + * | | perf value of the selected | | | + * | | entry based on perftbl_sel | | | + * +---------------------------------------------+--------+-------+ + * | perf_domain | performance domain number | 0x14 | 0x4 | + * | | that this vCPU belongs to. | | | + * | | vCPUs sharing the same perf | | | + * | | domain number are part of the | | | + * | | same performance domain. | | | + * +-------------+-------------------------------+--------+-------+ + */ + +#define REG_CUR_PERF_STATE_OFFSET 0x0 +#define REG_SET_PERF_STATE_OFFSET 0x4 +#define REG_PERFTBL_LEN_OFFSET 0x8 +#define REG_PERFTBL_SEL_OFFSET 0xc +#define REG_PERFTBL_RD_OFFSET 0x10 +#define REG_PERF_DOMAIN_OFFSET 0x14 +#define PER_CPU_OFFSET 0x1000 + +#define PERFTBL_MAX_ENTRIES 64U + +static void __iomem *base; +static DEFINE_PER_CPU(u32, perftbl_num_entries); + +static void virt_scale_freq_tick(void) +{ + int cpu = smp_processor_id(); + u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu); + u64 cur_freq; + unsigned long scale; + + cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET + + REG_CUR_PERF_STATE_OFFSET); + + cur_freq <<= SCHED_CAPACITY_SHIFT; + scale = (unsigned long)div_u64(cur_freq, max_freq); + scale = min(scale, SCHED_CAPACITY_SCALE); + + this_cpu_write(arch_freq_scale, scale); +} + +static struct scale_freq_data virt_sfd = { + .source = SCALE_FREQ_SOURCE_VIRT, + .set_freq_scale = virt_scale_freq_tick, +}; + +static unsigned int virt_cpufreq_set_perf(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + writel_relaxed(target_freq, + base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET); + return 0; +} + +static unsigned int virt_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + virt_cpufreq_set_perf(policy, target_freq); + return target_freq; +} + +static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx) +{ + writel_relaxed(idx, base + cpu * PER_CPU_OFFSET + + REG_PERFTBL_SEL_OFFSET); + return readl_relaxed(base + cpu * PER_CPU_OFFSET + + REG_PERFTBL_RD_OFFSET); +} + +static int virt_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_freqs freqs; + int ret = 0; + + freqs.old = policy->cur; + freqs.new = target_freq; + + cpufreq_freq_transition_begin(policy, &freqs); + ret = virt_cpufreq_set_perf(policy, target_freq); + cpufreq_freq_transition_end(policy, &freqs, ret != 0); + + return ret; +} + +static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy) +{ + u32 cur_perf_domain, perf_domain; + struct device *cpu_dev; + int cpu; + + cur_perf_domain = readl_relaxed(base + policy->cpu * + PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET); + + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + continue; + + perf_domain = readl_relaxed(base + cpu * + PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET); + + if (perf_domain == cur_perf_domain) + cpumask_set_cpu(cpu, policy->cpus); + } + + return 0; +} + +static int virt_cpufreq_get_freq_info(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *table; + u32 num_perftbl_entries, idx; + + num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu); + + if (num_perftbl_entries == 1) { + 
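/* + * A perftbl_len of 1 denotes no table (see the register map above): + * the single entry holds the maximum perf value this vCPU supports, + * and the guest may request any value between 1 and that maximum. + */ +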
policy->cpuinfo.min_freq = 1; + policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0); + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + + policy->cur = policy->max; + return 0; + } + + table = kcalloc(num_perftbl_entries + 1, sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (idx = 0; idx < num_perftbl_entries; idx++) + table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx); + + table[idx].frequency = CPUFREQ_TABLE_END; + policy->freq_table = table; + + return 0; +} + +static int virt_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + struct device *cpu_dev; + int ret; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) + return -ENODEV; + + ret = virt_cpufreq_get_freq_info(policy); + if (ret) { + dev_warn(cpu_dev, "failed to get cpufreq info\n"); + return ret; + } + + ret = virt_cpufreq_get_sharing_cpus(policy); + if (ret) { + dev_warn(cpu_dev, "failed to get sharing cpumask\n"); + return ret; + } + + /* + * To simplify and improve latency of handling frequency requests on + * the host side, this ensures that the vCPU thread triggering the MMIO + * abort is the same thread whose performance constraints (e.g. uclamp + * settings) need to be updated. This spares the VMM (Virtual + * Machine Manager) from having to find the correct vCPU thread and/or + * from facing permission issues when configuring other threads. + */ + policy->dvfs_possible_from_any_cpu = false; + policy->fast_switch_possible = true; + + /* + * Using the default SCALE_FREQ_SOURCE_CPUFREQ is insufficient since + * the actual physical CPU frequency may not match the requested + * frequency from the vCPU thread due to frequency update latencies or + * other inputs to the physical CPU frequency selection. This + * additional FIE source allows for more accurate freq_scale updates + * and only takes effect if another FIE source such as AMUs has not + * been registered. + */ + topology_set_scale_freq_source(&virt_sfd, policy->cpus); + + return 0; +} + +static void virt_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_VIRT, policy->related_cpus); + kfree(policy->freq_table); +} + +static int virt_cpufreq_online(struct cpufreq_policy *policy) +{ + /* Nothing to restore. */ + return 0; +} + +static int virt_cpufreq_offline(struct cpufreq_policy *policy) +{ + /* Dummy offline() to avoid exit() being called and freeing resources.
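+ * The policy's freq_table and the registered scale-freq source must + * survive CPU offline/online cycles; only exit() may free them. +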
*/ + return 0; +} + +static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy) +{ + if (policy->freq_table) + return cpufreq_frequency_table_verify(policy, policy->freq_table); + + cpufreq_verify_within_cpu_limits(policy); + return 0; +} + +static struct cpufreq_driver cpufreq_virt_driver = { + .name = "virt-cpufreq", + .init = virt_cpufreq_cpu_init, + .exit = virt_cpufreq_cpu_exit, + .online = virt_cpufreq_online, + .offline = virt_cpufreq_offline, + .verify = virt_cpufreq_verify_policy, + .target = virt_cpufreq_target, + .fast_switch = virt_cpufreq_fast_switch, + .attr = cpufreq_generic_attr, +}; + +static int virt_cpufreq_driver_probe(struct platform_device *pdev) +{ + u32 num_perftbl_entries; + int ret, cpu; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + for_each_possible_cpu(cpu) { + num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET + + REG_PERFTBL_LEN_OFFSET); + + if (!num_perftbl_entries || num_perftbl_entries > PERFTBL_MAX_ENTRIES) + return -ENODEV; + + per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries; + } + + ret = cpufreq_register_driver(&cpufreq_virt_driver); + if (ret) { + dev_err(&pdev->dev, "Virtual CPUFreq driver failed to register: %d\n", ret); + return ret; + } + + dev_dbg(&pdev->dev, "Virtual CPUFreq driver initialized\n"); + return 0; +} + +static void virt_cpufreq_driver_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&cpufreq_virt_driver); +} + +static const struct of_device_id virt_cpufreq_match[] = { + { .compatible = "qemu,virtual-cpufreq", .data = NULL}, + {} +}; +MODULE_DEVICE_TABLE(of, virt_cpufreq_match); + +static struct platform_driver virt_cpufreq_driver = { + .probe = virt_cpufreq_driver_probe, + .remove = virt_cpufreq_driver_remove, + .driver = { + .name = "virt-cpufreq", + .of_match_table = virt_cpufreq_match, + }, +}; + +static int __init virt_cpufreq_init(void) +{ + return platform_driver_register(&virt_cpufreq_driver); +} +postcore_initcall(virt_cpufreq_init); + +static void __exit virt_cpufreq_exit(void) +{ + platform_driver_unregister(&virt_cpufreq_driver); +} +module_exit(virt_cpufreq_exit); + +MODULE_DESCRIPTION("Virtual cpufreq driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 14db9b7d985d..f68c65f1d023 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -22,6 +22,7 @@ #include <asm/idle.h> #include <asm/plpar_wrappers.h> #include <asm/rtas.h> +#include <asm/time.h> static struct cpuidle_driver pseries_idle_driver = { .name = "pseries_idle", diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index d228b4d18d56..14462c092039 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -26,6 +26,7 @@ #include <asm/smp.h> #include <asm/suspend.h> +#include "cpuidle.h" #include "dt_idle_states.h" #include "dt_idle_genpd.h" @@ -329,6 +330,9 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu) return ret; } + if (cpuidle_disabled()) + return 0; + ret = cpuidle_register(drv, NULL); if (ret) goto deinit; @@ -538,7 +542,10 @@ static int sbi_cpuidle_probe(struct platform_device *pdev) /* Setup CPU hotplug notifiers */ sbi_idle_init_cpuhp(); - pr_info("idle driver registered for all CPUs\n"); + if (cpuidle_disabled()) + pr_info("cpuidle is disabled\n"); + else + pr_info("idle driver registered for all CPUs\n"); return 0; @@ -582,4 +589,4 @@ static int __init
sbi_cpuidle_init(void) return 0; } -device_initcall(sbi_cpuidle_init); +arch_initcall(sbi_cpuidle_init); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 06ace16f9e71..0835da449db8 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -69,11 +69,15 @@ int cpuidle_play_dead(void) if (!drv) return -ENODEV; - /* Find lowest-power state that supports long-term idle */ - for (i = drv->state_count - 1; i >= 0; i--) + for (i = drv->state_count - 1; i >= 0; i--) { if (drv->states[i].enter_dead) - return drv->states[i].enter_dead(dev, i); + drv->states[i].enter_dead(dev, i); + } + /* + * If :enter_dead() is successful, it will never return, so reaching + * here means that all of them failed above or were not present. + */ return -ENODEV; } diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index f49818a13013..788a11cdb34b 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -129,16 +129,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Find and map all the device's BARS */ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; - ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + ret = pcim_request_all_regions(pdev, pci_name(pdev)); if (ret) { - dev_err(&pdev->dev, "Failed to map pci regions.\n"); + dev_err(&pdev->dev, "Failed to request PCI regions.\n"); goto out_err; } i = 0; for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { bar = &accel_pci_dev->pci_bars[i++]; - bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + bar->virt_addr = pcim_iomap(pdev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to ioremap PCI region.\n"); + ret = -ENOMEM; + goto out_err; + } } pci_set_master(pdev); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 659905e45950..115eabfd1f6b 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -131,16 +131,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Find and map all the device's BARS */ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; - ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + ret = pcim_request_all_regions(pdev, pci_name(pdev)); if (ret) { - dev_err(&pdev->dev, "Failed to map pci regions.\n"); + dev_err(&pdev->dev, "Failed to request PCI regions.\n"); goto out_err; } i = 0; for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { bar = &accel_pci_dev->pci_bars[i++]; - bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + bar->virt_addr = pcim_iomap(pdev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to ioremap PCI region.\n"); + ret = -ENOMEM; + goto out_err; + } } pci_set_master(pdev); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 400e36d9908f..94d0e73e42de 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -739,18 +739,22 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, dev_err(dev, "Unable to get usable DMA configuration\n"); goto clear_drvdata; } - /* Map PF's configuration registers */ - err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM, - OTX2_CPT_DRV_NAME); + err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME); if (err) { - 
dev_err(dev, "Couldn't get PCI resources 0x%x\n", err); + dev_err(dev, "Couldn't request PCI resources 0x%x\n", err); goto clear_drvdata; } pci_set_master(pdev); pci_set_drvdata(pdev, cptpf); cptpf->pdev = pdev; - cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM]; + /* Map PF's configuration registers */ + cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!cptpf->reg_base) { + err = -ENOMEM; + dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err); + goto clear_drvdata; + } /* Check if AF driver is up, otherwise defer probe */ err = cpt_is_pf_usable(cptpf); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index 527d34cc258b..d0b6ee901f62 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -358,9 +358,8 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, dev_err(dev, "Unable to get usable DMA configuration\n"); goto clear_drvdata; } - /* Map VF's configuration registers */ - ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM, - OTX2_CPTVF_DRV_NAME); + + ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME); if (ret) { dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret); goto clear_drvdata; @@ -369,7 +368,13 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, cptvf); cptvf->pdev = pdev; - cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM]; + /* Map VF's configuration registers */ + cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!cptvf->reg_base) { + ret = -ENOMEM; + dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret); + goto clear_drvdata; + } otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag); diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index e9cd7939c407..2a1f164db98e 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -247,8 +247,8 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent, dpa_perf->dpa_range = dent->dpa_range; dpa_perf->qos_class = dent->qos_class; dev_dbg(dev, - "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", - dent->dpa_range.start, dpa_perf->qos_class, + "DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", + &dent->dpa_range, dpa_perf->qos_class, dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth, dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth, dent->coord[ACCESS_COORDINATE_CPU].read_latency, @@ -279,8 +279,8 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds, range_contains(&pmem_range, &dent->dpa_range)) update_perf_entry(dev, dent, &mds->pmem_perf); else - dev_dbg(dev, "no partition for dsmas dpa: %#llx\n", - dent->dpa_range.start); + dev_dbg(dev, "no partition for dsmas dpa: %pra\n", + &dent->dpa_range); } } diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 0c62b4069ba0..800466f96a68 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -89,6 +89,11 @@ resource_size_t __rcrb_to_component(struct device *dev, enum cxl_rcrb which); u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb); +#define PCI_RCRB_CAP_LIST_ID_MASK GENMASK(7, 0) +#define PCI_RCRB_CAP_HDR_ID_MASK GENMASK(7, 0) +#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8) +#define PCI_CAP_EXP_SIZEOF 0x3c + extern struct rw_semaphore cxl_dpa_rwsem; extern struct rw_semaphore cxl_region_rwsem; diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 223c273c0cd1..ff0c96ade241 100644 
--- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -424,7 +424,6 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct cxl_dev_state *cxlds = cxlmd->cxlds; struct device *dev = &cxled->cxld.dev; - int rc; switch (mode) { case CXL_DECODER_RAM: @@ -435,11 +434,9 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, return -EINVAL; } - down_write(&cxl_dpa_rwsem); - if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { - rc = -EBUSY; - goto out; - } + guard(rwsem_write)(&cxl_dpa_rwsem); + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) + return -EBUSY; /* * Only allow modes that are supported by the current partition @@ -447,21 +444,15 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, */ if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { dev_dbg(dev, "no available pmem capacity\n"); - rc = -ENXIO; - goto out; + return -ENXIO; } if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { dev_dbg(dev, "no available ram capacity\n"); - rc = -ENXIO; - goto out; + return -ENXIO; } cxled->mode = mode; - rc = 0; -out: - up_write(&cxl_dpa_rwsem); - - return rc; + return 0; } int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index dff618c708dc..70d0a017e99c 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2537,9 +2537,8 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); } -static ssize_t create_pmem_region_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t create_region_store(struct device *dev, const char *buf, + size_t len, enum cxl_decoder_mode mode) { struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); struct cxl_region *cxlr; @@ -2549,31 +2548,26 @@ static ssize_t create_pmem_region_store(struct device *dev, if (rc != 1) return -EINVAL; - cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id); + cxlr = __create_region(cxlrd, mode, id); if (IS_ERR(cxlr)) return PTR_ERR(cxlr); return len; } + +static ssize_t create_pmem_region_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return create_region_store(dev, buf, len, CXL_DECODER_PMEM); +} DEVICE_ATTR_RW(create_pmem_region); static ssize_t create_ram_region_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); - struct cxl_region *cxlr; - int rc, id; - - rc = sscanf(buf, "region%d\n", &id); - if (rc != 1) - return -EINVAL; - - cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id); - if (IS_ERR(cxlr)) - return PTR_ERR(cxlr); - - return len; + return create_region_store(dev, buf, len, CXL_DECODER_RAM); } DEVICE_ATTR_RW(create_ram_region); diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index e1082e749c69..429973a2165b 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -52,7 +52,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET); if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { - dev_err(dev, + dev_dbg(dev, "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); return; } @@ -506,6 +506,62 @@ out: return offset; } +static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport 
*dport) +{ + resource_size_t rcrb = dport->rcrb.base; + void __iomem *addr; + u32 cap_hdr; + u16 offset; + + if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB")) + return CXL_RESOURCE_NONE; + + addr = ioremap(rcrb, SZ_4K); + if (!addr) { + dev_err(dev, "Failed to map region %pr\n", addr); + release_mem_region(rcrb, SZ_4K); + return CXL_RESOURCE_NONE; + } + + offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST)); + cap_hdr = readl(addr + offset); + while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) { + offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr); + if (offset == 0 || offset > SZ_4K) { + offset = 0; + break; + } + cap_hdr = readl(addr + offset); + } + + iounmap(addr); + release_mem_region(rcrb, SZ_4K); + if (!offset) + return CXL_RESOURCE_NONE; + + return offset; +} + +int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport) +{ + void __iomem *dport_pcie_cap = NULL; + resource_size_t pos; + struct cxl_rcrb_info *ri; + + ri = &dport->rcrb; + pos = cxl_rcrb_to_linkcap(&pdev->dev, dport); + if (pos == CXL_RESOURCE_NONE) + return -ENXIO; + + dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev, + ri->base + pos, + PCI_CAP_EXP_SIZEOF); + dport->regs.rcd_pcie_cap = dport_pcie_cap; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, CXL); + resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri, enum cxl_rcrb which) { diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 5406e3ab3d4a..f6015f24ad38 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -235,6 +235,14 @@ struct cxl_regs { struct_group_tagged(cxl_rch_regs, rch_regs, void __iomem *dport_aer; ); + + /* + * RCD upstream port specific PCIe cap register + * @pcie_cap: CXL 3.0 8.2.1.2 RCD Upstream Port RCRB + */ + struct_group_tagged(cxl_rcd_regs, rcd_regs, + void __iomem *rcd_pcie_cap; + ); }; struct cxl_reg_map { @@ -304,6 +312,7 @@ int cxl_setup_regs(struct cxl_register_map *map); struct cxl_dport; resource_size_t cxl_rcd_component_reg_phys(struct device *dev, struct cxl_dport *dport); +int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport); #define CXL_RESOURCE_NONE ((resource_size_t) -1) #define CXL_TARGET_STRLEN 20 diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 188412d45e0d..b2cb81f6d9e7 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -475,9 +475,9 @@ static bool is_cxl_restricted(struct pci_dev *pdev) } static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev, - struct cxl_register_map *map) + struct cxl_register_map *map, + struct cxl_dport *dport) { - struct cxl_dport *dport; resource_size_t component_reg_phys; *map = (struct cxl_register_map) { @@ -513,11 +513,24 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, * is an RCH and try to extract the Component Registers from * an RCRB. 
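* The RCRB is a 4K register block; on this path the driver now also * maps the RCD Upstream Port's PCIe capability out of it via * cxl_dport_map_rcd_linkcap(), walking the capability list within * that 4K window.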
*/ - if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) - rc = cxl_rcrb_get_comp_regs(pdev, map); - - if (rc) + if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) { + struct cxl_dport *dport; + struct cxl_port *port __free(put_cxl_port) = + cxl_pci_find_port(pdev, &dport); + if (!port) + return -EPROBE_DEFER; + + rc = cxl_rcrb_get_comp_regs(pdev, map, dport); + if (rc) + return rc; + + rc = cxl_dport_map_rcd_linkcap(pdev, dport); + if (rc) + return rc; + + } else if (rc) { return rc; + } return cxl_setup_regs(map); } @@ -764,10 +777,6 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge, return 0; } - rc = cxl_mem_alloc_event_buf(mds); - if (rc) - return rc; - rc = cxl_event_get_int_policy(mds, &policy); if (rc) return rc; @@ -781,6 +790,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge, return -EBUSY; } + rc = cxl_mem_alloc_event_buf(mds); + if (rc) + return rc; + rc = cxl_event_irqsetup(mds); if (rc) return rc; @@ -807,6 +820,83 @@ static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds) return 0; } +static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size_t width) +{ + struct cxl_dev_state *cxlds = dev_get_drvdata(dev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *root_dev; + struct cxl_dport *dport; + struct cxl_port *root __free(put_cxl_port) = + cxl_mem_find_port(cxlmd, &dport); + + if (!root) + return -ENXIO; + + root_dev = root->uport_dev; + if (!root_dev) + return -ENXIO; + + guard(device)(root_dev); + if (!root_dev->driver) + return -ENXIO; + + switch (width) { + case 2: + return sysfs_emit(buf, "%#x\n", + readw(dport->regs.rcd_pcie_cap + offset)); + case 4: + return sysfs_emit(buf, "%#x\n", + readl(dport->regs.rcd_pcie_cap + offset)); + default: + return -EINVAL; + } +} + +static ssize_t rcd_link_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCAP, buf, sizeof(u32)); +} +static DEVICE_ATTR_RO(rcd_link_cap); + +static ssize_t rcd_link_ctrl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCTL, buf, sizeof(u16)); +} +static DEVICE_ATTR_RO(rcd_link_ctrl); + +static ssize_t rcd_link_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKSTA, buf, sizeof(u16)); +} +static DEVICE_ATTR_RO(rcd_link_status); + +static struct attribute *cxl_rcd_attrs[] = { + &dev_attr_rcd_link_cap.attr, + &dev_attr_rcd_link_ctrl.attr, + &dev_attr_rcd_link_status.attr, + NULL +}; + +static umode_t cxl_rcd_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (is_cxl_restricted(pdev)) + return a->mode; + + return 0; +} + +static struct attribute_group cxl_rcd_group = { + .attrs = cxl_rcd_attrs, + .is_visible = cxl_rcd_visible, +}; +__ATTRIBUTE_GROUPS(cxl_rcd); + static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus); @@ -1016,6 +1106,7 @@ static struct pci_driver cxl_pci_driver = { .id_table = cxl_mem_pci_tbl, .probe = cxl_pci_probe, .err_handler = &cxl_error_handlers, + .dev_groups = cxl_rcd_groups, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index 446617b73aea..0867115aeef2 100644 --- a/drivers/dax/dax-private.h +++ 
b/drivers/dax/dax-private.h @@ -40,6 +40,12 @@ struct dax_region { struct device *youngest; }; +/** + * struct dax_mapping - device to display mapping range attributes + * @dev: device representing this range + * @range_id: index within dev_dax ranges array + * @id: ida of this mapping + */ struct dax_mapping { struct device dev; int range_id; @@ -47,6 +53,18 @@ struct dax_mapping { }; /** + * struct dev_dax_range - tuple representing a range of memory used by dev_dax + * @pgoff: page offset + * @range: resource-span + * @mapping: reference to the dax_mapping for this range + */ +struct dev_dax_range { + unsigned long pgoff; + struct range range; + struct dax_mapping *mapping; +}; + +/** * struct dev_dax - instance data for a subdivision of a dax region, and * data while the device is activated in the driver. * @region - parent region @@ -58,7 +76,7 @@ struct dax_mapping { * @dev - device core * @pgmap - pgmap for memmap setup / lifetime (driver owned) * @nr_range: size of @ranges - * @ranges: resource-span + pgoff tuples for the instance + * @ranges: range tuples of memory used */ struct dev_dax { struct dax_region *region; @@ -72,11 +90,7 @@ struct dev_dax { struct dev_pagemap *pgmap; bool memmap_on_memory; int nr_range; - struct dev_dax_range { - unsigned long pgoff; - struct range range; - struct dax_mapping *mapping; - } *ranges; + struct dev_dax_range *ranges; }; /* diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile deleted file mode 100644 index 191c31f0d4f0..000000000000 --- a/drivers/dax/pmem/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o -obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o - -dax_pmem-y := pmem.o -dax_pmem_core-y := core.o -dax_pmem_compat-y := compat.o diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c deleted file mode 100644 index dfe91a2990fe..000000000000 --- a/drivers/dax/pmem/pmem.c +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */ -#include <linux/percpu-refcount.h> -#include <linux/memremap.h> -#include <linux/module.h> -#include <linux/pfn_t.h> -#include <linux/nd.h> -#include "../bus.h" - - diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index b46eb8a552d7..fee04fdb0822 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -36,6 +36,7 @@ config UDMABUF depends on DMA_SHARED_BUFFER depends on MEMFD_CREATE || COMPILE_TEST depends on MMU + select VMAP_PFN help A driver to let userspace turn memfd regions into dma-bufs. Qemu can use this to create host dmabufs for guest framebuffers. diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 8892bc701a66..5ad0e9e2e1b9 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -176,8 +176,9 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) dmabuf = file->private_data; /* only support discovering the end of the buffer, - but also allow SEEK_SET to maintain the idiomatic - SEEK_END(0), SEEK_CUR(0) pattern */ + * but also allow SEEK_SET to maintain the idiomatic + * SEEK_END(0), SEEK_CUR(0) pattern. + */ if (whence == SEEK_END) base = dmabuf->size; else if (whence == SEEK_SET) @@ -558,7 +559,7 @@ static struct file *dma_buf_getfile(size_t size, int flags) * Override ->i_ino with the unique and dmabuffs specific * value.
*/ - inode->i_ino = atomic64_add_return(1, &dmabuf_inode); + inode->i_ino = atomic64_inc_return(&dmabuf_inode); flags &= O_ACCMODE | O_NONBLOCK; file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags, &dma_buf_fops); @@ -782,13 +783,14 @@ static void mangle_sg_table(struct sg_table *sg_table) /* To catch abuse of the underlying struct page by importers mix * up the bits, but take care to preserve the low SG_ bits to * not corrupt the sgt. The mixing is undone in __unmap_dma_buf - * before passing the sgt back to the exporter. */ + * before passing the sgt back to the exporter. + */ for_each_sgtable_sg(sg_table, sg, i) sg->page_link ^= ~0xffUL; #endif } -static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, +static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; @@ -1296,10 +1298,12 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); * vmap interface is introduced. Note that on very old 32-bit architectures * vmalloc space might be limited and result in vmap calls failing. * - * Interfaces:: + * Interfaces: * - * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) - * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) + * .. code-block:: c + * + * void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) + * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) * * The vmap call can fail if there is no vmap support in the exporter, or if * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference @@ -1356,10 +1360,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); * enough, since adding interfaces to intercept pagefaults and allow pte * shootdowns would increase the complexity quite a bit. * - * Interface:: + * Interface: + * + * .. code-block:: c * - * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*, - * unsigned long); + * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); * * If the importing subsystem simply provides a special-purpose mmap call to * set up a mapping in userspace, calling do_mmap with &dma_buf.file will @@ -1694,7 +1699,7 @@ static int dma_buf_init_debugfs(void) dma_buf_debugfs_dir = d; - d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, + d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir, NULL, &dma_buf_debug_fops); if (IS_ERR(d)) { pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 0393a9bba3a8..f0cdd3e99d36 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -309,8 +309,8 @@ bool dma_fence_begin_signalling(void) if (in_atomic()) return true; - /* ... and non-recursive readlock */ - lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_); + /* ... 
and non-recursive successful read_trylock */ + lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_); return false; } @@ -341,7 +341,7 @@ void __dma_fence_might_wait(void) lock_map_acquire(&dma_fence_lockdep_map); lock_map_release(&dma_fence_lockdep_map); if (tmp) - lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_); + lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_); } #endif @@ -412,7 +412,7 @@ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) unsigned long flags; int ret; - if (!fence) + if (WARN_ON(!fence)) return -EINVAL; spin_lock_irqsave(fence->lock, flags); @@ -464,7 +464,7 @@ int dma_fence_signal(struct dma_fence *fence) int ret; bool tmp; - if (!fence) + if (WARN_ON(!fence)) return -EINVAL; tmp = dma_fence_begin_signalling(); diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 93be88b805fe..9512d050563a 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -309,13 +309,13 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, struct page *page = cma_pages; while (nr_clear_pages > 0) { - void *vaddr = kmap_atomic(page); + void *vaddr = kmap_local_page(page); memset(vaddr, 0, PAGE_SIZE); - kunmap_atomic(vaddr); + kunmap_local(vaddr); /* * Avoid wasting time zeroing memory if the process - * has been killed by by SIGKILL + * has been killed by SIGKILL. */ if (fatal_signal_pending(current)) goto free_cma; @@ -366,7 +366,7 @@ static const struct dma_heap_ops cma_heap_ops = { .allocate = cma_heap_allocate, }; -static int __add_cma_heap(struct cma *cma, void *data) +static int __init __add_cma_heap(struct cma *cma, void *data) { struct cma_heap *cma_heap; struct dma_heap_export_info exp_info; @@ -391,7 +391,7 @@ static int __add_cma_heap(struct cma *cma, void *data) return 0; } -static int add_default_cma_heap(void) +static int __init add_default_cma_heap(void) { struct cma *default_cma = dev_get_cma_area(NULL); int ret = 0; diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index d78cdb9d01e5..26d5dc89ea16 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -421,7 +421,7 @@ static const struct dma_heap_ops system_heap_ops = { .allocate = system_heap_allocate, }; -static int system_heap_create(void) +static int __init system_heap_create(void) { struct dma_heap_export_info exp_info; diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index c353029789cf..f5905d67dedb 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -173,11 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence) return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops); } -static bool timeline_fence_enable_signaling(struct dma_fence *fence) -{ - return true; -} - static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) { @@ -211,7 +206,6 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin static const struct dma_fence_ops timeline_fence_ops = { .get_driver_name = timeline_fence_get_driver_name, .get_timeline_name = timeline_fence_get_timeline_name, - .enable_signaling = timeline_fence_enable_signaling, .signaled = timeline_fence_signaled, .release = timeline_fence_release, .fence_value_str = timeline_fence_value_str, diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 047c3cd2ceff..8ce1f074c2d3 100644 --- a/drivers/dma-buf/udmabuf.c +++ 
b/drivers/dma-buf/udmabuf.c @@ -27,15 +27,21 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is struct udmabuf { pgoff_t pagecount; struct folio **folios; + + /** + * Unlike folios, pinned_folios is only used for unpinning, so + * nr_pinned is not necessarily the same as pagecount: pinned_folios + * records each folio at the time it is pinned in udmabuf_create(). + * Note that, since a folio may be pinned multiple times, it can be + * added to pinned_folios multiple times, depending on how many times + * it was pinned at create time. + */ + pgoff_t nr_pinned; + struct folio **pinned_folios; + struct sg_table *sg; struct miscdevice *device; pgoff_t *offsets; - struct list_head unpin_list; -}; - -struct udmabuf_folio { - struct folio *folio; - struct list_head list; }; static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) @@ -43,7 +49,8 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct udmabuf *ubuf = vma->vm_private_data; pgoff_t pgoff = vmf->pgoff; - unsigned long pfn; + unsigned long addr, pfn; + vm_fault_t ret; if (pgoff >= ubuf->pagecount) return VM_FAULT_SIGBUS; @@ -51,7 +58,35 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) pfn = folio_pfn(ubuf->folios[pgoff]); pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT; - return vmf_insert_pfn(vma, vmf->address, pfn); + ret = vmf_insert_pfn(vma, vmf->address, pfn); + if (ret & VM_FAULT_ERROR) + return ret; + + /* pre-fault the rest of the VMA */ + pgoff = vma->vm_pgoff; + addr = vma->vm_start; + + for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) { + if (addr == vmf->address) + continue; + + if (WARN_ON(pgoff >= ubuf->pagecount)) + break; + + pfn = folio_pfn(ubuf->folios[pgoff]); + pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT; + + /** + * If the below vmf_insert_pfn() fails, we do not return an + * error here during this pre-fault step. However, an error + * will be returned if the failure occurs when the addr is + * actually accessed. + */ + if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR) + break; + } + + return ret; } static const struct vm_operations_struct udmabuf_vm_ops = { @@ -74,21 +109,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma) static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map) { struct udmabuf *ubuf = buf->priv; - struct page **pages; + unsigned long *pfns; void *vaddr; pgoff_t pg; dma_resv_assert_held(buf->resv); - pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL); - if (!pages) + /** + * HVO may free tail pages, so just use pfns to map each folio + * into the vmalloc area.
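+ * vmap_pfn() works from pfns directly, unlike the vm_map_ram() path + * it replaces, which required a struct page for every mapped page.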
+ */ + pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL); + if (!pfns) return -ENOMEM; - for (pg = 0; pg < ubuf->pagecount; pg++) - pages[pg] = &ubuf->folios[pg]->page; + for (pg = 0; pg < ubuf->pagecount; pg++) { + unsigned long pfn = folio_pfn(ubuf->folios[pg]); + + pfn += ubuf->offsets[pg] >> PAGE_SHIFT; + pfns[pg] = pfn; + } - vaddr = vm_map_ram(pages, ubuf->pagecount, -1); - kfree(pages); + vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL); + kvfree(pfns); if (!vaddr) return -EINVAL; @@ -159,34 +202,42 @@ static void unmap_udmabuf(struct dma_buf_attachment *at, return put_sg_table(at->dev, sg, direction); } -static void unpin_all_folios(struct list_head *unpin_list) +static void unpin_all_folios(struct udmabuf *ubuf) { - struct udmabuf_folio *ubuf_folio; + pgoff_t i; - while (!list_empty(unpin_list)) { - ubuf_folio = list_first_entry(unpin_list, - struct udmabuf_folio, list); - unpin_folio(ubuf_folio->folio); + for (i = 0; i < ubuf->nr_pinned; ++i) + unpin_folio(ubuf->pinned_folios[i]); - list_del(&ubuf_folio->list); - kfree(ubuf_folio); - } + kvfree(ubuf->pinned_folios); } -static int add_to_unpin_list(struct list_head *unpin_list, - struct folio *folio) +static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt) { - struct udmabuf_folio *ubuf_folio; + ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL); + if (!ubuf->folios) + return -ENOMEM; - ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL); - if (!ubuf_folio) + ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL); + if (!ubuf->offsets) + return -ENOMEM; + + ubuf->pinned_folios = kvmalloc_array(pgcnt, + sizeof(*ubuf->pinned_folios), + GFP_KERNEL); + if (!ubuf->pinned_folios) return -ENOMEM; - ubuf_folio->folio = folio; - list_add_tail(&ubuf_folio->list, unpin_list); return 0; } +static __always_inline void deinit_udmabuf(struct udmabuf *ubuf) +{ + unpin_all_folios(ubuf); + kvfree(ubuf->offsets); + kvfree(ubuf->folios); +} + static void release_udmabuf(struct dma_buf *buf) { struct udmabuf *ubuf = buf->priv; @@ -195,9 +246,7 @@ static void release_udmabuf(struct dma_buf *buf) if (ubuf->sg) put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL); - unpin_all_folios(&ubuf->unpin_list); - kfree(ubuf->offsets); - kfree(ubuf->folios); + deinit_udmabuf(ubuf); kfree(ubuf); } @@ -254,9 +303,6 @@ static int check_memfd_seals(struct file *memfd) { int seals; - if (!memfd) - return -EBADFD; - if (!shmem_file(memfd) && !is_file_hugepages(memfd)) return -EBADFD; @@ -291,100 +337,116 @@ static int export_udmabuf(struct udmabuf *ubuf, return dma_buf_fd(buf, flags); } +static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd, + loff_t start, loff_t size, struct folio **folios) +{ + pgoff_t nr_pinned = ubuf->nr_pinned; + pgoff_t upgcnt = ubuf->pagecount; + u32 cur_folio, cur_pgcnt; + pgoff_t pgoff, pgcnt; + long nr_folios; + loff_t end; + + pgcnt = size >> PAGE_SHIFT; + end = start + (pgcnt << PAGE_SHIFT) - 1; + nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff); + if (nr_folios <= 0) + return nr_folios ? 
nr_folios : -EINVAL; + + cur_pgcnt = 0; + for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) { + pgoff_t subpgoff = pgoff; + size_t fsize = folio_size(folios[cur_folio]); + + ubuf->pinned_folios[nr_pinned++] = folios[cur_folio]; + + for (; subpgoff < fsize; subpgoff += PAGE_SIZE) { + ubuf->folios[upgcnt] = folios[cur_folio]; + ubuf->offsets[upgcnt] = subpgoff; + ++upgcnt; + + if (++cur_pgcnt >= pgcnt) + goto end; + } + + /** + * In a given range, only the first subpage of the first folio + * has an offset, that is returned by memfd_pin_folios(). + * The first subpages of other folios (in the range) have an + * offset of 0. + */ + pgoff = 0; + } +end: + ubuf->pagecount = upgcnt; + ubuf->nr_pinned = nr_pinned; + return 0; +} + static long udmabuf_create(struct miscdevice *device, struct udmabuf_create_list *head, struct udmabuf_create_item *list) { - pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0; - long nr_folios, ret = -EINVAL; - struct file *memfd = NULL; - struct folio **folios; + unsigned long max_nr_folios = 0; + struct folio **folios = NULL; + pgoff_t pgcnt = 0, pglimit; struct udmabuf *ubuf; - u32 i, j, k, flags; - loff_t end; + long ret = -EINVAL; + u32 i, flags; ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL); if (!ubuf) return -ENOMEM; - INIT_LIST_HEAD(&ubuf->unpin_list); pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; for (i = 0; i < head->count; i++) { - if (!IS_ALIGNED(list[i].offset, PAGE_SIZE)) - goto err; - if (!IS_ALIGNED(list[i].size, PAGE_SIZE)) - goto err; - ubuf->pagecount += list[i].size >> PAGE_SHIFT; - if (ubuf->pagecount > pglimit) - goto err; + pgoff_t subpgcnt; + + if (!PAGE_ALIGNED(list[i].offset)) + goto err_noinit; + if (!PAGE_ALIGNED(list[i].size)) + goto err_noinit; + + subpgcnt = list[i].size >> PAGE_SHIFT; + pgcnt += subpgcnt; + if (pgcnt > pglimit) + goto err_noinit; + + max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios); } - if (!ubuf->pagecount) - goto err; + if (!pgcnt) + goto err_noinit; - ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios), - GFP_KERNEL); - if (!ubuf->folios) { - ret = -ENOMEM; + ret = init_udmabuf(ubuf, pgcnt); + if (ret) goto err; - } - ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets), - GFP_KERNEL); - if (!ubuf->offsets) { + + folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL); + if (!folios) { ret = -ENOMEM; goto err; } - pgbuf = 0; for (i = 0; i < head->count; i++) { - memfd = fget(list[i].memfd); - ret = check_memfd_seals(memfd); - if (ret < 0) - goto err; + struct file *memfd = fget(list[i].memfd); - pgcnt = list[i].size >> PAGE_SHIFT; - folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL); - if (!folios) { - ret = -ENOMEM; + if (!memfd) { + ret = -EBADFD; goto err; } - end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1; - ret = memfd_pin_folios(memfd, list[i].offset, end, - folios, pgcnt, &pgoff); - if (ret <= 0) { - kfree(folios); - if (!ret) - ret = -EINVAL; + ret = check_memfd_seals(memfd); + if (ret < 0) { + fput(memfd); goto err; } - nr_folios = ret; - pgoff >>= PAGE_SHIFT; - for (j = 0, k = 0; j < pgcnt; j++) { - ubuf->folios[pgbuf] = folios[k]; - ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT; - - if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) { - ret = add_to_unpin_list(&ubuf->unpin_list, - folios[k]); - if (ret < 0) { - kfree(folios); - goto err; - } - } - - pgbuf++; - if (++pgoff == folio_nr_pages(folios[k])) { - pgoff = 0; - if (++k == nr_folios) - break; - } - } - - kfree(folios); + ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset, + list[i].size, 
folios); fput(memfd); - memfd = NULL; + if (ret) + goto err; } flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0; @@ -392,15 +454,14 @@ static long udmabuf_create(struct miscdevice *device, if (ret < 0) goto err; + kvfree(folios); return ret; err: - if (memfd) - fput(memfd); - unpin_all_folios(&ubuf->unpin_list); - kfree(ubuf->offsets); - kfree(ubuf->folios); + deinit_udmabuf(ubuf); +err_noinit: kfree(ubuf); + kvfree(folios); return ret; } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d9ec1e69e428..e994d6e0779e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -378,6 +378,20 @@ config LOONGSON1_APB_DMA This selects support for the APB DMA controller in Loongson1 SoCs, which is required by Loongson1 NAND and audio support. +config LOONGSON2_APB_DMA + tristate "Loongson2 APB DMA support" + depends on LOONGARCH || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the Loongson2 APB DMA controller driver. The + DMA controller has a single DMA channel which can be + configured for different peripherals such as audio, NAND + and SDIO, which sit on the APB bus. + + This DMA controller transfers data from memory to a peripheral FIFO. + It does not support memory to memory data transfer. + config LPC18XX_DMAMUX bool "NXP LPC18xx/43xx DMA MUX for PL080" depends on ARCH_LPC18XX || COMPILE_TEST @@ -396,20 +410,6 @@ config LPC32XX_DMAMUX Support for PL080 multiplexed DMA request lines on LPC32XX platform. -config LS2X_APB_DMA - tristate "Loongson LS2X APB DMA support" - depends on LOONGARCH || COMPILE_TEST - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - help - Support for the Loongson LS2X APB DMA controller driver. The - DMA controller is having single DMA channel which can be - configured for different peripherals like audio, nand, sdio - etc which is in APB bus. - - This DMA controller transfers data from memory to peripheral fifo. - It does not support memory to memory data transfer.
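For context on the udmabuf rework above: the uapi the driver serves is unchanged, it still turns sealed memfd regions into dma-bufs. A minimal userspace sketch of that ABI (illustrative only, error handling omitted; the seal handling mirrors what check_memfd_seals() enforces):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

static int create_udmabuf(size_t size)
{
	int devfd = open("/dev/udmabuf", O_RDWR);
	int memfd = memfd_create("fb", MFD_ALLOW_SEALING);
	struct udmabuf_create create = {
		.memfd  = memfd,
		.flags  = UDMABUF_FLAGS_CLOEXEC,
		.offset = 0,	/* must be PAGE_SIZE aligned */
		.size   = size,	/* must be PAGE_SIZE aligned */
	};

	ftruncate(memfd, size);
	/* the driver requires F_SEAL_SHRINK before pinning the folios */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	return ioctl(devfd, UDMABUF_CREATE, &create);	/* a dma-buf fd */
}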
- config MCF_EDMA tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs" depends on M5441x || (COMPILE_TEST && FSL_EDMA=n) diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index ad6a03c052ec..5b2a52f4f2ee 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -50,9 +50,9 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-y += idxd/ obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o +obj-$(CONFIG_LOONGSON2_APB_DMA) += loongson2-apb-dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o -obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index a58a1600dd65..2abbe11e797e 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -9,18 +9,21 @@ * Mika Westerberg <mika.westerberg@linux.intel.com> */ +#include <linux/acpi.h> +#include <linux/acpi_dma.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/module.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/ioport.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mutex.h> -#include <linux/slab.h> -#include <linux/ioport.h> -#include <linux/acpi.h> -#include <linux/acpi_dma.h> #include <linux/property.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); @@ -236,7 +239,7 @@ int acpi_dma_controller_free(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_dma_controller_free); -static void devm_acpi_dma_release(struct device *dev, void *res) +static void devm_acpi_dma_free(void *dev) { acpi_dma_controller_free(dev); } @@ -259,37 +262,15 @@ int devm_acpi_dma_controller_register(struct device *dev, (struct acpi_dma_spec *, struct acpi_dma *), void *data) { - void *res; int ret; - res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); - if (!res) - return -ENOMEM; - ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); - if (ret) { - devres_free(res); + if (ret) return ret; - } - devres_add(dev, res); - return 0; -} -EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); -/** - * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() - * @dev: device that is unregistering as DMA controller - * - * Unregister a DMA controller registered with - * devm_acpi_dma_controller_register(). Normally this function will not need to - * be called and the resource management code will ensure that the resource is - * freed. 
- */ -void devm_acpi_dma_controller_free(struct device *dev) -{ - WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL)); + return devm_add_action_or_reset(dev, devm_acpi_dma_free, dev); } -EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); +EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); /** * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index e6a6566b309e..a203fdd84950 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -954,7 +954,7 @@ static struct platform_driver msgdma_driver = { .of_match_table = of_match_ptr(msgdma_match), }, .probe = msgdma_probe, - .remove_new = msgdma_remove, + .remove = msgdma_remove, }; module_platform_driver(msgdma_driver); diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c index b0a1f3ad851b..6d9079458fe9 100644 --- a/drivers/dma/amd/qdma/qdma.c +++ b/drivers/dma/amd/qdma/qdma.c @@ -1133,7 +1133,7 @@ static struct platform_driver amd_qdma_driver = { .name = "amd-qdma", }, .probe = amd_qdma_probe, - .remove_new = amd_qdma_remove, + .remove = amd_qdma_remove, }; module_platform_driver(amd_qdma_driver); diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index 9588773dd2eb..c499173d80b2 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -950,7 +950,7 @@ static struct platform_driver apple_admac_driver = { .of_match_table = admac_of_match, }, .probe = admac_probe, - .remove_new = admac_remove, + .remove = admac_remove, }; module_platform_driver(apple_admac_driver); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index baebddc740b0..2d147712cbc6 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -2250,7 +2250,7 @@ static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = { }; static struct platform_driver at_dma_driver = { - .remove_new = at_dma_remove, + .remove = at_dma_remove, .shutdown = at_dma_shutdown, .id_table = atdma_devtypes, .driver = { diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 299396121e6d..9c7b40220004 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -2476,7 +2476,7 @@ MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids); static struct platform_driver at_xdmac_driver = { .probe = at_xdmac_probe, - .remove_new = at_xdmac_remove, + .remove = at_xdmac_remove, .driver = { .name = "at_xdmac", .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index cfa6e1167a1f..7f0e76439ce5 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(of, sba_of_match); static struct platform_driver sba_driver = { .probe = sba_probe, - .remove_new = sba_remove, + .remove = sba_remove, .driver = { .name = "bcm-sba-raid", .of_match_table = sba_of_match, diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e1b92b4d7b05..7ba52dee40a9 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -1029,7 +1029,7 @@ static void bcm2835_dma_remove(struct platform_device *pdev) static struct platform_driver bcm2835_dma_driver = { .probe = bcm2835_dma_probe, - .remove_new = bcm2835_dma_remove, + .remove = bcm2835_dma_remove, .driver = { .name = "bcm2835-dma", .of_match_table = of_match_ptr(bcm2835_dma_of_match), diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 0bbaa7620bdd..6c4d655ffe77 100644 --- 
a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -486,7 +486,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); static struct platform_driver mpc52xx_bcom_of_platform_driver = { .probe = mpc52xx_bcom_probe, - .remove_new = mpc52xx_bcom_remove, + .remove = mpc52xx_bcom_remove, .driver = { .name = DRIVER_NAME, .of_match_table = mpc52xx_bcom_of_match, diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index c9cfa341db51..100057603fd4 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1122,7 +1122,7 @@ MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); static struct platform_driver jz4780_dma_driver = { .probe = jz4780_dma_probe, - .remove_new = jz4780_dma_remove, + .remove = jz4780_dma_remove, .driver = { .name = "jz4780-dma", .of_match_table = jz4780_dma_dt_match, diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index fffafa86d964..b23536645ff7 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -1676,7 +1676,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); static struct platform_driver dw_driver = { .probe = dw_probe, - .remove_new = dw_remove, + .remove = dw_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = dw_dma_of_id_table, diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 47c58ad468cb..2606cf9cd429 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -191,7 +191,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = { static struct platform_driver dw_driver = { .probe = dw_probe, - .remove_new = dw_remove, + .remove = dw_remove, .shutdown = dw_shutdown, .driver = { .name = DRV_NAME, diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 6b98a23e3332..e424bb5c40e7 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -929,8 +929,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) /* Sanity check the channel parameters */ if (!edmac->edma->m2m) { - if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 || - edmac->dma_cfg.port > EP93XX_DMA_IRDA) + if (edmac->dma_cfg.port > EP93XX_DMA_IRDA) return -EINVAL; if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan)) return -EINVAL; diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index f9f1eda79254..60de1003193a 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -740,7 +740,7 @@ static struct platform_driver fsl_edma_driver = { .pm = &fsl_edma_pm_ops, }, .probe = fsl_edma_probe, - .remove_new = fsl_edma_remove, + .remove = fsl_edma_remove, }; static int __init fsl_edma_init(void) diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 5005e138fc23..823f5c6bc2e1 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -1288,7 +1288,7 @@ static struct platform_driver fsl_qdma_driver = { .of_match_table = fsl_qdma_dt_ids, }, .probe = fsl_qdma_probe, - .remove_new = fsl_qdma_remove, + .remove = fsl_qdma_remove, }; module_platform_driver(fsl_qdma_driver); diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 014ff523d5ec..6aa97e258a55 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -886,7 +886,7 @@ static struct platform_driver fsl_re_driver = { .of_match_table = fsl_re_ids, }, .probe = fsl_re_probe, - .remove_new = fsl_re_remove, + .remove = fsl_re_remove, }; module_platform_driver(fsl_re_driver); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 
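The ep93xx_dma.c hunk above drops the lower-bound half of a range check. Assuming EP93XX_DMA_I2S1 is the first, zero-valued enumerator and dma_cfg.port is an unsigned type, "port < EP93XX_DMA_I2S1" can never be true, so only the upper bound is meaningful; compilers typically warn about such always-false unsigned comparisons. A small illustration under those assumptions:

#include <linux/types.h>

/* Assumed shape of the port enum: the first entry is 0. */
enum sketch_dma_port {
	SKETCH_DMA_I2S1,		/* = 0 */
	/* ... intermediate ports ... */
	SKETCH_DMA_IRDA = 10,		/* placeholder value */
};

static bool sketch_port_valid(u32 port)
{
	/*
	 * "port < SKETCH_DMA_I2S1" would compare an unsigned value
	 * against zero and always be false, so the upper-bound test
	 * alone is sufficient.
	 */
	return port <= SKETCH_DMA_IRDA;
}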
18a6c4bf6275..b5e7d18b9766 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1404,7 +1404,7 @@ static struct platform_driver fsldma_of_driver = { #endif }, .probe = fsldma_of_probe, - .remove_new = fsldma_of_remove, + .remove = fsldma_of_remove, }; /*----------------------------------------------------------------------------*/ diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 3c648308a54a..d147353d47ab 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -693,7 +693,7 @@ static const struct dev_pm_ops idma64_dev_pm_ops = { static struct platform_driver idma64_platform_driver = { .probe = idma64_platform_probe, - .remove_new = idma64_platform_remove, + .remove = idma64_platform_remove, .driver = { .name = LPSS_IDMA64_DRIVER_NAME, .pm = &idma64_dev_pm_ops, diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index e16dbf9ab324..c426511f2104 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -6,6 +6,10 @@ #include <uapi/linux/idxd.h> /* PCI Config */ +#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb +#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 +#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 + #define DEVICE_VERSION_1 0x100 #define DEVICE_VERSION_2 0x200 diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index 0532dd2640dc..4127c1bdcca7 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c @@ -1076,7 +1076,7 @@ static struct platform_driver mdc_dma_driver = { .of_match_table = of_match_ptr(mdc_dma_of_match), }, .probe = mdc_dma_probe, - .remove_new = mdc_dma_remove, + .remove = mdc_dma_remove, }; module_platform_driver(mdc_dma_driver); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index e913f0db99da..a651e0995ce8 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -1233,7 +1233,7 @@ static struct platform_driver imxdma_driver = { .name = "imx-dma", .of_match_table = imx_dma_of_dev_id, }, - .remove_new = imxdma_remove, + .remove = imxdma_remove, }; static int __init imxdma_module_init(void) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 72299a08af44..3449006cd14b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -2440,7 +2440,7 @@ static struct platform_driver sdma_driver = { .name = "imx-sdma", .of_match_table = sdma_dt_ids, }, - .remove_new = sdma_remove, + .remove = sdma_remove, .probe = sdma_probe, }; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 5de8c21d41e7..acc2983e28e0 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -1028,7 +1028,7 @@ static struct platform_driver k3_pdma_driver = { .of_match_table = k3_pdma_dt_ids, }, .probe = k3_dma_probe, - .remove_new = k3_dma_remove, + .remove = k3_dma_remove, }; module_platform_driver(k3_pdma_driver); diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/loongson2-apb-dma.c index 9652e8666722..367ed34ce4da 100644 --- a/drivers/dma/ls2x-apb-dma.c +++ b/drivers/dma/loongson2-apb-dma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Driver for the Loongson LS2X APB DMA Controller + * Driver for the Loongson-2 APB DMA Controller * * Copyright (C) 2017-2023 Loongson Corporation */ @@ -692,7 +692,7 @@ MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table); static struct platform_driver ls2x_dmac_driver = { .probe = ls2x_dma_probe, - .remove_new = ls2x_dma_remove, + .remove = ls2x_dma_remove, .driver = { .name = "ls2x-apbdma", .of_match_table = ls2x_dma_of_match_table, @@ -700,6 +700,6 @@ static struct platform_driver ls2x_dmac_driver = { 
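Most hunks in this stretch are a mechanical rename of the platform-driver teardown hook from .remove_new back to .remove. .remove_new was a transitional name introduced while the callback's return type migrated from int to void; once the conversion was complete tree-wide, .remove itself took the void-returning prototype and the temporary alias could be retired. Sketched on a hypothetical driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	return 0;
}

/*
 * Remove callbacks return void: by the time remove runs, the device
 * is going away and the core cannot act on an error anyway.
 */
static void sketch_remove(struct platform_device *pdev)
{
}

static struct platform_driver sketch_driver = {
	.driver = {
		.name = "sketch-dma",
	},
	.probe = sketch_probe,
	.remove = sketch_remove,	/* formerly .remove_new */
};
module_platform_driver(sketch_driver);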
}; module_platform_driver(ls2x_dmac_driver); -MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver"); +MODULE_DESCRIPTION("Loongson-2 APB DMA Controller driver"); MODULE_AUTHOR("Loongson Technology Corporation Limited"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c index 0c5862bf26f8..9e1c6400c77b 100644 --- a/drivers/dma/mcf-edma-main.c +++ b/drivers/dma/mcf-edma-main.c @@ -267,7 +267,7 @@ static struct platform_driver mcf_edma_driver = { .name = "mcf-edma", }, .probe = mcf_edma_probe, - .remove_new = mcf_edma_remove, + .remove = mcf_edma_remove, }; bool mcf_edma_filter_fn(struct dma_chan *chan, void *param) diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index b69eabf12a24..d5ddb4e30e71 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -922,7 +922,7 @@ static void mtk_cqdma_remove(struct platform_device *pdev) static struct platform_driver mtk_cqdma_driver = { .probe = mtk_cqdma_probe, - .remove_new = mtk_cqdma_remove, + .remove = mtk_cqdma_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = mtk_cqdma_match, diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 58c7961ab9ad..fa77bb24a430 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -1038,7 +1038,7 @@ static void mtk_hsdma_remove(struct platform_device *pdev) static struct platform_driver mtk_hsdma_driver = { .probe = mtk_hsdma_probe, - .remove_new = mtk_hsdma_remove, + .remove = mtk_hsdma_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = mtk_hsdma_match, diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 1bdc1500be40..08e15177427b 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -637,7 +637,7 @@ static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { static struct platform_driver mtk_uart_apdma_driver = { .probe = mtk_uart_apdma_probe, - .remove_new = mtk_uart_apdma_remove, + .remove = mtk_uart_apdma_remove, .driver = { .name = KBUILD_MODNAME, .pm = &mtk_uart_apdma_pm_ops, diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c index 7b41c670970a..9a5ec247ed6d 100644 --- a/drivers/dma/milbeaut-hdmac.c +++ b/drivers/dma/milbeaut-hdmac.c @@ -571,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match); static struct platform_driver milbeaut_hdmac_driver = { .probe = milbeaut_hdmac_probe, - .remove_new = milbeaut_hdmac_remove, + .remove = milbeaut_hdmac_remove, .driver = { .name = "milbeaut-m10v-hdmac", .of_match_table = milbeaut_hdmac_match, diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c index 2cce529b448e..58d4fd6df0bf 100644 --- a/drivers/dma/milbeaut-xdmac.c +++ b/drivers/dma/milbeaut-xdmac.c @@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match); static struct platform_driver milbeaut_xdmac_driver = { .probe = milbeaut_xdmac_probe, - .remove_new = milbeaut_xdmac_remove, + .remove = milbeaut_xdmac_remove, .driver = { .name = "milbeaut-m10v-xdmac", .of_match_table = milbeaut_xdmac_match, diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 136fcaeff8dd..a95d31103d30 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1137,7 +1137,7 @@ static struct platform_driver mmp_pdma_driver = { }, .id_table = mmp_pdma_id_table, .probe = mmp_pdma_probe, - .remove_new = mmp_pdma_remove, + .remove = mmp_pdma_remove, }; 
module_platform_driver(mmp_pdma_driver); diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index b76fe99e1151..c8dc504510f1 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -736,7 +736,7 @@ static struct platform_driver mmp_tdma_driver = { .of_match_table = mmp_tdma_dt_ids, }, .probe = mmp_tdma_probe, - .remove_new = mmp_tdma_remove, + .remove = mmp_tdma_remove, }; module_platform_driver(mmp_tdma_driver); diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 66dc6d31b603..de09e1ab7767 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -644,7 +644,7 @@ MODULE_DEVICE_TABLE(of, moxart_dma_match); static struct platform_driver moxart_driver = { .probe = moxart_probe, - .remove_new = moxart_remove, + .remove = moxart_remove, .driver = { .name = "moxart-dma-engine", .of_match_table = moxart_dma_match, diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 68c247a46321..bf131cb5db66 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -1110,7 +1110,7 @@ MODULE_DEVICE_TABLE(of, mpc_dma_match); static struct platform_driver mpc_dma_driver = { .probe = mpc_dma_probe, - .remove_new = mpc_dma_remove, + .remove = mpc_dma_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc_dma_match, diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index c8c67f4d982c..cad4d4fb51ac 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -635,7 +635,7 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) writel(MV_XOR_V2_DESC_NUM, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF); - /* write the DESQ address to the DMA enngine*/ + /* write the DESQ address to the DMA engine*/ writel(lower_32_bits(xor_dev->hw_desq), xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); writel(upper_32_bits(xor_dev->hw_desq), @@ -884,7 +884,7 @@ static struct platform_driver mv_xor_v2_driver = { .probe = mv_xor_v2_probe, .suspend = mv_xor_v2_suspend, .resume = mv_xor_v2_resume, - .remove_new = mv_xor_v2_remove, + .remove = mv_xor_v2_remove, .driver = { .name = "mv_xor_v2", .of_match_table = of_match_ptr(mv_xor_v2_dt_ids), diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 3b011a91d48e..0d6324c4e2be 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1515,7 +1515,7 @@ static struct platform_driver nbpf_driver = { }, .id_table = nbpf_ids, .probe = nbpf_probe, - .remove_new = nbpf_remove, + .remove = nbpf_remove, }; module_platform_driver(nbpf_driver); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index aa436f9e3571..57cec757d8f5 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -1252,7 +1252,7 @@ static void owl_dma_remove(struct platform_device *pdev) static struct platform_driver owl_dma_driver = { .probe = owl_dma_probe, - .remove_new = owl_dma_remove, + .remove = owl_dma_remove, .driver = { .name = "dma-owl", .of_match_table = of_match_ptr(owl_dma_match), diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 7b78759ac734..9d2a5a967a99 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4549,7 +4549,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); static struct platform_driver ppc440spe_adma_driver = { .probe = ppc440spe_adma_probe, - .remove_new = ppc440spe_adma_remove, + .remove = ppc440spe_adma_remove, .driver = { .name = "PPC440SP(E)-ADMA", .of_match_table = ppc440spe_adma_of_match, diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 31f8da810c05..e50cf3357e5e 100644 
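Beside the comment fix, the mv_xor_v2 context shows the standard idiom for programming a 64-bit DMA address into a pair of 32-bit registers: lower_32_bits()/upper_32_bits() make the split explicit and avoid undefined shifts when dma_addr_t is only 32 bits wide. The idiom in isolation, with placeholder register offsets:

#include <linux/io.h>
#include <linux/kernel.h>

#define SKETCH_DESQ_BASE_LO	0x00	/* placeholder offsets */
#define SKETCH_DESQ_BASE_HI	0x04

static void sketch_program_desq(void __iomem *base, dma_addr_t hw_desq)
{
	/* Low word, then high word, of the descriptor queue address. */
	writel(lower_32_bits(hw_desq), base + SKETCH_DESQ_BASE_LO);
	writel(upper_32_bits(hw_desq), base + SKETCH_DESQ_BASE_HI);
}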
--- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1442,7 +1442,7 @@ static struct platform_driver pxad_driver = { }, .id_table = pxad_id_table, .probe = pxad_probe, - .remove_new = pxad_remove, + .remove = pxad_remove, }; static bool pxad_filter_fn(struct dma_chan *chan, void *param) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index d43a881e43b9..bbc3276992bb 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -1469,7 +1469,7 @@ static const struct dev_pm_ops bam_dma_pm_ops = { static struct platform_driver bam_dma_driver = { .probe = bam_dma_probe, - .remove_new = bam_dma_remove, + .remove = bam_dma_remove, .driver = { .name = "bam-dma-engine", .pm = &bam_dma_pm_ops, diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 4d2cd8d9ec74..c2b3e4452e71 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -948,7 +948,7 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); static struct platform_driver hidma_driver = { .probe = hidma_probe, - .remove_new = hidma_remove, + .remove = hidma_remove, .shutdown = hidma_shutdown, .driver = { .name = "hidma", diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c index c1db398adc84..6be54fddcee1 100644 --- a/drivers/dma/qcom/qcom_adm.c +++ b/drivers/dma/qcom/qcom_adm.c @@ -937,7 +937,7 @@ MODULE_DEVICE_TABLE(of, adm_of_match); static struct platform_driver adm_dma_driver = { .probe = adm_dma_probe, - .remove_new = adm_dma_remove, + .remove = adm_dma_remove, .driver = { .name = "adm-dma-engine", .of_match_table = adm_of_match, diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 01e656c69e6c..dc1a9a05252e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -1079,7 +1079,7 @@ static struct platform_driver sa11x0_dma_driver = { .pm = &sa11x0_dma_pm_ops, }, .probe = sa11x0_dma_probe, - .remove_new = sa11x0_dma_remove, + .remove = sa11x0_dma_remove, }; static int __init sa11x0_dma_init(void) diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 428473611115..7ad3c29be146 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -354,7 +354,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id) if (!residue) { tasklet_hi_schedule(&chan->done_tasklet); } else { - /* submit next trascatioin if possible */ + /* submit next transaction if possible */ struct sf_pdma_desc *desc = chan->desc; desc->src_addr += desc->xfer_size - residue; @@ -633,7 +633,7 @@ MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); static struct platform_driver sf_pdma_driver = { .probe = sf_pdma_probe, - .remove_new = sf_pdma_remove, + .remove = sf_pdma_remove, .driver = { .name = "sf-pdma", .of_match_table = sf_pdma_dt_ids, diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index c0b2997ab7fd..6ea5a880b433 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -49,10 +49,10 @@ config RENESAS_USB_DMAC SoCs. config RZ_DMAC - tristate "Renesas RZ/{G2L,V2L} DMA Controller" - depends on ARCH_RZG2L || COMPILE_TEST + tristate "Renesas RZ DMA Controller" + depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST select RENESAS_DMA select DMA_VIRTUAL_CHANNELS help - This driver supports the general purpose DMA controller found in the - Renesas RZ/{G2L,V2L} SoC variants. + This driver supports the general purpose DMA controller typically + found in the Renesas RZ SoC variants. 
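The sf-pdma comment fix above sits in the ISR branch that handles a partially completed transfer: when the residue is non-zero, the descriptor is advanced past the bytes already moved and resubmitted for the remainder. Restated in isolation (field names follow the visible sf_pdma_desc usage; this is a sketch of the arithmetic, not the driver's full resubmission logic):

#include <linux/types.h>

struct sketch_desc {
	u64 src_addr;
	u64 dst_addr;
	u64 xfer_size;
};

/*
 * Advance the descriptor past the completed portion and shrink it to
 * the residue, so the next submission resumes where hardware stopped.
 */
static void sketch_resume_partial(struct sketch_desc *desc, u64 residue)
{
	u64 done = desc->xfer_size - residue;

	desc->src_addr += done;
	desc->dst_addr += done;
	desc->xfer_size = residue;
}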
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 1094a2f82164..2679c1f09faf 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -2037,7 +2037,7 @@ static struct platform_driver rcar_dmac_driver = { .of_match_table = rcar_dmac_of_ids, }, .probe = rcar_dmac_probe, - .remove_new = rcar_dmac_remove, + .remove = rcar_dmac_remove, .shutdown = rcar_dmac_shutdown, }; diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c index 811389fc9cb8..9235db551026 100644 --- a/drivers/dma/sh/rz-dmac.c +++ b/drivers/dma/sh/rz-dmac.c @@ -893,7 +893,7 @@ static int rz_dmac_probe(struct platform_device *pdev) /* Initialize the channels. */ INIT_LIST_HEAD(&dmac->engine.channels); - dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev); + dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev); if (IS_ERR(dmac->rstc)) return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc), "failed to get resets\n"); @@ -1004,7 +1004,7 @@ static struct platform_driver rz_dmac_driver = { .of_match_table = of_rz_dmac_match, }, .probe = rz_dmac_probe, - .remove_new = rz_dmac_remove, + .remove = rz_dmac_remove, }; module_platform_driver(rz_dmac_driver); diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 588c5f409a80..fdd41e1c2263 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -961,7 +961,7 @@ void shdma_chan_probe(struct shdma_dev *sdev, spin_lock_init(&schan->chan_lock); - /* Init descripter manage list */ + /* Init descriptor manage list */ INIT_LIST_HEAD(&schan->ld_queue); INIT_LIST_HEAD(&schan->ld_free); diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 8ead0a1fd237..093e449e19ee 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -906,7 +906,7 @@ static struct platform_driver sh_dmae_driver = { .pm = &sh_dmae_pm, .name = SH_DMAE_DRV_NAME, }, - .remove_new = sh_dmae_remove, + .remove = sh_dmae_remove, }; static int __init sh_dmae_init(void) diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index f7cd0cad056c..7e2b6c97fa2f 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -301,7 +301,7 @@ static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc = NULL; unsigned long flags; - /* Get a freed descritpor */ + /* Get a freed descriptor */ spin_lock_irqsave(&chan->vc.lock, flags); list_for_each_entry(desc, &chan->desc_freed, node) { if (sg_len <= desc->sg_allocated_len) { @@ -899,7 +899,7 @@ static struct platform_driver usb_dmac_driver = { .of_match_table = usb_dmac_of_ids, }, .probe = usb_dmac_probe, - .remove_new = usb_dmac_remove, + .remove = usb_dmac_remove, .shutdown = usb_dmac_shutdown, }; diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 3f54ff37c5e0..187a090463ce 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1298,7 +1298,7 @@ static const struct dev_pm_ops sprd_dma_pm_ops = { static struct platform_driver sprd_dma_driver = { .probe = sprd_dma_probe, - .remove_new = sprd_dma_remove, + .remove = sprd_dma_remove, .driver = { .name = "sprd-dma", .of_match_table = sprd_dma_match, diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index 8880b5e336f8..c65ee0c7bfbd 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c @@ -858,7 +858,7 @@ static struct platform_driver st_fdma_platform_driver = { .of_match_table = st_fdma_match, }, .probe = st_fdma_probe, - .remove_new = st_fdma_remove, + .remove = st_fdma_remove, }; 
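The rz-dmac probe change from devm_reset_control_array_get_exclusive() to the optional variant supports the widened hardware scope of the Kconfig change above: on SoCs whose device tree supplies no resets property, the optional getter returns NULL instead of an error, and the reset_control_*() calls accept NULL as a no-op, while genuine lookup failures still propagate. The consumption pattern, sketched:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int sketch_get_resets(struct device *dev)
{
	struct reset_control *rstc;

	/* NULL when the DT has no resets; ERR_PTR() on real failures. */
	rstc = devm_reset_control_array_get_optional_exclusive(dev);
	if (IS_ERR(rstc))
		return dev_err_probe(dev, PTR_ERR(rstc),
				     "failed to get resets\n");

	/* Safe even when rstc is NULL. */
	return reset_control_deassert(rstc);
}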
module_platform_driver(st_fdma_platform_driver); diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index 0be6e944df6f..0c6c4258b195 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -221,6 +221,8 @@ enum stm32_dma3_port_data_width { #define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */ #define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */ #define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */ +#define STM32_DMA3_DT_NOPACK BIT(16) /* CTR1_PAM */ +#define STM32_DMA3_DT_NOREFACT BIT(17) /* struct stm32_dma3_chan .config_set bitfield */ #define STM32_DMA3_CFG_SET_DT BIT(0) @@ -228,6 +230,8 @@ enum stm32_dma3_port_data_width { #define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA) #define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64) +#define STM32_DMA3_MAX_BURST_LEN (1 + min_t(u32, FIELD_MAX(CTR1_SBL_1), \ + FIELD_MAX(CTR1_DBL_1))) #define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \ ((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); }) #define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \ @@ -293,6 +297,10 @@ struct stm32_dma3_chan { u32 dma_status; }; +struct stm32_dma3_pdata { + u32 axi_max_burst_len; +}; + struct stm32_dma3_ddata { struct dma_device dma_dev; void __iomem *base; @@ -301,6 +309,7 @@ struct stm32_dma3_ddata { u32 dma_channels; u32 dma_requests; enum stm32_dma3_port_data_width ports_max_dw[2]; + u32 axi_max_burst_len; }; static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan) @@ -533,7 +542,8 @@ static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst, return 1 << __ffs(len | addr | max_dw); } -static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 chan_max_burst) +static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, + u32 chan_max_burst, u32 bus_max_burst) { u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1; @@ -544,8 +554,9 @@ static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 cha /* * HW doesn't modify the burst if burst size <= half of the fifo size. * If len is not a multiple of burst size, last burst is shortened by HW. + * Take care of maximum burst supported on interconnect bus. 
*/ - return max_burst; + return min_t(u32, max_burst, bus_max_burst); } static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir, @@ -554,6 +565,7 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf { struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); struct dma_device dma_device = ddata->dma_dev; + u32 src_max_burst = STM32_DMA3_MAX_BURST_LEN, dst_max_burst = STM32_DMA3_MAX_BURST_LEN; u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max; u32 _ctr1 = 0, _ctr2 = 0; u32 ch_conf = chan->dt_config.ch_conf; @@ -594,10 +606,14 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf _ctr1 |= CTR1_SINC; if (sap) _ctr1 |= CTR1_SAP; + if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */ + src_max_burst = ddata->axi_max_burst_len; if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf)) _ctr1 |= CTR1_DINC; if (dap) _ctr1 |= CTR1_DAP; + if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */ + dst_max_burst = ddata->axi_max_burst_len; _ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ; if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf)) @@ -617,11 +633,16 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf /* Set destination (device) data width and burst */ ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr)); - dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst)); + dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst, + dst_max_burst)); /* Set source (memory) data width and burst */ sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr); - sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst); + sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst); + if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) { + sdw = ddw; + sbl_max = dbl_max; + } _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); @@ -647,11 +668,17 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf /* Set source (device) data width and burst */ sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr)); - sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst)); + sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst, + src_max_burst)); /* Set destination (memory) data width and burst */ ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr); - dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst); + dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst); + if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) || + ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */ + ddw = sdw; + dbl_max = sbl_max; + } _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); @@ -678,22 +705,24 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf init_dw = sdw; init_bl_max = sbl_max; sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr); - sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst); + sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst); if (chan->config_set & STM32_DMA3_CFG_SET_DMA) { sdw = min_t(u32, init_dw, sdw); - sbl_max = min_t(u32, init_bl_max, - 
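With the added bus_max_burst parameter, stm32_dma3_get_max_burst() now clamps the channel-derived burst length to what the master port can carry: callers pass STM32_DMA3_MAX_BURST_LEN by default and substitute the platform-data limit when the source or destination goes through the AXI port. The visible endpoints of the function, sketched (the driver applies further length-based adjustments in the elided middle):

#include <linux/minmax.h>
#include <linux/types.h>

static u32 sketch_get_max_burst(u32 dw, u32 chan_max_burst, u32 bus_max_burst)
{
	/* Beats the channel FIFO can sustain at this data width. */
	u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1;

	/* ... length alignment handling elided ... */

	/* Never exceed what the interconnect port supports. */
	return min_t(u32, max_burst, bus_max_burst);
}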
stm32_dma3_get_max_burst(len, sdw, chan->max_burst)); + sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw, + chan->max_burst, + src_max_burst)); } /* Set destination (memory) data width and burst */ init_dw = ddw; init_bl_max = dbl_max; ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr); - dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst); + dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst); if (chan->config_set & STM32_DMA3_CFG_SET_DMA) { ddw = min_t(u32, init_dw, ddw); - dbl_max = min_t(u32, init_bl_max, - stm32_dma3_get_max_burst(len, ddw, chan->max_burst)); + dbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, ddw, + chan->max_burst, + dst_max_burst)); } _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); @@ -1116,6 +1145,28 @@ static void stm32_dma3_free_chan_resources(struct dma_chan *c) chan->config_set = 0; } +static u32 stm32_dma3_get_ll_count(struct stm32_dma3_chan *chan, size_t len, bool prevent_refactor) +{ + u32 count; + + if (prevent_refactor) + return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE); + + count = len / STM32_DMA3_MAX_BLOCK_SIZE; + len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE; + + if (len >= chan->max_burst) { + count += 1; /* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */ + len -= (len / chan->max_burst) * chan->max_burst; + } + + /* Unaligned remainder fits in one extra item */ + if (len > 0) + count += 1; + + return count; +} + static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan, dma_addr_t dst, dma_addr_t src) { @@ -1150,8 +1201,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha struct stm32_dma3_swdesc *swdesc; size_t next_size, offset; u32 count, i, ctr1, ctr2; + bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || + !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); - count = DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE); + count = stm32_dma3_get_ll_count(chan, len, prevent_refactor); swdesc = stm32_dma3_chan_desc_alloc(chan, count); if (!swdesc) @@ -1167,6 +1220,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha remaining = len - offset; next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE); + if (!prevent_refactor && + (next_size < STM32_DMA3_MAX_BLOCK_SIZE && next_size >= chan->max_burst)) + next_size = chan->max_burst * (remaining / chan->max_burst); + ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2, src + offset, dst + offset, next_size); if (ret) @@ -1203,14 +1260,13 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan size_t len; dma_addr_t sg_addr, dev_addr, src, dst; u32 i, j, count, ctr1, ctr2; + bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || + !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); int ret; - count = sg_len; - for_each_sg(sgl, sg, sg_len, i) { - len = sg_dma_len(sg); - if (len > STM32_DMA3_MAX_BLOCK_SIZE) - count += DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE) - 1; - } + count = 0; + for_each_sg(sgl, sg, sg_len, i) + count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor); swdesc = stm32_dma3_chan_desc_alloc(chan, count); if (!swdesc) @@ -1227,6 +1283,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan do { size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE); + if (!prevent_refactor && + 
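stm32_dma3_get_ll_count() above computes how many linked-list items a transfer of len bytes occupies. With packing/refactoring prevented (the new NOPACK/NOREFACT flags), it is a plain round-up by the maximum block size; otherwise it carves full-size blocks first, then one item covering a burst-multiple chunk, then one item for any unaligned tail, matching the chunking later applied in prep_dma_memcpy() and prep_slave_sg(). Its control flow restated with generic parameters:

#include <linux/math.h>
#include <linux/types.h>

static u32 sketch_ll_count(size_t len, size_t max_block, size_t max_burst,
			   bool prevent_refactor)
{
	u32 count;

	if (prevent_refactor)
		return DIV_ROUND_UP(len, max_block);

	count = len / max_block;		/* full-size blocks */
	len -= count * max_block;

	if (len >= max_burst) {
		count++;			/* one burst-multiple item */
		len -= (len / max_burst) * max_burst;
	}

	if (len > 0)				/* unaligned remainder */
		count++;

	return count;
}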
(chunk < STM32_DMA3_MAX_BLOCK_SIZE && chunk >= chan->max_burst)) + chunk = chan->max_burst * (len / chan->max_burst); + if (dir == DMA_MEM_TO_DEV) { src = sg_addr; dst = dev_addr; @@ -1259,6 +1319,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan } while (len); } + if (count != sg_len && chan->tcem != CTR2_TCEM_CHANNEL) + dev_warn(chan2dev(chan), "Linked-list refactored, %d items instead of %d\n", + count, sg_len); + /* Enable Error interrupts */ swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE; /* Enable Transfer state interrupts */ @@ -1601,8 +1665,12 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) return chan_reserved; } +static struct stm32_dma3_pdata stm32mp25_pdata = { + .axi_max_burst_len = 16, +}; + static const struct of_device_id stm32_dma3_of_match[] = { - { .compatible = "st,stm32mp25-dma3", }, + { .compatible = "st,stm32mp25-dma3", .data = &stm32mp25_pdata, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, stm32_dma3_of_match); @@ -1610,6 +1678,7 @@ MODULE_DEVICE_TABLE(of, stm32_dma3_of_match); static int stm32_dma3_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + const struct stm32_dma3_pdata *pdata; struct stm32_dma3_ddata *ddata; struct reset_control *reset; struct stm32_dma3_chan *chan; @@ -1704,6 +1773,16 @@ static int stm32_dma3_probe(struct platform_device *pdev) else /* Dual master ports */ ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr); + /* axi_max_burst_len is optional, if not defined, use STM32_DMA3_MAX_BURST_LEN */ + ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN; + pdata = device_get_match_data(&pdev->dev); + if (pdata && pdata->axi_max_burst_len) { + ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len, + STM32_DMA3_MAX_BURST_LEN); + dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n", + ddata->axi_max_burst_len); + } + ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans), GFP_KERNEL); if (!ddata->chans) { @@ -1827,7 +1906,7 @@ static const struct dev_pm_ops stm32_dma3_pm_ops = { static struct platform_driver stm32_dma3_driver = { .probe = stm32_dma3_probe, - .remove_new = stm32_dma3_remove, + .remove = stm32_dma3_remove, .driver = { .name = "stm32-dma3", .of_match_table = stm32_dma3_of_match, diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index 2e7f9b07fdd2..f37cdf6f2179 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -1292,7 +1292,7 @@ MODULE_DEVICE_TABLE(of, sun4i_dma_match); static struct platform_driver sun4i_dma_driver = { .probe = sun4i_dma_probe, - .remove_new = sun4i_dma_remove, + .remove = sun4i_dma_remove, .driver = { .name = "sun4i-dma", .of_match_table = sun4i_dma_match, diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 583bf49031cf..95ecb12caaa5 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -1488,7 +1488,7 @@ static void sun6i_dma_remove(struct platform_device *pdev) static struct platform_driver sun6i_dma_driver = { .probe = sun6i_dma_probe, - .remove_new = sun6i_dma_remove, + .remove = sun6i_dma_remove, .driver = { .name = "sun6i-dma", .of_match_table = sun6i_dma_match, diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index 3642508e88bb..cacf3757adc2 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -1532,7 +1532,7 @@ static struct platform_driver tegra_dma_driver = { .of_match_table = tegra_dma_of_match, }, .probe = tegra_dma_probe, - 
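The stm32mp25 compatible now carries per-SoC platform data, and probe retrieves it with device_get_match_data(), falling back to the register-derived STM32_DMA3_MAX_BURST_LEN when no data (or a zero limit) is present. The lookup pattern reduced to its essentials (constants here are placeholders):

#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>

#define SKETCH_MAX_BURST_LEN	64	/* placeholder default */

struct sketch_pdata {
	u32 axi_max_burst_len;
};

static const struct sketch_pdata sketch_mp25_pdata = {
	.axi_max_burst_len = 16,
};

static const struct of_device_id sketch_of_match[] = {
	{ .compatible = "st,stm32mp25-dma3", .data = &sketch_mp25_pdata },
	{ /* sentinel */ },
};

static u32 sketch_axi_limit(struct platform_device *pdev)
{
	const struct sketch_pdata *pdata = device_get_match_data(&pdev->dev);

	/* Quirk data may only tighten, never widen, the default limit. */
	if (pdata && pdata->axi_max_burst_len)
		return min_t(u32, pdata->axi_max_burst_len,
			     SKETCH_MAX_BURST_LEN);

	return SKETCH_MAX_BURST_LEN;
}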
.remove_new = tegra_dma_remove, + .remove = tegra_dma_remove, }; module_platform_driver(tegra_dma_driver); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 7d1acda2d72b..14a61e53a41b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1675,7 +1675,7 @@ static struct platform_driver tegra_dmac_driver = { .of_match_table = tegra_dma_of_match, }, .probe = tegra_dma_probe, - .remove_new = tegra_dma_remove, + .remove = tegra_dma_remove, }; module_platform_driver(tegra_dmac_driver); diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 24ad7077c53b..2953008d42ef 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -1008,7 +1008,7 @@ static struct platform_driver tegra_admac_driver = { .of_match_table = tegra_adma_of_match, }, .probe = tegra_adma_probe, - .remove_new = tegra_adma_remove, + .remove = tegra_adma_remove, }; module_platform_driver(tegra_admac_driver); diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index a8bb70c2d109..8d8c3d6038fc 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -1243,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = { static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, - .remove_new = cppi41_dma_remove, + .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .pm = &cppi41_pm_ops, diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 5f8d2e93ff3f..343e986e66e7 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2636,7 +2636,7 @@ static const struct dev_pm_ops edma_pm_ops = { static struct platform_driver edma_driver = { .probe = edma_probe, - .remove_new = edma_remove, + .remove = edma_remove, .driver = { .name = "edma", .pm = &edma_pm_ops, diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 6ab9bfbdc480..8c023c6e623a 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1915,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match); static struct platform_driver omap_dma_driver = { .probe = omap_dma_probe, - .remove_new = omap_dma_remove, + .remove = omap_dma_remove, .driver = { .name = "omap-dma-engine", .of_match_table = omap_dma_match, diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 7410025605e0..ecaf002558af 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -761,7 +761,7 @@ static struct platform_driver td_driver = { .name = DRIVER_NAME, }, .probe = td_probe, - .remove_new = td_remove, + .remove = td_remove, }; module_platform_driver(td_driver); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 44ba377b4b5a..35d5221683b2 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -1260,14 +1260,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = { }; static struct platform_driver txx9dmac_chan_driver = { - .remove_new = txx9dmac_chan_remove, + .remove = txx9dmac_chan_remove, .driver = { .name = "txx9dmac-chan", }, }; static struct platform_driver txx9dmac_driver = { - .remove_new = txx9dmac_remove, + .remove = txx9dmac_remove, .shutdown = txx9dmac_shutdown, .driver = { .name = "txx9dmac", diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c index ad7125f6e2ca..7a99f86ecb5a 100644 --- a/drivers/dma/uniphier-mdmac.c +++ b/drivers/dma/uniphier-mdmac.c @@ -493,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match); static struct platform_driver uniphier_mdmac_driver = { .probe = uniphier_mdmac_probe, - .remove_new = 
uniphier_mdmac_remove, + .remove = uniphier_mdmac_remove, .driver = { .name = "uniphier-mio-dmac", .of_match_table = uniphier_mdmac_match, diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index 3ce2dc2ad9de..ceeb6171c9d1 100644 --- a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -603,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match); static struct platform_driver uniphier_xdmac_driver = { .probe = uniphier_xdmac_probe, - .remove_new = uniphier_xdmac_remove, + .remove = uniphier_xdmac_remove, .driver = { .name = "uniphier-xdmac", .of_match_table = uniphier_xdmac_match, diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 275848a9c450..f64624ea44ad 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -1815,7 +1815,7 @@ MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr); static struct platform_driver xgene_dma_driver = { .probe = xgene_dma_probe, - .remove_new = xgene_dma_remove, + .remove = xgene_dma_remove, .driver = { .name = "X-Gene-DMA", .of_match_table = xgene_dma_of_match_ptr, diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 718842fdaf98..93772abc3b49 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -1315,7 +1315,7 @@ static struct platform_driver xdma_driver = { }, .id_table = xdma_id_table, .probe = xdma_probe, - .remove_new = xdma_remove, + .remove = xdma_remove, }; module_platform_driver(xdma_driver); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 5eb51ae93e89..1bdd57de87a6 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -3271,7 +3271,7 @@ static struct platform_driver xilinx_vdma_driver = { .of_match_table = xilinx_dma_of_ids, }, .probe = xilinx_dma_probe, - .remove_new = xilinx_dma_remove, + .remove = xilinx_dma_remove, }; module_platform_driver(xilinx_vdma_driver); diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index be87764af9e8..ee5d9fdbfd7f 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -1863,7 +1863,7 @@ MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match); static struct platform_driver xilinx_dpdma_driver = { .probe = xilinx_dpdma_probe, - .remove_new = xilinx_dpdma_remove, + .remove = xilinx_dpdma_remove, .driver = { .name = "xilinx-zynqmp-dpdma", .of_match_table = xilinx_dpdma_of_match, diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9ae46f1198fe..d05fc5fcc77d 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -366,7 +366,7 @@ static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) } writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); - /* Clearing the interrupt account rgisters */ + /* Clearing the interrupt account registers */ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); @@ -1192,7 +1192,7 @@ static struct platform_driver zynqmp_dma_driver = { .pm = &zynqmp_dma_dev_pm_ops, }, .probe = zynqmp_dma_probe, - .remove_new = zynqmp_dma_remove, + .remove = zynqmp_dma_remove, }; module_platform_driver(zynqmp_dma_driver); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 81af6c344d6b..06f7b43a6f78 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -311,24 +311,6 @@ config EDAC_CELL Cell Broadband Engine internal memory controller on platform without a hypervisor -config EDAC_AMD8131 - tristate "AMD8131 HyperTransport PCI-X Tunnel" - depends on PCI && 
PPC_MAPLE - help - Support for error detection and correction on the - AMD8131 HyperTransport PCI-X Tunnel chip. - Note, add more Kconfig dependency if it's adopted - on some machine other than Maple. - -config EDAC_AMD8111 - tristate "AMD8111 HyperTransport I/O Hub" - depends on PCI && PPC_MAPLE - help - Support for error detection and correction on the - AMD8111 HyperTransport I/O Hub chip. - Note, add more Kconfig dependency if it's adopted - on some machine other than Maple. - config EDAC_CPC925 tristate "IBM CPC925 Memory Controller (PPC970FX)" depends on PPC64 diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index faf310eec4a6..f9cf19d8d13d 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -63,8 +63,6 @@ i10nm_edac-y := i10nm_base.o obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o -obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o -obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c deleted file mode 100644 index a6d3013d5823..000000000000 --- a/drivers/edac/amd8111_edac.c +++ /dev/null @@ -1,596 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module - * - * Copyright (c) 2008 Wind River Systems, Inc. - * - * Authors: Cao Qingtao <qingtao.cao@windriver.com> - * Benjamin Walsh <benjamin.walsh@windriver.com> - * Hu Yongqi <yongqi.hu@windriver.com> - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/bitops.h> -#include <linux/edac.h> -#include <linux/pci_ids.h> -#include <asm/io.h> - -#include "edac_module.h" -#include "amd8111_edac.h" - -#define AMD8111_EDAC_REVISION " Ver: 1.0.0" -#define AMD8111_EDAC_MOD_STR "amd8111_edac" - -#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 - -enum amd8111_edac_devs { - LPC_BRIDGE = 0, -}; - -enum amd8111_edac_pcis { - PCI_BRIDGE = 0, -}; - -/* Wrapper functions for accessing PCI configuration space */ -static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32) -{ - int ret; - - ret = pci_read_config_dword(dev, reg, val32); - if (ret != 0) - printk(KERN_ERR AMD8111_EDAC_MOD_STR - " PCI Access Read Error at 0x%x\n", reg); - - return ret; -} - -static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8) -{ - int ret; - - ret = pci_read_config_byte(dev, reg, val8); - if (ret != 0) - printk(KERN_ERR AMD8111_EDAC_MOD_STR - " PCI Access Read Error at 0x%x\n", reg); -} - -static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32) -{ - int ret; - - ret = pci_write_config_dword(dev, reg, val32); - if (ret != 0) - printk(KERN_ERR AMD8111_EDAC_MOD_STR - " PCI Access Write Error at 0x%x\n", reg); -} - -static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8) -{ - int ret; - - ret = pci_write_config_byte(dev, reg, val8); - if (ret != 0) - printk(KERN_ERR AMD8111_EDAC_MOD_STR - " PCI Access Write Error at 0x%x\n", reg); -} - -/* - * device-specific methods for amd8111 PCI Bridge Controller - * - * Error Reporting and Handling for amd8111 chipset could be found - * in its datasheet 3.1.2 section, P37 - */ -static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info) -{ - u32 val32; - struct pci_dev *dev = pci_info->dev; - - /* First clear error detection flags on the host interface */ - - /* Clear SSE/SMA/STA flags in the global status register*/ - 
edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); - if (val32 & PCI_STSCMD_CLEAR_MASK) - edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); - - /* Clear CRC and Link Fail flags in HT Link Control reg */ - edac_pci_read_dword(dev, REG_HT_LINK, &val32); - if (val32 & HT_LINK_CLEAR_MASK) - edac_pci_write_dword(dev, REG_HT_LINK, val32); - - /* Second clear all fault on the secondary interface */ - - /* Clear error flags in the memory-base limit reg. */ - edac_pci_read_dword(dev, REG_MEM_LIM, &val32); - if (val32 & MEM_LIMIT_CLEAR_MASK) - edac_pci_write_dword(dev, REG_MEM_LIM, val32); - - /* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */ - edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); - if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK) - edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); - - /* Last enable error detections */ - if (edac_op_state == EDAC_OPSTATE_POLL) { - /* Enable System Error reporting in global status register */ - edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); - val32 |= PCI_STSCMD_SERREN; - edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); - - /* Enable CRC Sync flood packets to HyperTransport Link */ - edac_pci_read_dword(dev, REG_HT_LINK, &val32); - val32 |= HT_LINK_CRCFEN; - edac_pci_write_dword(dev, REG_HT_LINK, val32); - - /* Enable SSE reporting etc in Interrupt control reg */ - edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); - val32 |= PCI_INTBRG_CTRL_POLL_MASK; - edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); - } -} - -static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info) -{ - u32 val32; - struct pci_dev *dev = pci_info->dev; - - if (edac_op_state == EDAC_OPSTATE_POLL) { - /* Disable System Error reporting */ - edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); - val32 &= ~PCI_STSCMD_SERREN; - edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); - - /* Disable CRC flood packets */ - edac_pci_read_dword(dev, REG_HT_LINK, &val32); - val32 &= ~HT_LINK_CRCFEN; - edac_pci_write_dword(dev, REG_HT_LINK, val32); - - /* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */ - edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); - val32 &= ~PCI_INTBRG_CTRL_POLL_MASK; - edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); - } -} - -static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev) -{ - struct amd8111_pci_info *pci_info = edac_dev->pvt_info; - struct pci_dev *dev = pci_info->dev; - u32 val32; - - /* Check out PCI Bridge Status and Command Register */ - edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); - if (val32 & PCI_STSCMD_CLEAR_MASK) { - printk(KERN_INFO "Error(s) in PCI bridge status and command" - "register on device %s\n", pci_info->ctl_name); - printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n", - (val32 & PCI_STSCMD_SSE) != 0, - (val32 & PCI_STSCMD_RMA) != 0, - (val32 & PCI_STSCMD_RTA) != 0); - - val32 |= PCI_STSCMD_CLEAR_MASK; - edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } - - /* Check out HyperTransport Link Control Register */ - edac_pci_read_dword(dev, REG_HT_LINK, &val32); - if (val32 & HT_LINK_LKFAIL) { - printk(KERN_INFO "Error(s) in hypertransport link control" - "register on device %s\n", pci_info->ctl_name); - printk(KERN_INFO "LKFAIL: %d\n", - (val32 & HT_LINK_LKFAIL) != 0); - - val32 |= HT_LINK_LKFAIL; - edac_pci_write_dword(dev, REG_HT_LINK, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } - - /* Check out PCI Interrupt and Bridge Control Register */ - edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, 
&val32); - if (val32 & PCI_INTBRG_CTRL_DTSTAT) { - printk(KERN_INFO "Error(s) in PCI interrupt and bridge control" - "register on device %s\n", pci_info->ctl_name); - printk(KERN_INFO "DTSTAT: %d\n", - (val32 & PCI_INTBRG_CTRL_DTSTAT) != 0); - - val32 |= PCI_INTBRG_CTRL_DTSTAT; - edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } - - /* Check out PCI Bridge Memory Base-Limit Register */ - edac_pci_read_dword(dev, REG_MEM_LIM, &val32); - if (val32 & MEM_LIMIT_CLEAR_MASK) { - printk(KERN_INFO - "Error(s) in mem limit register on %s device\n", - pci_info->ctl_name); - printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n" - "RTA: %d, STA: %d, MDPE: %d\n", - (val32 & MEM_LIMIT_DPE) != 0, - (val32 & MEM_LIMIT_RSE) != 0, - (val32 & MEM_LIMIT_RMA) != 0, - (val32 & MEM_LIMIT_RTA) != 0, - (val32 & MEM_LIMIT_STA) != 0, - (val32 & MEM_LIMIT_MDPE) != 0); - - val32 |= MEM_LIMIT_CLEAR_MASK; - edac_pci_write_dword(dev, REG_MEM_LIM, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } -} - -static struct resource *legacy_io_res; -static int at_compat_reg_broken; -#define LEGACY_NR_PORTS 1 - -/* device-specific methods for amd8111 LPC Bridge device */ -static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info) -{ - u8 val8; - struct pci_dev *dev = dev_info->dev; - - /* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */ - legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS, - AMD8111_EDAC_MOD_STR); - if (!legacy_io_res) - printk(KERN_INFO "%s: failed to request legacy I/O region " - "start %d, len %d\n", __func__, - REG_AT_COMPAT, LEGACY_NR_PORTS); - else { - val8 = __do_inb(REG_AT_COMPAT); - if (val8 == 0xff) { /* buggy port */ - printk(KERN_INFO "%s: port %d is buggy, not supported" - " by hardware?\n", __func__, REG_AT_COMPAT); - at_compat_reg_broken = 1; - release_region(REG_AT_COMPAT, LEGACY_NR_PORTS); - legacy_io_res = NULL; - } else { - u8 out8 = 0; - if (val8 & AT_COMPAT_SERR) - out8 = AT_COMPAT_CLRSERR; - if (val8 & AT_COMPAT_IOCHK) - out8 |= AT_COMPAT_CLRIOCHK; - if (out8 > 0) - __do_outb(out8, REG_AT_COMPAT); - } - } - - /* Second clear error flags on LPC bridge */ - edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8); - if (val8 & IO_CTRL_1_CLEAR_MASK) - edac_pci_write_byte(dev, REG_IO_CTRL_1, val8); -} - -static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info) -{ - if (legacy_io_res) - release_region(REG_AT_COMPAT, LEGACY_NR_PORTS); -} - -static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev) -{ - struct amd8111_dev_info *dev_info = edac_dev->pvt_info; - struct pci_dev *dev = dev_info->dev; - u8 val8; - - edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8); - if (val8 & IO_CTRL_1_CLEAR_MASK) { - printk(KERN_INFO - "Error(s) in IO control register on %s device\n", - dev_info->ctl_name); - printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n", - (val8 & IO_CTRL_1_LPC_ERR) != 0, - (val8 & IO_CTRL_1_PW2LPC) != 0); - - val8 |= IO_CTRL_1_CLEAR_MASK; - edac_pci_write_byte(dev, REG_IO_CTRL_1, val8); - - edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); - } - - if (at_compat_reg_broken == 0) { - u8 out8 = 0; - val8 = __do_inb(REG_AT_COMPAT); - if (val8 & AT_COMPAT_SERR) - out8 = AT_COMPAT_CLRSERR; - if (val8 & AT_COMPAT_IOCHK) - out8 |= AT_COMPAT_CLRIOCHK; - if (out8 > 0) { - __do_outb(out8, REG_AT_COMPAT); - edac_device_handle_ue(edac_dev, 0, 0, - edac_dev->ctl_name); - } - } -} - -/* General devices represented by edac_device_ctl_info */ -static struct amd8111_dev_info 
amd8111_devices[] = { - [LPC_BRIDGE] = { - .err_dev = PCI_DEVICE_ID_AMD_8111_LPC, - .ctl_name = "lpc", - .init = amd8111_lpc_bridge_init, - .exit = amd8111_lpc_bridge_exit, - .check = amd8111_lpc_bridge_check, - }, - {0}, -}; - -/* PCI controllers represented by edac_pci_ctl_info */ -static struct amd8111_pci_info amd8111_pcis[] = { - [PCI_BRIDGE] = { - .err_dev = PCI_DEVICE_ID_AMD_8111_PCI, - .ctl_name = "AMD8111_PCI_Controller", - .init = amd8111_pci_bridge_init, - .exit = amd8111_pci_bridge_exit, - .check = amd8111_pci_bridge_check, - }, - {0}, -}; - -static int amd8111_dev_probe(struct pci_dev *dev, - const struct pci_device_id *id) -{ - struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data]; - int ret = -ENODEV; - - dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD, - dev_info->err_dev, NULL); - - if (!dev_info->dev) { - printk(KERN_ERR "EDAC device not found:" - "vendor %x, device %x, name %s\n", - PCI_VENDOR_ID_AMD, dev_info->err_dev, - dev_info->ctl_name); - goto err; - } - - if (pci_enable_device(dev_info->dev)) { - printk(KERN_ERR "failed to enable:" - "vendor %x, device %x, name %s\n", - PCI_VENDOR_ID_AMD, dev_info->err_dev, - dev_info->ctl_name); - goto err_dev_put; - } - - /* - * we do not allocate extra private structure for - * edac_device_ctl_info, but make use of existing - * one instead. - */ - dev_info->edac_idx = edac_device_alloc_index(); - dev_info->edac_dev = - edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, - NULL, 0, 0, dev_info->edac_idx); - if (!dev_info->edac_dev) { - ret = -ENOMEM; - goto err_dev_put; - } - - dev_info->edac_dev->pvt_info = dev_info; - dev_info->edac_dev->dev = &dev_info->dev->dev; - dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; - dev_info->edac_dev->ctl_name = dev_info->ctl_name; - dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev); - - if (edac_op_state == EDAC_OPSTATE_POLL) - dev_info->edac_dev->edac_check = dev_info->check; - - if (dev_info->init) - dev_info->init(dev_info); - - if (edac_device_add_device(dev_info->edac_dev) > 0) { - printk(KERN_ERR "failed to add edac_dev for %s\n", - dev_info->ctl_name); - goto err_edac_free_ctl; - } - - printk(KERN_INFO "added one edac_dev on AMD8111 " - "vendor %x, device %x, name %s\n", - PCI_VENDOR_ID_AMD, dev_info->err_dev, - dev_info->ctl_name); - - return 0; - -err_edac_free_ctl: - edac_device_free_ctl_info(dev_info->edac_dev); -err_dev_put: - pci_dev_put(dev_info->dev); -err: - return ret; -} - -static void amd8111_dev_remove(struct pci_dev *dev) -{ - struct amd8111_dev_info *dev_info; - - for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++) - if (dev_info->dev->device == dev->device) - break; - - if (!dev_info->err_dev) /* should never happen */ - return; - - if (dev_info->edac_dev) { - edac_device_del_device(dev_info->edac_dev->dev); - edac_device_free_ctl_info(dev_info->edac_dev); - } - - if (dev_info->exit) - dev_info->exit(dev_info); - - pci_dev_put(dev_info->dev); -} - -static int amd8111_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id) -{ - struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data]; - int ret = -ENODEV; - - pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD, - pci_info->err_dev, NULL); - - if (!pci_info->dev) { - printk(KERN_ERR "EDAC device not found:" - "vendor %x, device %x, name %s\n", - PCI_VENDOR_ID_AMD, pci_info->err_dev, - pci_info->ctl_name); - goto err; - } - - if (pci_enable_device(pci_info->dev)) { - printk(KERN_ERR "failed to enable:" - "vendor %x, device %x, name %s\n", - PCI_VENDOR_ID_AMD, 
pci_info->err_dev, - pci_info->ctl_name); - goto err_dev_put; - } - - /* - * we do not allocate extra private structure for - * edac_pci_ctl_info, but make use of existing - * one instead. - */ - pci_info->edac_idx = edac_pci_alloc_index(); - pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name); - if (!pci_info->edac_dev) { - ret = -ENOMEM; - goto err_dev_put; - } - - pci_info->edac_dev->pvt_info = pci_info; - pci_info->edac_dev->dev = &pci_info->dev->dev; - pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; - pci_info->edac_dev->ctl_name = pci_info->ctl_name; - pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev); - - if (edac_op_state == EDAC_OPSTATE_POLL) - pci_info->edac_dev->edac_check = pci_info->check; - - if (pci_info->init) - pci_info->init(pci_info); - - if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) { - printk(KERN_ERR "failed to add edac_pci for %s\n", - pci_info->ctl_name); - goto err_edac_free_ctl; - } - - printk(KERN_INFO "added one edac_pci on AMD8111 " - "vendor %x, device %x, name %s\n", - PCI_VENDOR_ID_AMD, pci_info->err_dev, - pci_info->ctl_name); - - return 0; - -err_edac_free_ctl: - edac_pci_free_ctl_info(pci_info->edac_dev); -err_dev_put: - pci_dev_put(pci_info->dev); -err: - return ret; -} - -static void amd8111_pci_remove(struct pci_dev *dev) -{ - struct amd8111_pci_info *pci_info; - - for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++) - if (pci_info->dev->device == dev->device) - break; - - if (!pci_info->err_dev) /* should never happen */ - return; - - if (pci_info->edac_dev) { - edac_pci_del_device(pci_info->edac_dev->dev); - edac_pci_free_ctl_info(pci_info->edac_dev); - } - - if (pci_info->exit) - pci_info->exit(pci_info); - - pci_dev_put(pci_info->dev); -} - -/* PCI Device ID talbe for general EDAC device */ -static const struct pci_device_id amd8111_edac_dev_tbl[] = { - { - PCI_VEND_DEV(AMD, 8111_LPC), - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = 0, - .class_mask = 0, - .driver_data = LPC_BRIDGE, - }, - { - 0, - } /* table is NULL-terminated */ -}; -MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl); - -static struct pci_driver amd8111_edac_dev_driver = { - .name = "AMD8111_EDAC_DEV", - .probe = amd8111_dev_probe, - .remove = amd8111_dev_remove, - .id_table = amd8111_edac_dev_tbl, -}; - -/* PCI Device ID table for EDAC PCI controller */ -static const struct pci_device_id amd8111_edac_pci_tbl[] = { - { - PCI_VEND_DEV(AMD, 8111_PCI), - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = 0, - .class_mask = 0, - .driver_data = PCI_BRIDGE, - }, - { - 0, - } /* table is NULL-terminated */ -}; -MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl); - -static struct pci_driver amd8111_edac_pci_driver = { - .name = "AMD8111_EDAC_PCI", - .probe = amd8111_pci_probe, - .remove = amd8111_pci_remove, - .id_table = amd8111_edac_pci_tbl, -}; - -static int __init amd8111_edac_init(void) -{ - int val; - - printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n"); - printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n"); - - /* Only POLL mode supported so far */ - edac_op_state = EDAC_OPSTATE_POLL; - - val = pci_register_driver(&amd8111_edac_dev_driver); - val |= pci_register_driver(&amd8111_edac_pci_driver); - - return val; -} - -static void __exit amd8111_edac_exit(void) -{ - pci_unregister_driver(&amd8111_edac_pci_driver); - pci_unregister_driver(&amd8111_edac_dev_driver); -} - - -module_init(amd8111_edac_init); -module_exit(amd8111_edac_exit); - -MODULE_LICENSE("GPL"); 
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); -MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module"); diff --git a/drivers/edac/amd8111_edac.h b/drivers/edac/amd8111_edac.h deleted file mode 100644 index 200cab1b3e42..000000000000 --- a/drivers/edac/amd8111_edac.h +++ /dev/null @@ -1,118 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip - * - * Copyright (c) 2008 Wind River Systems, Inc. - * - * Authors: Cao Qingtao <qingtao.cao@windriver.com> - * Benjamin Walsh <benjamin.walsh@windriver.com> - * Hu Yongqi <yongqi.hu@windriver.com> - */ - -#ifndef _AMD8111_EDAC_H_ -#define _AMD8111_EDAC_H_ - -/************************************************************ - * PCI Bridge Status and Command Register, DevA:0x04 - ************************************************************/ -#define REG_PCI_STSCMD 0x04 -enum pci_stscmd_bits { - PCI_STSCMD_SSE = BIT(30), - PCI_STSCMD_RMA = BIT(29), - PCI_STSCMD_RTA = BIT(28), - PCI_STSCMD_SERREN = BIT(8), - PCI_STSCMD_CLEAR_MASK = (PCI_STSCMD_SSE | - PCI_STSCMD_RMA | - PCI_STSCMD_RTA) -}; - -/************************************************************ - * PCI Bridge Memory Base-Limit Register, DevA:0x1c - ************************************************************/ -#define REG_MEM_LIM 0x1c -enum mem_limit_bits { - MEM_LIMIT_DPE = BIT(31), - MEM_LIMIT_RSE = BIT(30), - MEM_LIMIT_RMA = BIT(29), - MEM_LIMIT_RTA = BIT(28), - MEM_LIMIT_STA = BIT(27), - MEM_LIMIT_MDPE = BIT(24), - MEM_LIMIT_CLEAR_MASK = (MEM_LIMIT_DPE | - MEM_LIMIT_RSE | - MEM_LIMIT_RMA | - MEM_LIMIT_RTA | - MEM_LIMIT_STA | - MEM_LIMIT_MDPE) -}; - -/************************************************************ - * HyperTransport Link Control Register, DevA:0xc4 - ************************************************************/ -#define REG_HT_LINK 0xc4 -enum ht_link_bits { - HT_LINK_LKFAIL = BIT(4), - HT_LINK_CRCFEN = BIT(1), - HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL) -}; - -/************************************************************ - * PCI Bridge Interrupt and Bridge Control, DevA:0x3c - ************************************************************/ -#define REG_PCI_INTBRG_CTRL 0x3c -enum pci_intbrg_ctrl_bits { - PCI_INTBRG_CTRL_DTSERREN = BIT(27), - PCI_INTBRG_CTRL_DTSTAT = BIT(26), - PCI_INTBRG_CTRL_MARSP = BIT(21), - PCI_INTBRG_CTRL_SERREN = BIT(17), - PCI_INTBRG_CTRL_PEREN = BIT(16), - PCI_INTBRG_CTRL_CLEAR_MASK = (PCI_INTBRG_CTRL_DTSTAT), - PCI_INTBRG_CTRL_POLL_MASK = (PCI_INTBRG_CTRL_DTSERREN | - PCI_INTBRG_CTRL_MARSP | - PCI_INTBRG_CTRL_SERREN) -}; - -/************************************************************ - * I/O Control 1 Register, DevB:0x40 - ************************************************************/ -#define REG_IO_CTRL_1 0x40 -enum io_ctrl_1_bits { - IO_CTRL_1_NMIONERR = BIT(7), - IO_CTRL_1_LPC_ERR = BIT(6), - IO_CTRL_1_PW2LPC = BIT(1), - IO_CTRL_1_CLEAR_MASK = (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC) -}; - -/************************************************************ - * Legacy I/O Space Registers - ************************************************************/ -#define REG_AT_COMPAT 0x61 -enum at_compat_bits { - AT_COMPAT_SERR = BIT(7), - AT_COMPAT_IOCHK = BIT(6), - AT_COMPAT_CLRIOCHK = BIT(3), - AT_COMPAT_CLRSERR = BIT(2), -}; - -struct amd8111_dev_info { - u16 err_dev; /* PCI Device ID */ - struct pci_dev *dev; - int edac_idx; /* device index */ - char *ctl_name; - struct edac_device_ctl_info *edac_dev; - void (*init)(struct amd8111_dev_info *dev_info); - void (*exit)(struct 
amd8111_dev_info *dev_info); - void (*check)(struct edac_device_ctl_info *edac_dev); -}; - -struct amd8111_pci_info { - u16 err_dev; /* PCI Device ID */ - struct pci_dev *dev; - int edac_idx; /* pci index */ - const char *ctl_name; - struct edac_pci_ctl_info *edac_dev; - void (*init)(struct amd8111_pci_info *dev_info); - void (*exit)(struct amd8111_pci_info *dev_info); - void (*check)(struct edac_pci_ctl_info *edac_dev); -}; - -#endif /* _AMD8111_EDAC_H_ */ diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c deleted file mode 100644 index 28610ba514f4..000000000000 --- a/drivers/edac/amd8131_edac.c +++ /dev/null @@ -1,358 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module - * - * Copyright (c) 2008 Wind River Systems, Inc. - * - * Authors: Cao Qingtao <qingtao.cao@windriver.com> - * Benjamin Walsh <benjamin.walsh@windriver.com> - * Hu Yongqi <yongqi.hu@windriver.com> - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/bitops.h> -#include <linux/edac.h> -#include <linux/pci_ids.h> - -#include "edac_module.h" -#include "amd8131_edac.h" - -#define AMD8131_EDAC_REVISION " Ver: 1.0.0" -#define AMD8131_EDAC_MOD_STR "amd8131_edac" - -/* Wrapper functions for accessing PCI configuration space */ -static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32) -{ - int ret; - - ret = pci_read_config_dword(dev, reg, val32); - if (ret != 0) - printk(KERN_ERR AMD8131_EDAC_MOD_STR - " PCI Access Read Error at 0x%x\n", reg); -} - -static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32) -{ - int ret; - - ret = pci_write_config_dword(dev, reg, val32); - if (ret != 0) - printk(KERN_ERR AMD8131_EDAC_MOD_STR - " PCI Access Write Error at 0x%x\n", reg); -} - -/* Support up to two AMD8131 chipsets on a platform */ -static struct amd8131_dev_info amd8131_devices[] = { - { - .inst = NORTH_A, - .devfn = DEVFN_PCIX_BRIDGE_NORTH_A, - .ctl_name = "AMD8131_PCIX_NORTH_A", - }, - { - .inst = NORTH_B, - .devfn = DEVFN_PCIX_BRIDGE_NORTH_B, - .ctl_name = "AMD8131_PCIX_NORTH_B", - }, - { - .inst = SOUTH_A, - .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A, - .ctl_name = "AMD8131_PCIX_SOUTH_A", - }, - { - .inst = SOUTH_B, - .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B, - .ctl_name = "AMD8131_PCIX_SOUTH_B", - }, - {.inst = NO_BRIDGE,}, -}; - -static void amd8131_pcix_init(struct amd8131_dev_info *dev_info) -{ - u32 val32; - struct pci_dev *dev = dev_info->dev; - - /* First clear error detection flags */ - edac_pci_read_dword(dev, REG_MEM_LIM, &val32); - if (val32 & MEM_LIMIT_MASK) - edac_pci_write_dword(dev, REG_MEM_LIM, val32); - - /* Clear Discard Timer Timedout flag */ - edac_pci_read_dword(dev, REG_INT_CTLR, &val32); - if (val32 & INT_CTLR_DTS) - edac_pci_write_dword(dev, REG_INT_CTLR, val32); - - /* Clear CRC Error flag on link side A */ - edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); - if (val32 & LNK_CTRL_CRCERR_A) - edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); - - /* Clear CRC Error flag on link side B */ - edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); - if (val32 & LNK_CTRL_CRCERR_B) - edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); - - /* - * Then enable all error detections. - * - * Setup Discard Timer Sync Flood Enable, - * System Error Enable and Parity Error Enable. 
- */ - edac_pci_read_dword(dev, REG_INT_CTLR, &val32); - val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE; - edac_pci_write_dword(dev, REG_INT_CTLR, val32); - - /* Enable overall SERR Error detection */ - edac_pci_read_dword(dev, REG_STS_CMD, &val32); - val32 |= STS_CMD_SERREN; - edac_pci_write_dword(dev, REG_STS_CMD, val32); - - /* Setup CRC Flood Enable for link side A */ - edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); - val32 |= LNK_CTRL_CRCFEN; - edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); - - /* Setup CRC Flood Enable for link side B */ - edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); - val32 |= LNK_CTRL_CRCFEN; - edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); -} - -static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info) -{ - u32 val32; - struct pci_dev *dev = dev_info->dev; - - /* Disable SERR, PERR and DTSE Error detection */ - edac_pci_read_dword(dev, REG_INT_CTLR, &val32); - val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE); - edac_pci_write_dword(dev, REG_INT_CTLR, val32); - - /* Disable overall System Error detection */ - edac_pci_read_dword(dev, REG_STS_CMD, &val32); - val32 &= ~STS_CMD_SERREN; - edac_pci_write_dword(dev, REG_STS_CMD, val32); - - /* Disable CRC Sync Flood on link side A */ - edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); - val32 &= ~LNK_CTRL_CRCFEN; - edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); - - /* Disable CRC Sync Flood on link side B */ - edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); - val32 &= ~LNK_CTRL_CRCFEN; - edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); -} - -static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev) -{ - struct amd8131_dev_info *dev_info = edac_dev->pvt_info; - struct pci_dev *dev = dev_info->dev; - u32 val32; - - /* Check PCI-X Bridge Memory Base-Limit Register for errors */ - edac_pci_read_dword(dev, REG_MEM_LIM, &val32); - if (val32 & MEM_LIMIT_MASK) { - printk(KERN_INFO "Error(s) in mem limit register " - "on %s bridge\n", dev_info->ctl_name); - printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n" - "RTA: %d, STA: %d, MDPE: %d\n", - val32 & MEM_LIMIT_DPE, - val32 & MEM_LIMIT_RSE, - val32 & MEM_LIMIT_RMA, - val32 & MEM_LIMIT_RTA, - val32 & MEM_LIMIT_STA, - val32 & MEM_LIMIT_MDPE); - - val32 |= MEM_LIMIT_MASK; - edac_pci_write_dword(dev, REG_MEM_LIM, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } - - /* Check if Discard Timer timed out */ - edac_pci_read_dword(dev, REG_INT_CTLR, &val32); - if (val32 & INT_CTLR_DTS) { - printk(KERN_INFO "Error(s) in interrupt and control register " - "on %s bridge\n", dev_info->ctl_name); - printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS); - - val32 |= INT_CTLR_DTS; - edac_pci_write_dword(dev, REG_INT_CTLR, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } - - /* Check if CRC error happens on link side A */ - edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); - if (val32 & LNK_CTRL_CRCERR_A) { - printk(KERN_INFO "Error(s) in link conf and control register " - "on %s bridge\n", dev_info->ctl_name); - printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A); - - val32 |= LNK_CTRL_CRCERR_A; - edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } - - /* Check if CRC error happens on link side B */ - edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); - if (val32 & LNK_CTRL_CRCERR_B) { - printk(KERN_INFO "Error(s) in link conf and control register " - "on %s bridge\n", dev_info->ctl_name); - printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B); - 
- val32 |= LNK_CTRL_CRCERR_B; - edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); - - edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); - } -} - -static struct amd8131_info amd8131_chipset = { - .err_dev = PCI_DEVICE_ID_AMD_8131_APIC, - .devices = amd8131_devices, - .init = amd8131_pcix_init, - .exit = amd8131_pcix_exit, - .check = amd8131_pcix_check, -}; - -/* - * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID, - * so amd8131_probe() would be called by kernel 4 times, with different - * address of pci_dev for each of them each time. - */ -static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id) -{ - struct amd8131_dev_info *dev_info; - - for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE; - dev_info++) - if (dev_info->devfn == dev->devfn) - break; - - if (dev_info->inst == NO_BRIDGE) /* should never happen */ - return -ENODEV; - - /* - * We can't call pci_get_device() as we are used to do because - * there are 4 of them but pci_dev_get() instead. - */ - dev_info->dev = pci_dev_get(dev); - - if (pci_enable_device(dev_info->dev)) { - pci_dev_put(dev_info->dev); - printk(KERN_ERR "failed to enable:" - "vendor %x, device %x, devfn %x, name %s\n", - PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev, - dev_info->devfn, dev_info->ctl_name); - return -ENODEV; - } - - /* - * we do not allocate extra private structure for - * edac_pci_ctl_info, but make use of existing - * one instead. - */ - dev_info->edac_idx = edac_pci_alloc_index(); - dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name); - if (!dev_info->edac_dev) - return -ENOMEM; - - dev_info->edac_dev->pvt_info = dev_info; - dev_info->edac_dev->dev = &dev_info->dev->dev; - dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR; - dev_info->edac_dev->ctl_name = dev_info->ctl_name; - dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev); - - if (edac_op_state == EDAC_OPSTATE_POLL) - dev_info->edac_dev->edac_check = amd8131_chipset.check; - - if (amd8131_chipset.init) - amd8131_chipset.init(dev_info); - - if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) { - printk(KERN_ERR "failed edac_pci_add_device() for %s\n", - dev_info->ctl_name); - edac_pci_free_ctl_info(dev_info->edac_dev); - return -ENODEV; - } - - printk(KERN_INFO "added one device on AMD8131 " - "vendor %x, device %x, devfn %x, name %s\n", - PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev, - dev_info->devfn, dev_info->ctl_name); - - return 0; -} - -static void amd8131_remove(struct pci_dev *dev) -{ - struct amd8131_dev_info *dev_info; - - for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE; - dev_info++) - if (dev_info->devfn == dev->devfn) - break; - - if (dev_info->inst == NO_BRIDGE) /* should never happen */ - return; - - if (dev_info->edac_dev) { - edac_pci_del_device(dev_info->edac_dev->dev); - edac_pci_free_ctl_info(dev_info->edac_dev); - } - - if (amd8131_chipset.exit) - amd8131_chipset.exit(dev_info); - - pci_dev_put(dev_info->dev); -} - -static const struct pci_device_id amd8131_edac_pci_tbl[] = { - { - PCI_VEND_DEV(AMD, 8131_BRIDGE), - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = 0, - .class_mask = 0, - .driver_data = 0, - }, - { - 0, - } /* table is NULL-terminated */ -}; -MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl); - -static struct pci_driver amd8131_edac_driver = { - .name = AMD8131_EDAC_MOD_STR, - .probe = amd8131_probe, - .remove = amd8131_remove, - .id_table = amd8131_edac_pci_tbl, -}; - -static int __init amd8131_edac_init(void) -{ - 
printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n"); - printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n"); - - /* Only POLL mode supported so far */ - edac_op_state = EDAC_OPSTATE_POLL; - - return pci_register_driver(&amd8131_edac_driver); -} - -static void __exit amd8131_edac_exit(void) -{ - pci_unregister_driver(&amd8131_edac_driver); -} - -module_init(amd8131_edac_init); -module_exit(amd8131_edac_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); -MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module"); diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h deleted file mode 100644 index 5f362abdaf12..000000000000 --- a/drivers/edac/amd8131_edac.h +++ /dev/null @@ -1,107 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip - * - * Copyright (c) 2008 Wind River Systems, Inc. - * - * Authors: Cao Qingtao <qingtao.cao@windriver.com> - * Benjamin Walsh <benjamin.walsh@windriver.com> - * Hu Yongqi <yongqi.hu@windriver.com> - */ - -#ifndef _AMD8131_EDAC_H_ -#define _AMD8131_EDAC_H_ - -#define DEVFN_PCIX_BRIDGE_NORTH_A 8 -#define DEVFN_PCIX_BRIDGE_NORTH_B 16 -#define DEVFN_PCIX_BRIDGE_SOUTH_A 24 -#define DEVFN_PCIX_BRIDGE_SOUTH_B 32 - -/************************************************************ - * PCI-X Bridge Status and Command Register, DevA:0x04 - ************************************************************/ -#define REG_STS_CMD 0x04 -enum sts_cmd_bits { - STS_CMD_SSE = BIT(30), - STS_CMD_SERREN = BIT(8) -}; - -/************************************************************ - * PCI-X Bridge Interrupt and Bridge Control Register, - ************************************************************/ -#define REG_INT_CTLR 0x3c -enum int_ctlr_bits { - INT_CTLR_DTSE = BIT(27), - INT_CTLR_DTS = BIT(26), - INT_CTLR_SERR = BIT(17), - INT_CTLR_PERR = BIT(16) -}; - -/************************************************************ - * PCI-X Bridge Memory Base-Limit Register, DevA:0x1C - ************************************************************/ -#define REG_MEM_LIM 0x1c -enum mem_limit_bits { - MEM_LIMIT_DPE = BIT(31), - MEM_LIMIT_RSE = BIT(30), - MEM_LIMIT_RMA = BIT(29), - MEM_LIMIT_RTA = BIT(28), - MEM_LIMIT_STA = BIT(27), - MEM_LIMIT_MDPE = BIT(24), - MEM_LIMIT_MASK = MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA| - MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE -}; - -/************************************************************ - * Link Configuration And Control Register, side A - ************************************************************/ -#define REG_LNK_CTRL_A 0xc4 - -/************************************************************ - * Link Configuration And Control Register, side B - ************************************************************/ -#define REG_LNK_CTRL_B 0xc8 - -enum lnk_ctrl_bits { - LNK_CTRL_CRCERR_A = BIT(9), - LNK_CTRL_CRCERR_B = BIT(8), - LNK_CTRL_CRCFEN = BIT(1) -}; - -enum pcix_bridge_inst { - NORTH_A = 0, - NORTH_B = 1, - SOUTH_A = 2, - SOUTH_B = 3, - NO_BRIDGE = 4 -}; - -struct amd8131_dev_info { - int devfn; - enum pcix_bridge_inst inst; - struct pci_dev *dev; - int edac_idx; /* pci device index */ - char *ctl_name; - struct edac_pci_ctl_info *edac_dev; -}; - -/* - * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC - * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are - * four PCIX Bridges on ATCA-6101 altogether. 
- * - * These PCIX Bridges share the same PCI Device ID and are all of - * Function Zero, they could be discrimated by their pci_dev->devfn. - * They share the same set of init/check/exit methods, and their - * private structures are collected in the devices[] array. - */ -struct amd8131_info { - u16 err_dev; /* PCI Device ID for AMD8131 APIC*/ - struct amd8131_dev_info *devices; - void (*init)(struct amd8131_dev_info *dev_info); - void (*exit)(struct amd8131_dev_info *dev_info); - void (*check)(struct edac_pci_ctl_info *edac_dev); -}; - -#endif /* _AMD8131_EDAC_H_ */ - diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 892b94cfd626..74a6aa7d8cc9 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -56,7 +56,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) * two cases: either the path goes through this node, in which case * the hop count is the sum of the two biggest child depths plus 2. * Or it could be the case that the max hop path is entirely - * containted in a child tree, in which case the max hop count is just + * contained in a child tree, in which case the max hop count is just * the max hop count of this child. */ static void update_hop_count(struct fw_node *node) diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 0ae2c84ecafe..9b298af1cac0 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -80,7 +80,7 @@ struct fw_card_driver { /* * Allow the specified node ID to do direct DMA out and in of * host memory. The card will disable this for all node when - * a bus reset happens, so driver need to reenable this after + * a bus reset happens, so driver need to re-enable this after * bus reset. Returns 0 on success, -ENODEV if the card * doesn't support this, -ESTALE if the generation doesn't * match. diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 7ee55c2804de..c02aed11b590 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1384,7 +1384,7 @@ struct driver_data { }; /* - * This function apppends a packet to the DMA queue for transmission. + * This function appends a packet to the DMA queue for transmission. * Must always be called with the ochi->lock held to ensure proper * generation handling and locking around packet queue manipulation. */ @@ -2213,7 +2213,7 @@ static irqreturn_t irq_handler(int irq, void *data) if (unlikely(param_debug > 0)) { dev_notice_ratelimited(ohci->card.device, - "The debug parameter is superceded by tracepoints events, and deprecated."); + "The debug parameter is superseded by tracepoints events, and deprecated."); } /* @@ -2614,7 +2614,7 @@ static int ohci_set_config_rom(struct fw_card *card, * ConfigRomHeader and BusOptions doesn't honor the * noByteSwapData bit, so with a be32 config rom, the * controller will load be32 values in to these registers - * during the atomic update, even on litte endian + * during the atomic update, even on little endian * architectures. The workaround we use is to put a 0 in the * header quadlet; 0 is endian agnostic and means that the * config rom isn't ready yet. 
In the bus reset tasklet we @@ -3726,12 +3726,11 @@ static int pci_probe(struct pci_dev *dev, return -ENXIO; } - err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name); - if (err) { + ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name); + if (IS_ERR(ohci->registers)) { ohci_err(ohci, "request and map MMIO resource unavailable\n"); return -ENXIO; } - ohci->registers = pcim_iomap_table(dev)[0]; for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) if ((ohci_quirks[i].vendor == dev->vendor) && diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index 2328ca58bba6..a1ebbe9b73b1 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -78,6 +78,7 @@ struct psci_0_1_function_ids get_psci_0_1_function_ids(void) static u32 psci_cpu_suspend_feature; static bool psci_system_reset2_supported; +static bool psci_system_off2_hibernate_supported; static inline bool psci_has_ext_power_state(void) { @@ -333,6 +334,36 @@ static void psci_sys_poweroff(void) invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); } +#ifdef CONFIG_HIBERNATION +static int psci_sys_hibernate(struct sys_off_data *data) +{ + /* + * If no hibernate type is specified SYSTEM_OFF2 defaults to selecting + * HIBERNATE_OFF. + * + * There are hypervisors in the wild that do not align with the spec and + * reject calls that explicitly provide a hibernate type. For + * compatibility with these nonstandard implementations, pass 0 as the + * type. + */ + if (system_entering_hibernation()) + invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), 0, 0, 0); + return NOTIFY_DONE; +} + +static int __init psci_hibernate_init(void) +{ + if (psci_system_off2_hibernate_supported) { + /* Higher priority than EFI shutdown, but only for hibernate */ + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE + 2, + psci_sys_hibernate, NULL); + } + return 0; +} +subsys_initcall(psci_hibernate_init); +#endif + static int psci_features(u32 psci_func_id) { return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, @@ -364,6 +395,7 @@ static const struct { PSCI_ID_NATIVE(1_1, SYSTEM_RESET2), PSCI_ID(1_1, MEM_PROTECT), PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE), + PSCI_ID_NATIVE(1_3, SYSTEM_OFF2), }; static int psci_debugfs_read(struct seq_file *s, void *data) @@ -525,6 +557,18 @@ static void __init psci_init_system_reset2(void) psci_system_reset2_supported = true; } +static void __init psci_init_system_off2(void) +{ + int ret; + + ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2)); + if (ret < 0) + return; + + if (ret & PSCI_1_3_OFF_TYPE_HIBERNATE_OFF) + psci_system_off2_hibernate_supported = true; +} + static void __init psci_init_system_suspend(void) { int ret; @@ -655,6 +699,7 @@ static int __init psci_probe(void) psci_init_cpu_suspend(); psci_init_system_suspend(); psci_init_system_reset2(); + psci_init_system_off2(); kvm_init_hyp_services(); } diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 80cac3a5f976..602807d6afcc 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -39,14 +39,6 @@ struct cci_drvdata { struct dfl_fpga_cdev *cdev; /* container device */ }; -static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev) -{ - if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME)) - return NULL; - - return pcim_iomap_table(pcidev)[0]; -} - static int cci_pci_alloc_irq(struct pci_dev *pcidev) { int ret, nvec = pci_msix_vec_count(pcidev); @@ -235,9 +227,9 @@ static int find_dfls_by_default(struct pci_dev *pcidev, u64 v; /* start to find Device Feature List from Bar 0 */ - base = 
cci_pci_ioremap_bar0(pcidev); - if (!base) - return -ENOMEM; + base = pcim_iomap_region(pcidev, 0, DRV_NAME); + if (IS_ERR(base)) + return PTR_ERR(base); /* * PF device has FME and Ports/AFUs, and VF device only has one @@ -296,7 +288,7 @@ static int find_dfls_by_default(struct pci_dev *pcidev, } /* release I/O mappings for next step enumeration */ - pcim_iounmap_regions(pcidev, BIT(0)); + pcim_iounmap_region(pcidev, 0); return ret; } diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 039fef26546e..73e660c5e38a 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -261,6 +261,11 @@ static int altera_gpio_probe(struct platform_device *pdev) altera_gc->gc.set = altera_gpio_set; altera_gc->gc.owner = THIS_MODULE; altera_gc->gc.parent = &pdev->dev; + altera_gc->gc.base = -1; + + altera_gc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev)); + if (!altera_gc->gc.label) + return -ENOMEM; altera_gc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(altera_gc->regs)) diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 5170fe7599cd..d5909a4f0433 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -99,11 +99,13 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset, struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip); unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset); unsigned int bit = exar_offset_to_bit(exar_gpio, offset); + unsigned int bit_value = value ? BIT(bit) : 0; - if (value) - regmap_set_bits(exar_gpio->regmap, addr, BIT(bit)); - else - regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit)); + /* + * regmap_write_bits() forces value to be written when an external + * pull up/down might otherwise indicate value was already set. 
+ */ + regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value); } static int exar_direction_output(struct gpio_chip *chip, unsigned int offset, diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c index 421d7e3a6c66..cd20604f26de 100644 --- a/drivers/gpio/gpio-merrifield.c +++ b/drivers/gpio/gpio-merrifield.c @@ -78,24 +78,25 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id if (retval) return retval; - retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev)); - if (retval) - return dev_err_probe(dev, retval, "I/O memory mapping error\n"); - - base = pcim_iomap_table(pdev)[1]; + base = pcim_iomap_region(pdev, 1, pci_name(pdev)); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), "I/O memory mapping error\n"); irq_base = readl(base + 0 * sizeof(u32)); gpio_base = readl(base + 1 * sizeof(u32)); /* Release the IO mapping, since we already get the info from BAR1 */ - pcim_iounmap_regions(pdev, BIT(1)); + pcim_iounmap_region(pdev, 1); priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; - priv->reg_base = pcim_iomap_table(pdev)[0]; + priv->reg_base = pcim_iomap_region(pdev, 0, pci_name(pdev)); + if (IS_ERR(priv->reg_base)) + return dev_err_probe(dev, PTR_ERR(priv->reg_base), + "I/O memory mapping error\n"); priv->pin_info.pin_ranges = mrfld_gpio_ranges; priv->pin_info.nranges = ARRAY_SIZE(mrfld_gpio_ranges); diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c index 9ef24449126a..3ea32c5e33d1 100644 --- a/drivers/gpio/gpio-mpsse.c +++ b/drivers/gpio/gpio-mpsse.c @@ -403,7 +403,7 @@ static void gpio_mpsse_ida_remove(void *data) { struct mpsse_priv *priv = data; - ida_simple_remove(&gpio_mpsse_ida, priv->id); + ida_free(&gpio_mpsse_ida, priv->id); } static int gpio_mpsse_probe(struct usb_interface *interface, @@ -422,7 +422,7 @@ static int gpio_mpsse_probe(struct usb_interface *interface, priv->intf = interface; priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber; - priv->id = ida_simple_get(&gpio_mpsse_ida, 0, 0, GFP_KERNEL); + priv->id = ida_alloc(&gpio_mpsse_ida, GFP_KERNEL); if (priv->id < 0) return priv->id; diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c index 2de61337ad3b..d7230fd83f5d 100644 --- a/drivers/gpio/gpio-zevio.c +++ b/drivers/gpio/gpio-zevio.c @@ -11,6 +11,7 @@ #include <linux/io.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -169,6 +170,7 @@ static const struct gpio_chip zevio_gpio_chip = { /* Initialization */ static int zevio_gpio_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct zevio_gpio *controller; int status, i; @@ -180,6 +182,10 @@ static int zevio_gpio_probe(struct platform_device *pdev) controller->chip = zevio_gpio_chip; controller->chip.parent = &pdev->dev; + controller->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev)); + if (!controller->chip.label) + return -ENOMEM; + controller->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(controller->regs)) return dev_err_probe(&pdev->dev, PTR_ERR(controller->regs), diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1cb5a4f19293..5504721007cc 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,9 +9,6 @@ menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && 
HAS_DMA select DRM_PANEL_ORIENTATION_QUIRKS - select DRM_KMS_HELPER if DRM_FBDEV_EMULATION - select FB_CORE if DRM_FBDEV_EMULATION - select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION select HDMI select I2C select DMA_SHARED_BUFFER @@ -152,6 +149,7 @@ config DRM_PANIC_SCREEN config DRM_PANIC_SCREEN_QR_CODE bool "Add a panic screen with a QR code" depends on DRM_PANIC && RUST + select ZLIB_DEFLATE help This option adds a QR code generator, and a panic screen with a QR code. The QR code will contain the last lines of kmsg and other debug @@ -210,9 +208,47 @@ config DRM_DEBUG_MODESET_LOCK If in doubt, say "N". +config DRM_CLIENT + bool + depends on DRM + help + Enables support for DRM clients. DRM drivers that need + struct drm_client_dev and its interfaces should select this + option. Drivers that support the default clients should + select DRM_CLIENT_SELECTION instead. + +config DRM_CLIENT_LIB + tristate + depends on DRM + select DRM_KMS_HELPER if DRM_FBDEV_EMULATION + select FB_CORE if DRM_FBDEV_EMULATION + help + This option enables the DRM client library and selects all + modules and components according to the enabled clients. + +config DRM_CLIENT_SELECTION + tristate + depends on DRM + select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION + help + Drivers that support in-kernel DRM clients have to select this + option. + +config DRM_CLIENT_SETUP + bool + depends on DRM_CLIENT_SELECTION + help + Enables the DRM client selection. DRM drivers that support the + default clients should select DRM_CLIENT_SELECTION instead. + +menu "Supported DRM clients" + depends on DRM_CLIENT_SELECTION + config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" - depends on DRM + depends on DRM_CLIENT_SELECTION + select DRM_CLIENT + select DRM_CLIENT_SETUP select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE default FB help @@ -251,6 +287,8 @@ config DRM_FBDEV_LEAK_PHYS_SMEM If in doubt, say "N" or spread the word to your closed source library vendor. 
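To make the new option split concrete, here is a minimal Kconfig sketch of how a driver opts in under this scheme; DRM_FOO is a hypothetical symbol, not part of this series (the real DRM_HYPERV hunk further down follows the same pattern):

config DRM_FOO
	tristate "Example KMS driver"
	depends on DRM
	select DRM_CLIENT_SELECTION
	select DRM_KMS_HELPER
	help
	  Selecting DRM_CLIENT_SELECTION is all a driver has to do; it
	  pulls in DRM_CLIENT_LIB (and with it the fbdev client) only
	  when DRM_FBDEV_EMULATION is enabled, instead of each driver
	  selecting the fbdev helpers directly.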
+endmenu + config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" depends on DRM @@ -320,19 +358,21 @@ config DRM_TTM_HELPER tristate depends on DRM select DRM_TTM + select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help Helpers for ttm-based gem objects config DRM_GEM_DMA_HELPER tristate depends on DRM - select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION + select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help Choose this if you need the GEM DMA helper functions config DRM_GEM_SHMEM_HELPER tristate depends on DRM && MMU + select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help Choose this if you need the GEM shmem helper functions @@ -472,6 +512,7 @@ source "drivers/gpu/drm/imagination/Kconfig" config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 784229d4504d..463afad1b5ca 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -34,15 +34,12 @@ endif subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror drm-y := \ - drm_aperture.o \ drm_atomic.o \ drm_atomic_uapi.o \ drm_auth.o \ drm_blend.o \ drm_bridge.o \ drm_cache.o \ - drm_client.o \ - drm_client_modeset.o \ drm_color_mgmt.o \ drm_connector.o \ drm_crtc.o \ @@ -68,6 +65,7 @@ drm-y := \ drm_prime.o \ drm_print.o \ drm_property.o \ + drm_rect.o \ drm_syncobj.o \ drm_sysfs.o \ drm_trace_points.o \ @@ -75,6 +73,10 @@ drm-y := \ drm_vblank_work.o \ drm_vma_manager.o \ drm_writeback.o +drm-$(CONFIG_DRM_CLIENT) += \ + drm_client.o \ + drm_client_event.o \ + drm_client_modeset.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o @@ -140,7 +142,6 @@ drm_kms_helper-y := \ drm_modeset_helper.o \ drm_plane_helper.o \ drm_probe_helper.o \ - drm_rect.o \ drm_self_refresh_helper.o \ drm_simple_kms_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o @@ -148,6 +149,14 @@ drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o # +# DRM clients +# + +drm_client_lib-y := drm_client_setup.o +drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o +obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o + +# # Drivers and the rest # diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 0051fb1b437f..41fa3377d9cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -5,7 +5,10 @@ config DRM_AMDGPU depends on DRM && PCI && MMU depends on !UML select FW_LOADER + select DRM_CLIENT + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index b0f95a7649bf..3a588fecb0c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -85,16 +85,9 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - - 
adev->ip_blocks[i].status.hw = false; } return 0; @@ -246,7 +239,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) dev_err(adev->dev, "Failed to get BIF handle\n"); return -EINVAL; } - r = cmn_block->version->funcs->resume(adev); + r = amdgpu_ip_block_resume(cmn_block); if (r) return r; @@ -282,15 +275,10 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - adev->ip_blocks[i].status.hw = true; + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) + return r; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -304,7 +292,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", @@ -417,6 +405,7 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = { static struct amdgpu_reset_handler *aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = { &aldebaran_mode2_handler, + &xgmi_reset_on_init_handler, }; int aldebaran_reset_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9b1e0ede05a4..d8bc6da50016 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -118,7 +118,7 @@ #define MAX_GPU_INSTANCE 64 -#define GFX_SLICE_PERIOD msecs_to_jiffies(250) +#define GFX_SLICE_PERIOD_MS 250 struct amdgpu_gpu_instance { struct amdgpu_device *adev; @@ -131,10 +131,6 @@ struct amdgpu_mgpu_info { uint32_t num_gpu; uint32_t num_dgpu; uint32_t num_apu; - - /* delayed reset_func for XGMI configuration if necessary */ - struct delayed_work delayed_reset_work; - bool pending_reset; }; enum amdgpu_ss { @@ -303,6 +299,12 @@ extern int amdgpu_wbrf; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) +/* reset mask */ +#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. 
*/ +#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */ +#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */ +#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */ + /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 @@ -365,8 +367,11 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, u64 *flags); int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); -bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, +bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type); +int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block); + +int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block); #define AMDGPU_MAX_IP_NUM 16 @@ -389,6 +394,7 @@ struct amdgpu_ip_block_version { struct amdgpu_ip_block { struct amdgpu_ip_block_status status; const struct amdgpu_ip_block_version *version; + struct amdgpu_device *adev; }; int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, @@ -563,6 +569,7 @@ enum amd_reset_method { AMD_RESET_METHOD_MODE2, AMD_RESET_METHOD_BACO, AMD_RESET_METHOD_PCI, + AMD_RESET_METHOD_ON_INIT, }; struct amdgpu_video_codec_info { @@ -821,6 +828,24 @@ struct amdgpu_mqd { struct amdgpu_mqd_prop *p); }; +/* + * Custom init levels could be defined for different situations where a full + * initialization of all hardware blocks is not expected. Sample cases are + * custom init sequences after resume from S0i3/S3, reset on initialization, + * partial reset of blocks, etc. Presently, this defines only two levels. Levels + * are described in corresponding struct definitions - amdgpu_init_default, + * amdgpu_init_minimal_xgmi. + */ +enum amdgpu_init_lvl_id { + AMDGPU_INIT_LEVEL_DEFAULT, + AMDGPU_INIT_LEVEL_MINIMAL_XGMI, +}; + +struct amdgpu_init_level { + enum amdgpu_init_lvl_id level; + uint32_t hwini_ip_block_mask; +}; + #define AMDGPU_RESET_MAGIC_NUM 64 #define AMDGPU_MAX_DF_PERFMONS 4 struct amdgpu_reset_domain; @@ -1092,8 +1117,6 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; - /* indicate amdgpu suspension status */ - bool suspend_complete; enum pp_mp1_state mp1_state; struct amdgpu_doorbell_index doorbell_index; @@ -1166,6 +1189,8 @@ struct amdgpu_device { bool enforce_isolation[MAX_XCP]; /* Added this mutex for cleaner shader isolation between GFX and compute processes */ struct mutex enforce_isolation_mutex; + + struct amdgpu_init_level *init_lvl; }; static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, @@ -1261,6 +1286,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_reset_context *reset_context); +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context); + int emu_soc_asic_init(struct amdgpu_device *adev); /* @@ -1443,6 +1470,8 @@ struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev); struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, struct dma_fence *gang); bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring); +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); bool amdgpu_has_atpx_dgpu_power_cntl(void); bool amdgpu_is_atpx_hybrid(void);
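The init level machinery declared above reduces to a bitmask test: a block's hardware init runs only if its bit is set in hwini_ip_block_mask for the current level. A standalone sketch of that gating, with illustrative block indices (the driver's real check, amdgpu_ip_member_of_hwini(), appears in the amdgpu_device.c hunk below):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for a few enum amd_ip_block_type values. */
enum { BLK_COMMON, BLK_GMC, BLK_IH, BLK_PSP, BLK_SMC, BLK_GFX };

/* Reduced level: only the blocks needed before an XGMI hive reset. */
static const uint32_t minimal_xgmi_mask =
	(1U << BLK_COMMON) | (1U << BLK_GMC) | (1U << BLK_IH) |
	(1U << BLK_PSP) | (1U << BLK_SMC);

static bool member_of_hwini(uint32_t hwini_mask, unsigned int block)
{
	return (hwini_mask & (1U << block)) != 0;
}

/*
 * member_of_hwini(minimal_xgmi_mask, BLK_GFX) is false, so GFX hardware
 * init is skipped until the hive reset completes and the level returns
 * to AMDGPU_INIT_LEVEL_DEFAULT.
 */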
-bool amdgpu_atpx_dgpu_req_power_for_displays(void); bool amdgpu_has_atpx(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } static inline bool amdgpu_is_atpx_hybrid(void) { return false; } -static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } static inline bool amdgpu_has_atpx(void) { return false; } #endif -#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void); -#else -static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } -#endif - /* * KMS */ @@ -1619,4 +1640,6 @@ extern const struct attribute_group amdgpu_vram_mgr_attr_group; extern const struct attribute_group amdgpu_gtt_mgr_attr_group; extern const struct attribute_group amdgpu_flash_attr_group; +void amdgpu_set_init_level(struct amdgpu_device *adev, + enum amdgpu_init_lvl_id lvl); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 2ca127173135..9d6345146495 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_ return -EINVAL; } - if (start + count >= max_count) + if (start + count > max_count) return -EINVAL; count = min_t(int, count, max_count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index bf6c4a0d0525..ec5e0dcf8613 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -98,9 +98,9 @@ enum { ACP_TILE_DSP2, }; -static int acp_sw_init(void *handle) +static int acp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->acp.parent = adev->dev; @@ -112,9 +112,9 @@ static int acp_sw_init(void *handle) return 0; } -static int acp_sw_fini(void *handle) +static int acp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->acp.cgs_device) amdgpu_cgs_destroy_device(adev->acp.cgs_device); @@ -219,10 +219,10 @@ static const struct dmi_system_id acp_quirk_table[] = { /** * acp_hw_init - start and test ACP block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int acp_hw_init(void *handle) +static int acp_hw_init(struct amdgpu_ip_block *ip_block) { int r; u64 acp_base; @@ -230,13 +230,7 @@ static int acp_hw_init(void *handle) u32 count = 0; struct i2s_platform_data *i2s_pdata = NULL; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - const struct amdgpu_ip_block *ip_block = - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); - - if (!ip_block) - return -EINVAL; + struct amdgpu_device *adev = ip_block->adev; r = amd_acp_hw_init(adev->acp.cgs_device, ip_block->version->major, ip_block->version->minor); @@ -503,14 +497,14 @@ failure: /** * acp_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* */ -static int acp_hw_fini(void *handle) +static int acp_hw_fini(struct amdgpu_ip_block *ip_block) { u32 val = 0; u32 count = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* return early if no ACP */ if (!adev->acp.acp_genpd) { @@ -565,9 +559,9 @@ return 0; } -static int acp_suspend(void *handle) +static int acp_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* power up on suspend */ if (!adev->acp.acp_cell) @@ -575,9 +569,9 @@ return 0; } -static int acp_resume(void *handle) +static int acp_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* power down again on resume */ if (!adev->acp.acp_cell) @@ -585,26 +579,11 @@ return 0; } -static int acp_early_init(void *handle) -{ - return 0; -} - static bool acp_is_idle(void *handle) { return true; } -static int acp_wait_for_idle(void *handle) -{ - return 0; -} - -static int acp_soft_reset(void *handle) -{ - return 0; -} - static int acp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -624,8 +603,6 @@ static const struct amd_ip_funcs acp_ip_funcs = { .name = "acp_ip", - .early_init = acp_early_init, - .late_init = NULL, .sw_init = acp_sw_init, .sw_fini = acp_sw_fini, .hw_init = acp_hw_init, @@ -633,12 +610,8 @@ .suspend = acp_suspend, .resume = acp_resume, .is_idle = acp_is_idle, - .wait_for_idle = acp_wait_for_idle, - .soft_reset = acp_soft_reset, .set_clockgating_state = acp_set_clockgating_state, .set_powergating_state = acp_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version acp_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 7dd55ed57c1d..b8d4e07d2043 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -800,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, return -EIO; } + kfree(info); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 4f08b153cb66..3afcd1e8aa54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -834,6 +834,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + if (!kiq_ring->sched.ready || adev->job_hang) + return 0; + ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL); if (!ring_funcs) return -ENOMEM; @@ -858,8 +861,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); - if (kiq_ring->sched.ready && !adev->job_hang) - r = amdgpu_ring_test_helper(kiq_ring); + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that the unmap-queues packet submitted above got + * processed successfully before returning.
+ */ + r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); @@ -889,3 +898,27 @@ int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id) return kgd2kfd_start_sched(adev->kfd.dev, node_id); } + +/* check if there are KFD queues active */ +bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id) +{ + if (!adev->kfd.init_complete) + return false; + + return kgd2kfd_compute_active(adev->kfd.dev, node_id); +} + +/* Config CGTT_SQ_CLK_CTRL */ +int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable) +{ + int r; + + if (!adev->kfd.init_complete) + return 0; + + r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable, + reg_override_enable, perfmon_override_enable); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index f9d119448442..4b80ad860639 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -266,6 +266,10 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, u32 inst); int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id); int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id); +int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable); +bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id); + /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when @@ -428,6 +432,7 @@ int kgd2kfd_check_and_lock_kfd(void); void kgd2kfd_unlock_kfd(void); int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); +bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id); #else static inline int kgd2kfd_init(void) { @@ -508,5 +513,10 @@ static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) { return 0; } + +static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) +{ + return false; +} #endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 9435af2e6bdc..9abf29b58ac7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -299,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus if (r) goto out; } else { - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 3bc0cbf45bc5..cc66ebb7bae1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -944,9 +944,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * * @adev: Handle of device whose registers are to be read * @queue_idx: Index of queue in the queue-map bit-field - * @wave_cnt: Output parameter updated with number of waves in flight - * @vmid: Output parameter updated with VMID of queue whose wave count - * is being collected + * @queue_cnt: Stores the wave count and doorbell offset for an active queue * @inst: xcc's instance 
number on a multi-XCC setup */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, @@ -1133,10 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev, uint32_t low, high; uint64_t queue_addr = 0; - if (!adev->debug_exp_resets && - !adev->gfx.num_gfx_rings) - return 0; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); amdgpu_gfx_rlc_enter_safe_mode(adev, inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index fa572ba7f9fc..f30548f4c3b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2524,11 +2524,14 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, /* First eviction, stop the queues */ r = kgd2kfd_quiesce_mm(mni->mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR); - if (r) + + if (r && r != -ESRCH) pr_err("Failed to quiesce KFD\n"); - queue_delayed_work(system_freezable_wq, - &process_info->restore_userptr_work, - msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); + + if (r != -ESRCH) + queue_delayed_work(system_freezable_wq, + &process_info->restore_userptr_work, + msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); } mutex_unlock(&process_info->notifier_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 0c8975ac5af9..093141ad6ed0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1145,8 +1145,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, return 0; } -void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, - u32 eng_clock, u32 mem_clock) +int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, + u32 eng_clock, u32 mem_clock) { SET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings); @@ -1161,8 +1161,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, if (mem_clock) args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + return amdgpu_atom_execute_table(adev->mode_info.atom_context, index, + (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 0811474e8fd3..0e16432d9a72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -163,8 +163,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, bool strobe_mode, struct atom_mpll_param *mpll_param); -void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, - u32 eng_clock, u32 mem_clock); +int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, + u32 eng_clock, u32 mem_clock); bool amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 375f02002579..3893e6fc2f03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -89,18 +89,6 @@ bool amdgpu_is_atpx_hybrid(void) return amdgpu_atpx_priv.atpx.is_hybrid; } -bool amdgpu_atpx_dgpu_req_power_for_displays(void) -{ - return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; 
-} - -#if defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void) -{ - return amdgpu_atpx_priv.dhandle; -} -#endif - /** * amdgpu_atpx_call - call an ATPX method * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 9da4414de617..a68338cb7b4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -2095,6 +2095,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog) amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm); + amdgpu_debugfs_jpeg_sched_mask_init(adev); + amdgpu_debugfs_gfx_sched_mask_init(adev); + amdgpu_debugfs_compute_sched_mask_init(adev); + amdgpu_debugfs_sdma_sched_mask_init(adev); + amdgpu_ras_debugfs_create_all(adev); amdgpu_rap_debugfs_init(adev); amdgpu_securedisplay_debugfs_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 5ac59b62020c..946c48829f19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -203,6 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, struct amdgpu_coredump_info *coredump = data; struct drm_print_iterator iter; struct amdgpu_vm_fault_info *fault_info; + struct amdgpu_ip_block *ip_block; int ver; iter.data = buffer; @@ -282,13 +283,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, /* dump the ip state for each ip */ drm_printf(&p, "IP Dump\n"); for (int i = 0; i < coredump->adev->num_ip_blocks; i++) { - if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) { - drm_printf(&p, "IP: %s\n", - coredump->adev->ip_blocks[i] - .version->funcs->name); - coredump->adev->ip_blocks[i] - .version->funcs->print_ip_state( - (void *)coredump->adev, &p); + ip_block = &coredump->adev->ip_blocks[i]; + if (ip_block->version->funcs->print_ip_state) { + drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name); + ip_block->version->funcs->print_ip_state(ip_block, &p); drm_printf(&p, "\n"); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c2394c8b4d6b..0171d240fcb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -25,6 +25,8 @@ * Alex Deucher * Jerome Glisse */ + +#include <linux/aperture.h> #include <linux/power_supply.h> #include <linux/kthread.h> #include <linux/module.h> @@ -35,10 +37,9 @@ #include <linux/pci-p2pdma.h> #include <linux/apple-gmux.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <linux/device.h> @@ -144,6 +145,51 @@ const char *amdgpu_asic_name[] = { "LAST", }; +#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM - 1, 0) +/* + * Default init level where all blocks are expected to be initialized. This is + * the level of initialization expected by default and also after a full reset + * of the device. + */ +struct amdgpu_init_level amdgpu_init_default = { + .level = AMDGPU_INIT_LEVEL_DEFAULT, + .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL, +}; + +/* + * Minimal blocks needed to be initialized before a XGMI hive can be reset. This + * is used for cases like reset on initialization where the entire hive needs to + * be reset before first use. 
+ */ +struct amdgpu_init_level amdgpu_init_minimal_xgmi = { + .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI, + .hwini_ip_block_mask = + BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) | + BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) | + BIT(AMD_IP_BLOCK_TYPE_PSP) +}; + +static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev, + enum amd_ip_block_type block) +{ + return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0; +} + +void amdgpu_set_init_level(struct amdgpu_device *adev, + enum amdgpu_init_lvl_id lvl) +{ + switch (lvl) { + case AMDGPU_INIT_LEVEL_MINIMAL_XGMI: + adev->init_lvl = &amdgpu_init_minimal_xgmi; + break; + case AMDGPU_INIT_LEVEL_DEFAULT: + fallthrough; + default: + adev->init_lvl = &amdgpu_init_default; + break; + } +} + static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev); /** @@ -227,6 +273,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev) sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state); } +int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block) +{ + int r; + + if (ip_block->version->funcs->suspend) { + r = ip_block->version->funcs->suspend(ip_block); + if (r) { + dev_err(ip_block->adev->dev, + "suspend of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); + return r; + } + } + + ip_block->status.hw = false; + return 0; +} + +int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block) +{ + int r; + + if (ip_block->version->funcs->resume) { + r = ip_block->version->funcs->resume(ip_block); + if (r) { + dev_err(ip_block->adev->dev, + "resume of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); + return r; + } + } + + ip_block->status.hw = true; + return 0; +} + /** * DOC: board_info * @@ -1655,7 +1737,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) } /* Don't post if we need to reset whole hive on init */ - if (adev->gmc.xgmi.pending_reset) + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) return false; if (adev->has_hw_reset) { @@ -2159,9 +2241,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, if (!adev->ip_blocks[i].status.valid) continue; if (adev->ip_blocks[i].version->type == block_type) { + if (adev->ip_blocks[i].version->funcs->wait_for_idle) { + r = adev->ip_blocks[i].version->funcs->wait_for_idle( + &adev->ip_blocks[i]); + if (r) + return r; + } break; } } @@ -2170,26 +2255,24 @@ } /** - * amdgpu_device_ip_is_idle - is the hardware IP idle + * amdgpu_device_ip_is_valid - is the hardware IP enabled * * @adev: amdgpu_device pointer * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) * - * Check if the hardware IP is idle or not. - * Returns true if it the IP is idle, false if not. + * Check if the hardware IP is enabled or not. + * Returns true if the IP is enabled, false if not.
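The init-level plumbing introduced above reduces to one bit per amd_ip_block_type. A minimal sketch of the mask logic, assuming the block types are small consecutive enum values (the example_* names are invented for illustration):

	#include <linux/bits.h>

	/* The minimal XGMI level admits exactly the five blocks needed
	 * before a hive reset: GMC, SMC, COMMON, IH and PSP. */
	#define EXAMPLE_MINIMAL_XGMI_MASK					\
		(BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |	\
		 BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |	\
		 BIT(AMD_IP_BLOCK_TYPE_PSP))

	static bool example_block_in_level(u32 hwini_mask, unsigned int block_type)
	{
		/* same bit test as amdgpu_ip_member_of_hwini() above */
		return hwini_mask & BIT(block_type);
	}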
*/ -bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, - enum amd_ip_block_type block_type) +bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) { int i; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) - continue; if (adev->ip_blocks[i].version->type == block_type) - return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); + return adev->ip_blocks[i].status.valid; } - return true; + return false; } @@ -2271,6 +2354,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks, ip_block_version->funcs->name); + adev->ip_blocks[adev->num_ip_blocks].adev = adev; + adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; return 0; @@ -2566,25 +2651,25 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) total = true; for (i = 0; i < adev->num_ip_blocks; i++) { + ip_block = &adev->ip_blocks[i]; + if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_WARN("disabled ip block: %d <%s>\n", i, adev->ip_blocks[i].version->funcs->name); adev->ip_blocks[i].status.valid = false; - } else { - if (adev->ip_blocks[i].version->funcs->early_init) { - r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); - if (r == -ENOENT) { - adev->ip_blocks[i].status.valid = false; - } else if (r) { - DRM_ERROR("early_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - total = false; - } else { - adev->ip_blocks[i].status.valid = true; - } + } else if (ip_block->version->funcs->early_init) { + r = ip_block->version->funcs->early_init(ip_block); + if (r == -ENOENT) { + adev->ip_blocks[i].status.valid = false; + } else if (r) { + DRM_ERROR("early_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + total = false; } else { adev->ip_blocks[i].status.valid = true; } + } else { + adev->ip_blocks[i].status.valid = true; } /* get the vbios after the asic_funcs are set up */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { @@ -2633,10 +2718,13 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hw) continue; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -2658,7 +2746,10 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hw) continue; - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -2681,6 +2772,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) continue; + if (!amdgpu_ip_member_of_hwini(adev, + AMD_IP_BLOCK_TYPE_PSP)) + break; + if (!adev->ip_blocks[i].status.sw) continue; @@ -2689,22 +2784,18 @@ 
static int amdgpu_device_fw_loading(struct amdgpu_device *adev) break; if (amdgpu_in_reset(adev) || adev->in_suspend) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } } else { - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } - - adev->ip_blocks[i].status.hw = true; break; } } @@ -2786,6 +2877,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) */ static int amdgpu_device_ip_init(struct amdgpu_device *adev) { + bool init_badpage; int i, r; r = amdgpu_ras_init(adev); @@ -2795,17 +2887,23 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; - r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); - if (r) { - DRM_ERROR("sw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - goto init_failed; + if (adev->ip_blocks[i].version->funcs->sw_init) { + r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); + if (r) { + DRM_ERROR("sw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + goto init_failed; + } } adev->ip_blocks[i].status.sw = true; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { /* need to do common hw init early so everything is set up for gmc */ - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); goto init_failed; @@ -2822,7 +2920,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); goto init_failed; } - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); goto init_failed; @@ -2895,7 +2993,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) * Note: theoretically, this should be called before all vram allocations * to protect retired page from abusing */ - r = amdgpu_ras_recovery_init(adev); + init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); + r = amdgpu_ras_recovery_init(adev, init_badpage); if (r) goto init_failed; @@ -2935,7 +3034,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, true); /* Don't init kfd if whole hive need to be reset during init */ - if (!adev->gmc.xgmi.pending_reset) { + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); } @@ -3135,7 +3234,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->funcs->late_init) { - r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("late_init of IP block <%s> failed %d\n", 
adev->ip_blocks[i].version->funcs->name, r); @@ -3206,6 +3305,25 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } +static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block) +{ + int r; + + if (!ip_block->version->funcs->hw_fini) { + DRM_ERROR("hw_fini of IP block <%s> not defined\n", + ip_block->version->funcs->name); + } else { + r = ip_block->version->funcs->hw_fini(ip_block); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); + } + } + + ip_block->status.hw = false; +} + /** * amdgpu_device_smu_fini_early - smu hw_fini wrapper * @@ -3215,7 +3333,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) */ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) { - int i, r; + int i; if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) return; @@ -3224,13 +3342,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - adev->ip_blocks[i].status.hw = false; + amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); break; } } @@ -3244,7 +3356,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) if (!adev->ip_blocks[i].version->funcs->early_fini) continue; - r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); + r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); if (r) { DRM_DEBUG("early_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -3263,14 +3375,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - - adev->ip_blocks[i].status.hw = false; + amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); } if (amdgpu_sriov_vf(adev)) { @@ -3316,12 +3421,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_ib_pool_fini(adev); amdgpu_seq64_fini(adev); } - - r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + if (adev->ip_blocks[i].version->funcs->sw_fini) { + r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } } adev->ip_blocks[i].status.sw = false; adev->ip_blocks[i].status.valid = false; @@ -3331,7 +3437,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.late_initialized) continue; if (adev->ip_blocks[i].version->funcs->late_fini) - adev->ip_blocks[i].version->funcs->late_fini((void *)adev); + adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]); adev->ip_blocks[i].status.late_initialized = false; } @@ -3403,15 +3509,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) continue; /* XXX handle errors */ - r = adev->ip_blocks[i].version->funcs->suspend(adev); - /* XXX handle errors */ - if 
(r) { - DRM_ERROR("suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = false; } return 0; @@ -3449,14 +3549,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } /* skip unnecessary suspend if we do not initialize them yet */ - if (adev->gmc.xgmi.pending_reset && - !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { - adev->ip_blocks[i].status.hw = false; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) continue; - } /* skip suspend of gfx/mes and psp for S0ix * gfx is in gfxoff state, so on resume it will exit gfxoff just @@ -3490,13 +3585,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) continue; /* XXX handle errors */ - r = adev->ip_blocks[i].version->funcs->suspend(adev); - /* XXX handle errors */ - if (r) { - DRM_ERROR("suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); adev->ip_blocks[i].status.hw = false; + /* handle putting the SMC in the appropriate state */ if (!amdgpu_sriov_vf(adev)) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { @@ -3570,7 +3661,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) !block->status.valid) continue; - r = block->version->funcs->hw_init(adev); + r = block->version->funcs->hw_init(&adev->ip_blocks[i]); DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; @@ -3609,15 +3700,19 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) block->status.hw) continue; - if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) - r = block->version->funcs->resume(adev); - else - r = block->version->funcs->hw_init(adev); - - DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); - if (r) - return r; - block->status.hw = true; + if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) + return r; + } else { + r = block->version->funcs->hw_init(&adev->ip_blocks[i]); + if (r) { + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + block->status.hw = true; + } } } @@ -3648,13 +3743,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = true; } } @@ -3686,13 +3777,9 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = 
amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = true; } return 0; @@ -4149,7 +4236,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, * for throttling interrupt) = 60 seconds. */ ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); + ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1); + ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); + ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE); /* Registers mapping */ /* TODO: block userspace mapping of io register */ @@ -4193,13 +4283,19 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_device_set_mcbp(adev); + /* + * By default, use default mode where all blocks are expected to be + * initialized. At present a 'swinit' of blocks is required to be + * completed before the need for a different level is detected. + */ + amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT); /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) return r; /* Get rid of things like offb */ - r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); + r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name); if (r) return r; @@ -4265,20 +4361,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { if (adev->gmc.xgmi.num_physical_nodes) { dev_info(adev->dev, "Pending hive reset.\n"); - adev->gmc.xgmi.pending_reset = true; - /* Only need to init necessary block for SMU to handle the reset */ - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) - continue; - if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { - DRM_DEBUG("IP %s disabled for hw_init.\n", - adev->ip_blocks[i].version->funcs->name); - adev->ip_blocks[i].status.hw = true; - } - } + amdgpu_set_init_level(adev, + AMDGPU_INIT_LEVEL_MINIMAL_XGMI); } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && !amdgpu_device_has_display_hardware(adev)) { r = psp_gpu_reset(adev); @@ -4386,7 +4470,7 @@ fence_driver_init: /* enable clockgating, etc. after ib tests, etc. since some blocks require * explicit gating rather than handling it automatically. 
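The second ratelimit state initialized in amdgpu_device_init() above throttles RAS telemetry messages to one per five-second window; with RATELIMIT_MSG_ON_RELEASE, the count of suppressed messages is reported when the state is released rather than inline. A hedged usage sketch (the log text and helper name are invented):

	#include <linux/ratelimit.h>

	static void example_log_ras_telemetry(struct amdgpu_device *adev)
	{
		/* __ratelimit() returns nonzero while the 5 s / 1 message
		 * budget set up in amdgpu_device_init() is not exhausted */
		if (__ratelimit(&adev->virt.ras_telemetry_rs))
			dev_warn(adev->dev, "RAS telemetry event\n");
	}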
*/ - if (!adev->gmc.xgmi.pending_reset) { + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { r = amdgpu_device_ip_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); @@ -4436,6 +4520,7 @@ fence_driver_init: amdgpu_fru_sysfs_init(adev); amdgpu_reg_state_sysfs_init(adev); + amdgpu_xcp_cfg_sysfs_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4463,9 +4548,8 @@ fence_driver_init: if (px) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - if (adev->gmc.xgmi.pending_reset) - queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) + amdgpu_xgmi_reset_on_init(adev); amdgpu_device_check_iommu_direct_map(adev); @@ -4559,6 +4643,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_fru_sysfs_fini(adev); amdgpu_reg_state_sysfs_fini(adev); + amdgpu_xcp_cfg_sysfs_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -4694,7 +4779,7 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; if (!adev->ip_blocks[i].version->funcs->prepare_suspend) continue; - r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); + r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); if (r) goto unprepare; } @@ -4711,13 +4796,13 @@ unprepare: * amdgpu_device_suspend - initiate device suspend * * @dev: drm dev pointer - * @fbcon : notify the fbdev of suspend + * @notify_clients: notify in-kernel DRM clients * * Puts the hw in the suspend state (all asics). * Returns 0 for success or an error on failure. * Called at driver suspend. */ -int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; @@ -4737,8 +4822,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) DRM_WARN("smart shift update failed\n"); - if (fbcon) - drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); + if (notify_clients) + drm_client_dev_suspend(adev_to_drm(adev), false); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -4773,13 +4858,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) * amdgpu_device_resume - initiate device resume * * @dev: drm dev pointer - * @fbcon : notify the fbdev of resume + * @notify_clients: notify in-kernel DRM clients * * Bring the hw back to operating state (all asics). * Returns 0 for success or an error on failure. * Called at driver resume. 
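The fbcon to notify_clients rename tracks the API switch from drm_fb_helper_set_suspend_unlocked() to the generic drm_client_event calls. A hedged sketch of the new pairing (the helper name is invented; the second argument states whether the caller already holds the console lock):

	#include <drm/drm_client_event.h>

	static void example_notify_clients(struct drm_device *drm, bool suspending)
	{
		if (suspending)
			drm_client_dev_suspend(drm, false);
		else
			drm_client_dev_resume(drm, false);
	}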
*/ -int amdgpu_device_resume(struct drm_device *dev, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; @@ -4835,8 +4920,8 @@ exit: /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - if (fbcon) - drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); + if (notify_clients) + drm_client_dev_resume(adev_to_drm(adev), false); amdgpu_ras_resume(adev); @@ -4898,7 +4983,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->funcs->check_soft_reset) adev->ip_blocks[i].status.hang = - adev->ip_blocks[i].version->funcs->check_soft_reset(adev); + adev->ip_blocks[i].version->funcs->check_soft_reset( + &adev->ip_blocks[i]); if (adev->ip_blocks[i].status.hang) { dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); asic_hang = true; @@ -4927,7 +5013,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->pre_soft_reset) { - r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -4989,7 +5075,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->soft_reset) { - r = adev->ip_blocks[i].version->funcs->soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -5018,7 +5104,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->post_soft_reset) - r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -5103,6 +5189,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) amdgpu_ras_resume(adev); + + amdgpu_virt_ras_telemetry_post_reset(adev); + return 0; } @@ -5309,7 +5398,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, for (i = 0; i < tmp_adev->num_ip_blocks; i++) if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) tmp_adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)tmp_adev); + ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); } @@ -5325,74 +5414,25 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -int amdgpu_do_asic_reset(struct list_head *device_list_handle, - struct amdgpu_reset_context *reset_context) +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) { - struct amdgpu_device *tmp_adev = NULL; - bool need_full_reset, skip_hw_reset, vram_lost = false; - int r = 0; - - /* Try reset handler method first */ - tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, - reset_list); - - reset_context->reset_device_list = device_list_handle; - r = amdgpu_reset_perform_reset(tmp_adev, reset_context); - /* If reset handler not implemented, continue; otherwise return */ - if (r == -EOPNOTSUPP) - r = 0; - else - return r; - - /* Reset handler not implemented, use the default method */ - need_full_reset = - 
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); - - /* - * ASIC reset has to be done on all XGMI hive nodes ASAP - * to allow proper links negotiation in FW (within 1 sec) - */ - if (!skip_hw_reset && need_full_reset) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - /* For XGMI run all resets in parallel to speed up the process */ - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - tmp_adev->gmc.xgmi.pending_reset = false; - if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) - r = -EALREADY; - } else - r = amdgpu_asic_reset(tmp_adev); + struct list_head *device_list_handle; + bool full_reset, vram_lost = false; + struct amdgpu_device *tmp_adev; + int r; - if (r) { - dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", - r, adev_to_drm(tmp_adev)->unique); - goto out; - } - } + device_list_handle = reset_context->reset_device_list; - /* For XGMI wait for all resets to complete before proceed */ - if (!r) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - flush_work(&tmp_adev->xgmi_reset_work); - r = tmp_adev->asic_reset_res; - if (r) - break; - } - } - } - } + if (!device_list_handle) + return -EINVAL; - if (!r && amdgpu_ras_intr_triggered()) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB); - } - - amdgpu_ras_intr_cleared(); - } + full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + r = 0; list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (need_full_reset) { + /* After reset, it's default init level */ + amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT); + if (full_reset) { /* post card */ amdgpu_ras_set_fed(tmp_adev, false); r = amdgpu_device_asic_init(tmp_adev); @@ -5448,7 +5488,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); + drm_client_dev_resume(adev_to_drm(tmp_adev), false); /* * The GPU enters bad state once faulty pages @@ -5482,7 +5522,6 @@ out: r = amdgpu_ib_ring_tests(tmp_adev); if (r) { dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); - need_full_reset = true; r = -EAGAIN; goto end; } @@ -5493,10 +5532,85 @@ out: } end: - if (need_full_reset) + return r; +} + +int amdgpu_do_asic_reset(struct list_head *device_list_handle, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *tmp_adev = NULL; + bool need_full_reset, skip_hw_reset; + int r = 0; + + /* Try reset handler method first */ + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + + reset_context->reset_device_list = device_list_handle; + r = amdgpu_reset_perform_reset(tmp_adev, reset_context); + /* If reset handler not implemented, continue; otherwise return */ + if (r == -EOPNOTSUPP) + r = 0; + else + return r; + + /* Reset handler not implemented, use the default method */ + need_full_reset = + test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + + /* + * ASIC reset has to be done on all XGMI hive nodes ASAP + * to allow proper links negotiation in FW (within 1 sec) + */ + if (!skip_hw_reset && need_full_reset) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* For XGMI run all resets in parallel to speed up 
the process */ + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work(system_unbound_wq, + &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + } else + r = amdgpu_asic_reset(tmp_adev); + + if (r) { + dev_err(tmp_adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", + r, adev_to_drm(tmp_adev)->unique); + goto out; + } + } + + /* For XGMI wait for all resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, device_list_handle, + reset_list) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + } + } + + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + amdgpu_ras_reset_error_count(tmp_adev, + AMDGPU_RAS_BLOCK__MMHUB); + } + + amdgpu_ras_intr_cleared(); + } + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r == -EAGAIN) set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); else clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + +out: return r; } @@ -5734,7 +5848,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); + drm_client_dev_suspend(adev_to_drm(tmp_adev), false); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -5824,7 +5938,7 @@ skip_hw_reset: if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) @@ -6092,6 +6206,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, bool p2p_access = !adev->gmc.xgmi.connected_to_cpu && !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); + if (!p2p_access) + dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", + pci_name(peer_adev->pdev)); bool is_large_bar = adev->gmc.visible_vram_size && adev->gmc.real_vram_size == adev->gmc.visible_vram_size; @@ -6331,7 +6448,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } amdgpu_device_unset_mp1_state(adev); @@ -6344,6 +6461,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); int r; + if (amdgpu_sriov_vf(adev)) + return false; + r = pci_save_state(pdev); if (!r) { kfree(adev->pci_state); @@ -6604,3 +6724,47 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, } return ret; } + +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring) +{ + ssize_t size = 0; + + if (!ring || !ring->adev) + return size; + + if (amdgpu_device_should_recover_gpu(ring->adev)) + size |= AMDGPU_RESET_TYPE_FULL; + + if (unlikely(!ring->adev->debug_disable_soft_recovery) && + !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) + size |= AMDGPU_RESET_TYPE_SOFT_RESET; + + return size; +} + +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset) +{ + ssize_t size = 0; + + if (supported_reset == 0) { + size += sysfs_emit_at(buf, size, "unsupported"); + size += sysfs_emit_at(buf, size, "\n"); + return size; + + } + + if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET) + size += sysfs_emit_at(buf, size, "soft "); + + if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) + size += sysfs_emit_at(buf, size, "queue "); + + if (supported_reset & 
AMDGPU_RESET_TYPE_PER_PIPE) + size += sysfs_emit_at(buf, size, "pipe "); + + if (supported_reset & AMDGPU_RESET_TYPE_FULL) + size += sysfs_emit_at(buf, size, "full "); + + size += sysfs_emit_at(buf, size, "\n"); + return size; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 4bd61c169ca8..1040204ac8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1723,45 +1723,85 @@ union nps_info { struct nps_info_v1_0 v1; }; +static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev, + union nps_info *nps_data) +{ + uint64_t vram_size, pos, offset; + struct nps_info_header *nhdr; + struct binary_header bhdr; + uint16_t checksum; + + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; + pos = vram_size - DISCOVERY_TMR_OFFSET; + amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false); + + offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset); + checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum); + + amdgpu_device_vram_access(adev, (pos + offset), nps_data, + sizeof(*nps_data), false); + + nhdr = (struct nps_info_header *)(nps_data); + if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data, + le32_to_cpu(nhdr->size_bytes), + checksum)) { + dev_err(adev->dev, "nps data refresh, checksum mismatch\n"); + return -EINVAL; + } + + return 0; +} + int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, uint32_t *nps_type, struct amdgpu_gmc_memrange **ranges, - int *range_cnt) + int *range_cnt, bool refresh) { struct amdgpu_gmc_memrange *mem_ranges; struct binary_header *bhdr; union nps_info *nps_info; + union nps_info nps_data; u16 offset; - int i; + int i, r; if (!nps_type || !range_cnt || !ranges) return -EINVAL; - if (!adev->mman.discovery_bin) { - dev_err(adev->dev, - "fetch mem range failed, ip discovery uninitialized\n"); - return -EINVAL; - } + if (refresh) { + r = amdgpu_discovery_refresh_nps_info(adev, &nps_data); + if (r) + return r; + nps_info = &nps_data; + } else { + if (!adev->mman.discovery_bin) { + dev_err(adev->dev, + "fetch mem range failed, ip discovery uninitialized\n"); + return -EINVAL; + } - bhdr = (struct binary_header *)adev->mman.discovery_bin; - offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); + bhdr = (struct binary_header *)adev->mman.discovery_bin; + offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); - if (!offset) - return -ENOENT; + if (!offset) + return -ENOENT; - /* If verification fails, return as if NPS table doesn't exist */ - if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) - return -ENOENT; + /* If verification fails, return as if NPS table doesn't exist */ + if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) + return -ENOENT; - nps_info = (union nps_info *)(adev->mman.discovery_bin + offset); + nps_info = + (union nps_info *)(adev->mman.discovery_bin + offset); + } switch (le16_to_cpu(nps_info->v1.header.version_major)) { case 1: + mem_ranges = kvcalloc(nps_info->v1.count, + sizeof(*mem_ranges), + GFP_KERNEL); + if (!mem_ranges) + return -ENOMEM; *nps_type = nps_info->v1.nps_type; *range_cnt = nps_info->v1.count; - mem_ranges = kvzalloc( - *range_cnt * sizeof(struct amdgpu_gmc_memrange), - GFP_KERNEL); for (i = 0; i < *range_cnt; i++) { mem_ranges[i].base_address = nps_info->v1.instance_info[i].base_address; @@ -2492,6 +2532,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); adev->ip_versions[UVD_HWIP][0] = 
IP_VERSION(1, 0, 1); adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); + adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); } else { adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); @@ -2508,6 +2549,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); + adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); } break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index f5d36525ec3e..b44d56465c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -33,6 +33,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, uint32_t *nps_type, struct amdgpu_gmc_memrange **ranges, - int *range_cnt); + int *range_cnt, bool refresh); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 81d9877c8735..38686203bea6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -23,6 +23,7 @@ */ #include <drm/amdgpu_drm.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem.h> @@ -231,8 +232,6 @@ int amdgpu_wbrf = -1; int amdgpu_damage_clips = -1; /* auto */ int amdgpu_umsch_mm_fwlog; -static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); - DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, "DRM_UT_CORE", "DRM_UT_DRIVER", @@ -247,9 +246,6 @@ DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), - .delayed_reset_work = __DELAYED_WORK_INITIALIZER( - mgpu_info.delayed_reset_work, - amdgpu_drv_delayed_reset_work_handler, 0), }; int amdgpu_ras_enable = -1; uint amdgpu_ras_mask = 0xffffffff; @@ -892,7 +888,7 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444); * the ABM algorithm, with 1 being the least reduction and 4 being the most * reduction. * - * Defaults to -1, or disabled. Userspace can only override this level after + * Defaults to -1, or auto. Userspace can only override this level after * boot if it's set to auto. 
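The corrected abm_level documentation above follows the usual tri-state module-parameter convention: -1 selects the automatic default, while explicit values override it. A hedged sketch of that convention (the example_* names are invented and not in the driver):

	#include <linux/minmax.h>

	static int example_effective_abm_level(int param, int auto_default)
	{
		if (param < 0)			/* -1: leave it to the driver */
			return auto_default;

		return clamp(param, 0, 4);	/* explicit levels 0-4 */
	}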
*/ int amdgpu_dm_abm_level = -1; @@ -2365,11 +2361,15 @@ retry_init: */ if (adev->mode_info.mode_config_initialized && !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) { + const struct drm_format_info *format; + /* select 8 bpp console on low vram cards */ if (adev->gmc.real_vram_size <= (32*1024*1024)) - drm_fbdev_ttm_setup(adev_to_drm(adev), 8); + format = drm_format_info(DRM_FORMAT_C8); else - drm_fbdev_ttm_setup(adev_to_drm(adev), 32); + format = NULL; + + drm_client_setup(adev_to_drm(adev), format); } ret = amdgpu_debugfs_init(adev); @@ -2434,6 +2434,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); amdgpu_xcp_dev_unplug(adev); + amdgpu_gmc_prepare_nps_mode_change(adev); drm_dev_unplug(dev); if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { @@ -2472,82 +2473,6 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) adev->mp1_state = PP_MP1_STATE_NONE; } -/** - * amdgpu_drv_delayed_reset_work_handler - work handler for reset - * - * @work: work_struct. - */ -static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) -{ - struct list_head device_list; - struct amdgpu_device *adev; - int i, r; - struct amdgpu_reset_context reset_context; - - memset(&reset_context, 0, sizeof(reset_context)); - - mutex_lock(&mgpu_info.mutex); - if (mgpu_info.pending_reset == true) { - mutex_unlock(&mgpu_info.mutex); - return; - } - mgpu_info.pending_reset = true; - mutex_unlock(&mgpu_info.mutex); - - /* Use a common context, just need to make sure full reset is done */ - reset_context.method = AMD_RESET_METHOD_NONE; - set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - reset_context.reset_req_dev = adev; - r = amdgpu_device_pre_asic_reset(adev, &reset_context); - if (r) { - dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", - r, adev_to_drm(adev)->unique); - } - if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work)) - r = -EALREADY; - } - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - flush_work(&adev->xgmi_reset_work); - adev->gmc.xgmi.pending_reset = false; - } - - /* reset function will rebuild the xgmi hive info , clear it now */ - for (i = 0; i < mgpu_info.num_dgpu; i++) - amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev); - - INIT_LIST_HEAD(&device_list); - - for (i = 0; i < mgpu_info.num_dgpu; i++) - list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list); - - /* unregister the GPU first, reset function will add them back */ - list_for_each_entry(adev, &device_list, reset_list) - amdgpu_unregister_gpu_instance(adev); - - /* Use a common context, just need to make sure full reset is done */ - set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); - r = amdgpu_do_asic_reset(&device_list, &reset_context); - - if (r) { - DRM_ERROR("reinit gpus failure"); - return; - } - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - if (!adev->kfd.init_complete) { - kgd2kfd_init_zone_device(adev); - amdgpu_amdkfd_device_init(adev); - amdgpu_amdkfd_drm_client_create(adev); - } - amdgpu_ttm_set_buffer_funcs_status(adev, true); - } -} - static int amdgpu_pmops_prepare(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); @@ -2580,7 +2505,6 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - 
adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2595,7 +2519,6 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); @@ -2982,6 +2905,7 @@ static const struct drm_driver amdgpu_kms_driver = { .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, + DRM_FBDEV_TTM_DRIVER_OPS, .fops = &amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, #ifdef CONFIG_PROC_FS @@ -3008,6 +2932,7 @@ const struct drm_driver amdgpu_partition_driver = { .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, + DRM_FBDEV_TTM_DRIVER_OPS, .fops = &amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, @@ -3068,6 +2993,12 @@ static int __init amdgpu_init(void) /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ amdgpu_amdkfd_init(); + if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) { + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_crit("Overdrive is enabled, please disable it before " + "reporting any bugs unrelated to overdrive.\n"); + } + /* let modprobe override vga console setting */ return pci_register_driver(&amdgpu_kms_pci_driver); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c index 35fee3e8cde2..8cd69836dd99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c @@ -200,7 +200,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, dev_err_ratelimited(&i2c_adap->dev, "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", eeprom_addr, buf_size, - read ? 
"read" : "write", EEPROM_OFFSET_SIZE); + str_read_write(read), EEPROM_OFFSET_SIZE); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index c7df7fa3459f..df2cf5c33925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -33,6 +33,7 @@ #include <drm/amdgpu_drm.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> +#include <drm/drm_file.h> #include "amdgpu.h" #include "amdgpu_vm.h" @@ -59,18 +60,25 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_mem_stats stats; + struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { }; ktime_t usage[AMDGPU_HW_IP_NUM]; - unsigned int hw_ip; + const char *pl_name[] = { + [TTM_PL_VRAM] = "vram", + [TTM_PL_TT] = "gtt", + [TTM_PL_SYSTEM] = "cpu", + [AMDGPU_PL_GDS] = "gds", + [AMDGPU_PL_GWS] = "gws", + [AMDGPU_PL_OA] = "oa", + [AMDGPU_PL_DOORBELL] = "doorbell", + }; + unsigned int hw_ip, i; int ret; - memset(&stats, 0, sizeof(stats)); - ret = amdgpu_bo_reserve(vm->root.bo, false); if (ret) return; - amdgpu_vm_get_memory(vm, &stats); + amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats)); amdgpu_bo_unreserve(vm->root.bo); amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage); @@ -82,24 +90,33 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) */ drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); - drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); - drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); - drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); - drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n", - stats.visible_vram/1024UL); + + for (i = 0; i < ARRAY_SIZE(pl_name); i++) { + if (!pl_name[i]) + continue; + + drm_print_memory_stats(p, + &stats[i].drm, + DRM_GEM_OBJECT_RESIDENT | + DRM_GEM_OBJECT_PURGEABLE, + pl_name[i]); + } + + /* Legacy amdgpu keys, alias to drm-resident-memory-: */ + drm_printf(p, "drm-memory-vram:\t%llu KiB\n", + stats[TTM_PL_VRAM].drm.resident/1024UL); + drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", + stats[TTM_PL_TT].drm.resident/1024UL); + drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", + stats[TTM_PL_SYSTEM].drm.resident/1024UL); + + /* Amdgpu specific memory accounting keys: */ drm_printf(p, "amd-evicted-vram:\t%llu KiB\n", - stats.evicted_vram/1024UL); - drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n", - stats.evicted_visible_vram/1024UL); + stats[TTM_PL_VRAM].evicted/1024UL); drm_printf(p, "amd-requested-vram:\t%llu KiB\n", - stats.requested_vram/1024UL); - drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n", - stats.requested_visible_vram/1024UL); + stats[TTM_PL_VRAM].requested/1024UL); drm_printf(p, "amd-requested-gtt:\t%llu KiB\n", - stats.requested_gtt/1024UL); - drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL); - drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL); - drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL); + stats[TTM_PL_TT].requested/1024UL); for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { if (!usage[hw_ip]) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 256b95232de5..b2033f8352f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -78,8 +78,9 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) if 
(adev->dummy_page_addr) return 0; - adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); + adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) { dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); adev->dummy_page_addr = 0; @@ -99,8 +100,9 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) { if (!adev->dummy_page_addr) return; - dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); adev->dummy_page_addr = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f1ffab5a1eae..f57cc72c43cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -87,16 +87,6 @@ int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, return bit; } -void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, - int *me, int *pipe, int *queue) -{ - *queue = bit % adev->gfx.me.num_queue_per_pipe; - *pipe = (bit / adev->gfx.me.num_queue_per_pipe) - % adev->gfx.me.num_pipe_per_me; - *me = (bit / adev->gfx.me.num_queue_per_pipe) - / adev->gfx.me.num_pipe_per_me; -} - bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue) { @@ -415,7 +405,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); + kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL); if (!kiq->mqd_backup) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); @@ -438,7 +428,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.me.mqd_backup[i]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); return -ENOMEM; @@ -462,7 +452,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.mec.mqd_backup[j]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); return -ENOMEM; @@ -525,6 +515,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev)) + return 0; + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { @@ -538,20 +531,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.compute_ring[j], RESET_QUEUES, 0, 0); } - - /** - * This is workaround: only skip kiq_ring test - * during ras recovery in suspend stage for gfx9.4.3 + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that the unmap queues packet submitted above has been + * processed successfully before returning.
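All four KIQ paths touched in these hunks follow the same commit-then-test pattern that the comments describe. Condensed into a hedged sketch (the helper is illustrative and not part of the patch):

	static int example_kiq_submit_and_drain(struct amdgpu_ring *kiq_ring)
	{
		/* ring the doorbell for the packets queued above ... */
		amdgpu_ring_commit(kiq_ring);

		/* ... then use the scratch-register ring test to confirm the
		 * KIQ has processed them before we return */
		return amdgpu_ring_test_helper(kiq_ring);
	}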
*/ - if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) && - amdgpu_ras_in_recovery(adev)) { - spin_unlock(&kiq->ring_lock); - return 0; - } + r = amdgpu_ring_test_helper(kiq_ring); - if (kiq_ring->sched.ready && !adev->job_hang) - r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); return r; @@ -579,8 +567,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&kiq->ring_lock); + if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang) + return 0; + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_gfx_rings)) { spin_unlock(&kiq->ring_lock); @@ -593,11 +584,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.gfx_ring[j], PREEMPT_QUEUES, 0, 0); } - } + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); - if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) + /* + * Ring test will do a basic scratch register change check. + * Just run this to ensure that the unmap queues packet submitted + * above has been processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&kiq->ring_lock); + spin_unlock(&kiq->ring_lock); + } return r; } @@ -702,7 +699,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[j]); } - + /* Submit map queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that the map queues packet submitted above has been + * processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) @@ -753,7 +756,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.gfx_ring[j]); } } - + /* Submit map queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that the map queues packet submitted above has been + * processed successfully before returning.
+ */ r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) @@ -895,6 +904,9 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; + if (amdgpu_sriov_vf(adev)) + return r; + if (adev->gfx.cp_ecc_error_irq.funcs) { r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) @@ -1363,35 +1375,35 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return count; } +static const char *xcp_desc[] = { + [AMDGPU_SPX_PARTITION_MODE] = "SPX", + [AMDGPU_DPX_PARTITION_MODE] = "DPX", + [AMDGPU_TPX_PARTITION_MODE] = "TPX", + [AMDGPU_QPX_PARTITION_MODE] = "QPX", + [AMDGPU_CPX_PARTITION_MODE] = "CPX", +}; + static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, struct device_attribute *addr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - char *supported_partition; + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + int size = 0, mode; + char *sep = ""; - /* TBD */ - switch (NUM_XCC(adev->gfx.xcc_mask)) { - case 8: - supported_partition = "SPX, DPX, QPX, CPX"; - break; - case 6: - supported_partition = "SPX, TPX, CPX"; - break; - case 4: - supported_partition = "SPX, DPX, CPX"; - break; - /* this seems only existing in emulation phase */ - case 2: - supported_partition = "SPX, CPX"; - break; - default: - supported_partition = "Not supported"; - break; + if (!xcp_mgr || !xcp_mgr->avail_xcp_modes) + return sysfs_emit(buf, "Not supported\n"); + + for_each_inst(mode, xcp_mgr->avail_xcp_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]); + sep = ", "; } - return sysfs_emit(buf, "%s\n", supported_partition); + size += sysfs_emit_at(buf, size, "\n"); + + return size; } static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) @@ -1586,9 +1598,11 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, if (adev->enforce_isolation[i] && !partition_values[i]) { /* Going from enabled to disabled */ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i)); + amdgpu_mes_set_enforce_isolation(adev, i, false); } else if (!adev->enforce_isolation[i] && partition_values[i]) { /* Going from disabled to enabled */ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); + amdgpu_mes_set_enforce_isolation(adev, i, true); } adev->enforce_isolation[i] = partition_values[i]; } @@ -1598,6 +1612,32 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, return count; } +static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset); +} + +static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset); +} + static DEVICE_ATTR(run_cleaner_shader, 0200, NULL, amdgpu_gfx_set_run_cleaner_shader); @@ -1611,45 +1651,136 @@ static DEVICE_ATTR(current_compute_partition, 0644, static DEVICE_ATTR(available_compute_partition, 0444, amdgpu_gfx_get_available_compute_partition, NULL); +static DEVICE_ATTR(gfx_reset_mask, 0444, + amdgpu_gfx_get_gfx_reset_mask, NULL); -int amdgpu_gfx_sysfs_init(struct amdgpu_device 
*adev) +static DEVICE_ATTR(compute_reset_mask, 0444, + amdgpu_gfx_get_compute_reset_mask, NULL); + +static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev) { + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + bool xcp_switch_supported; int r; + if (!xcp_mgr) + return 0; + + xcp_switch_supported = + (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode); + + if (!xcp_switch_supported) + dev_attr_current_compute_partition.attr.mode &= + ~(S_IWUSR | S_IWGRP | S_IWOTH); + r = device_create_file(adev->dev, &dev_attr_current_compute_partition); if (r) return r; - r = device_create_file(adev->dev, &dev_attr_available_compute_partition); + if (xcp_switch_supported) + r = device_create_file(adev->dev, + &dev_attr_available_compute_partition); return r; } -void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev) { + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + bool xcp_switch_supported; + + if (!xcp_mgr) + return; + + xcp_switch_supported = + (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode); device_remove_file(adev->dev, &dev_attr_current_compute_partition); - device_remove_file(adev->dev, &dev_attr_available_compute_partition); + + if (xcp_switch_supported) + device_remove_file(adev->dev, + &dev_attr_available_compute_partition); } -int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) +static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) { int r; r = device_create_file(adev->dev, &dev_attr_enforce_isolation); if (r) return r; + if (adev->gfx.enable_cleaner_shader) + r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); - r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); - if (r) + return r; +} + +static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_enforce_isolation); + if (adev->gfx.enable_cleaner_shader) + device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); +} + +static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (!amdgpu_gpu_recovery) return r; - return 0; + if (adev->gfx.num_gfx_rings) { + r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask); + if (r) + return r; + } + + if (adev->gfx.num_compute_rings) { + r = device_create_file(adev->dev, &dev_attr_compute_reset_mask); + if (r) + return r; + } + + return r; } -void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev) { - device_remove_file(adev->dev, &dev_attr_enforce_isolation); - device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); + if (!amdgpu_gpu_recovery) + return; + + if (adev->gfx.num_gfx_rings) + device_remove_file(adev->dev, &dev_attr_gfx_reset_mask); + + if (adev->gfx.num_compute_rings) + device_remove_file(adev->dev, &dev_attr_compute_reset_mask); +} + +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_gfx_sysfs_xcp_init(adev); + if (r) { + dev_err(adev->dev, "failed to create xcp sysfs files"); + return r; + } + + r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + if (r) + dev_err(adev->dev, "failed to create isolation sysfs files"); + + r = amdgpu_gfx_sysfs_reset_mask_init(adev); + if (r) + dev_err(adev->dev, "failed to create reset mask sysfs files"); + + return r; +} + +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +{ + amdgpu_gfx_sysfs_xcp_fini(adev); + amdgpu_gfx_sysfs_isolation_shader_fini(adev); + 
amdgpu_gfx_sysfs_reset_mask_fini(adev); } int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, @@ -1737,7 +1868,7 @@ static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, if (adev->gfx.kfd_sch_req_count[idx] == 0 && adev->gfx.kfd_sch_inactive[idx]) { schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, - GFX_SLICE_PERIOD); + msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); } } else { if (adev->gfx.kfd_sch_req_count[idx] == 0) { @@ -1792,8 +1923,9 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); } if (fences) { + /* we've already had our timeslice, so let's wrap this up */ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, - GFX_SLICE_PERIOD); + msecs_to_jiffies(1)); } else { /* Tell KFD to resume the runqueue */ if (adev->kfd.init_complete) { @@ -1806,6 +1938,51 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) mutex_unlock(&adev->enforce_isolation_mutex); } +static void +amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev, + u32 idx) +{ + unsigned long cjiffies; + bool wait = false; + + mutex_lock(&adev->enforce_isolation_mutex); + if (adev->enforce_isolation[idx]) { + /* set the initial values if nothing is set */ + if (!adev->gfx.enforce_isolation_jiffies[idx]) { + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } + /* Make sure KFD gets a chance to run */ + if (amdgpu_amdkfd_compute_active(adev, idx)) { + cjiffies = jiffies; + if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) { + cjiffies -= adev->gfx.enforce_isolation_jiffies[idx]; + if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) { + /* if our time is up, let KGD work drain before scheduling more */ + wait = true; + /* reset the timer period */ + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } else { + /* set the timer period to what's left in our time slice */ + adev->gfx.enforce_isolation_time[idx] = + GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies); + } + } else { + /* if jiffies wrap around we will just wait a little longer */ + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + } + } else { + /* if there is no KFD work, then set the full slice period */ + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } + } + mutex_unlock(&adev->enforce_isolation_mutex); + + if (wait) + msleep(GFX_SLICE_PERIOD_MS); +} + void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -1822,6 +1999,9 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) if (idx >= MAX_XCP) return; + /* Don't submit more work until KFD has had some time */ + amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx); + mutex_lock(&adev->enforce_isolation_mutex); if (adev->enforce_isolation[idx]) { if (adev->kfd.init_complete) @@ -1853,3 +2033,144 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring) } mutex_unlock(&adev->enforce_isolation_mutex); } + +/* + * debugfs to enable/disable gfx job submission to a specific core. 
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->gfx.num_gfx_rings) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { + ring = &adev->gfx.gfx_ring[i]; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + /* publish the sched.ready flag update so it takes effect immediately across SMP */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { + ring = &adev->gfx.gfx_ring[i]; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops, + amdgpu_debugfs_gfx_sched_mask_get, + amdgpu_debugfs_gfx_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->gfx.num_gfx_rings > 1)) + return; + sprintf(name, "amdgpu_gfx_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_gfx_sched_mask_fops); +#endif +} + +/* + * debugfs to enable/disable compute job submission to a specific core. + */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->gfx.num_compute_rings) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->gfx.num_compute_rings; ++i) { + ring = &adev->gfx.compute_ring[i]; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + + /* publish the sched.ready flag update so it takes effect immediately across SMP */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->gfx.num_compute_rings; ++i) { + ring = &adev->gfx.compute_ring[i]; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops, + amdgpu_debugfs_compute_sched_mask_get, + amdgpu_debugfs_compute_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->gfx.num_compute_rings > 1)) + return; + sprintf(name, "amdgpu_compute_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_compute_sched_mask_fops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5644e10a86a9..8b5bd63b5773 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -424,6 +424,8 @@ struct amdgpu_gfx { /* reset 
mask */ uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; + uint32_t gfx_supported_reset; + uint32_t compute_supported_reset; /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ @@ -472,6 +474,8 @@ struct amdgpu_gfx { struct mutex kfd_sch_mutex; u64 kfd_sch_req_count[MAX_XCP]; bool kfd_sch_inactive[MAX_XCP]; + unsigned long enforce_isolation_jiffies[MAX_XCP]; + unsigned long enforce_isolation_time[MAX_XCP]; }; struct amdgpu_gfx_ras_reg_entry { @@ -540,8 +544,6 @@ bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); -void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, - int *me, int *pipe, int *queue); bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); @@ -579,11 +581,11 @@ void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev); void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, unsigned int cleaner_shader_size, const void *cleaner_shader_ptr); -int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev); -void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev); void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work); void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); +void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); +void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 17a19d49d30a..1c19a65e6553 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -1065,18 +1065,6 @@ uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo)); } -/** - * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address - * from CPU's view - * - * @adev: amdgpu_device pointer - * @bo: amdgpu buffer object - */ -uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) -{ - return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; -} - int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) { struct amdgpu_bo *vram_bo = NULL; @@ -1130,6 +1118,79 @@ release_buffer: return ret; } +static const char *nps_desc[] = { + [AMDGPU_NPS1_PARTITION_MODE] = "NPS1", + [AMDGPU_NPS2_PARTITION_MODE] = "NPS2", + [AMDGPU_NPS3_PARTITION_MODE] = "NPS3", + [AMDGPU_NPS4_PARTITION_MODE] = "NPS4", + [AMDGPU_NPS6_PARTITION_MODE] = "NPS6", + [AMDGPU_NPS8_PARTITION_MODE] = "NPS8", +}; + +static ssize_t available_memory_partition_show(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + int size = 0, mode; + char *sep = ""; + + for_each_inst(mode, adev->gmc.supported_nps_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]); + sep = ", "; + } + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t current_memory_partition_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_device *ddev = 
dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + struct amdgpu_hive_info *hive; + int i; + + mode = UNKNOWN_MEMORY_PARTITION_MODE; + for_each_inst(i, adev->gmc.supported_nps_modes) { + if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) { + mode = i; + break; + } + } + + if (mode == UNKNOWN_MEMORY_PARTITION_MODE) + return -EINVAL; + + if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) { + dev_info( + adev->dev, + "requested NPS mode is same as current NPS mode, skipping\n"); + return count; + } + + /* If device is part of hive, all devices in the hive should request the + * same mode. Hence store the requested mode in hive. + */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + atomic_set(&hive->requested_nps_mode, mode); + amdgpu_put_xgmi_hive(hive); + } else { + adev->gmc.requested_nps_mode = mode; + } + + dev_info( + adev->dev, + "NPS mode change requested, please remove and reload the driver\n"); + + return count; +} + static ssize_t current_memory_partition_show( struct device *dev, struct device_attribute *addr, char *buf) { @@ -1138,53 +1199,65 @@ static ssize_t current_memory_partition_show( enum amdgpu_memory_partition mode; mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); - switch (mode) { - case AMDGPU_NPS1_PARTITION_MODE: - return sysfs_emit(buf, "NPS1\n"); - case AMDGPU_NPS2_PARTITION_MODE: - return sysfs_emit(buf, "NPS2\n"); - case AMDGPU_NPS3_PARTITION_MODE: - return sysfs_emit(buf, "NPS3\n"); - case AMDGPU_NPS4_PARTITION_MODE: - return sysfs_emit(buf, "NPS4\n"); - case AMDGPU_NPS6_PARTITION_MODE: - return sysfs_emit(buf, "NPS6\n"); - case AMDGPU_NPS8_PARTITION_MODE: - return sysfs_emit(buf, "NPS8\n"); - default: + if ((mode >= ARRAY_SIZE(nps_desc)) || + (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode)) return sysfs_emit(buf, "UNKNOWN\n"); - } + + return sysfs_emit(buf, "%s\n", nps_desc[mode]); } -static DEVICE_ATTR_RO(current_memory_partition); +static DEVICE_ATTR_RW(current_memory_partition); +static DEVICE_ATTR_RO(available_memory_partition); int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev) { + bool nps_switch_support; + int r = 0; + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) return 0; + nps_switch_support = (hweight32(adev->gmc.supported_nps_modes & + AMDGPU_ALL_NPS_MASK) > 1); + if (!nps_switch_support) + dev_attr_current_memory_partition.attr.mode &= + ~(S_IWUSR | S_IWGRP | S_IWOTH); + else + r = device_create_file(adev->dev, + &dev_attr_available_memory_partition); + + if (r) + return r; + return device_create_file(adev->dev, &dev_attr_current_memory_partition); } void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev) { + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) + return; + device_remove_file(adev->dev, &dev_attr_current_memory_partition); + device_remove_file(adev->dev, &dev_attr_available_memory_partition); } int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, struct amdgpu_mem_partition_info *mem_ranges, - int exp_ranges) + uint8_t *exp_ranges) { struct amdgpu_gmc_memrange *ranges; int range_cnt, ret, i, j; uint32_t nps_type; + bool refresh; - if (!mem_ranges) + if (!mem_ranges || !exp_ranges) return -EINVAL; + refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && + (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS); ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges, - &range_cnt); + &range_cnt, refresh); if (ret) return ret; @@ -1192,16 +1265,16 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, /* 
TODO: For now, expect ranges and partition count to be the same. * Adjust if there are holes expected in any NPS domain. */ - if (range_cnt != exp_ranges) { + if (*exp_ranges && (range_cnt != *exp_ranges)) { dev_warn( adev->dev, "NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d", - exp_ranges, nps_type, range_cnt); + *exp_ranges, nps_type, range_cnt); ret = -EINVAL; goto err; } - for (i = 0; i < exp_ranges; ++i) { + for (i = 0; i < range_cnt; ++i) { if (ranges[i].base_address >= ranges[i].limit_address) { dev_warn( adev->dev, @@ -1242,8 +1315,81 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, ranges[i].limit_address - ranges[i].base_address + 1; } + if (!*exp_ranges) + *exp_ranges = range_cnt; err: kfree(ranges); return ret; } + +int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, + int nps_mode) +{ + /* Not supported on VF devices and APUs */ + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return -EOPNOTSUPP; + + if (!adev->psp.funcs) { + dev_err(adev->dev, + "PSP interface not available for nps mode change request"); + return -EINVAL; + } + + return psp_memory_partition(&adev->psp, nps_mode); +} + +static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev, + int req_nps_mode, + int cur_nps_mode) +{ + return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) == + BIT(req_nps_mode)) && + req_nps_mode != cur_nps_mode); +} + +void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev) +{ + int req_nps_mode, cur_nps_mode, r; + struct amdgpu_hive_info *hive; + + if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes || + !adev->gmc.gmc_funcs->request_mem_partition_mode) + return; + + cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + req_nps_mode = atomic_read(&hive->requested_nps_mode); + if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, + cur_nps_mode)) { + amdgpu_put_xgmi_hive(hive); + return; + } + r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode); + amdgpu_put_xgmi_hive(hive); + goto out; + } + + req_nps_mode = adev->gmc.requested_nps_mode; + if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode)) + return; + + /* even if this fails, we should let driver unload w/o blocking */ + r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode); +out: + if (r) + dev_err(adev->dev, "NPS mode change request failed\n"); + else + dev_info( + adev->dev, + "NPS mode change request done, reload driver to complete the change\n"); +} + +bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) +{ + if (adev->gmc.gmc_funcs->need_reset_on_init) + return adev->gmc.gmc_funcs->need_reset_on_init(adev); + + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 4d951a1baefa..459a30fe239f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -73,6 +73,13 @@ enum amdgpu_memory_partition { AMDGPU_NPS8_PARTITION_MODE = 8, }; +#define AMDGPU_ALL_NPS_MASK \ + (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \ + BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \ + BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE)) + +#define AMDGPU_GMC_INIT_RESET_NPS BIT(0) + /* * GMC page fault information */ @@ -161,6 +168,10 @@ struct amdgpu_gmc_funcs { enum amdgpu_memory_partition (*query_mem_partition_mode)( struct amdgpu_device *adev); 
+ /* Request NPS mode */ + int (*request_mem_partition_mode)(struct amdgpu_device *adev, + int nps_mode); + bool (*need_reset_on_init)(struct amdgpu_device *adev); }; struct amdgpu_xgmi_ras { @@ -182,7 +193,6 @@ struct amdgpu_xgmi { bool supported; struct ras_common_if *ras_if; bool connected_to_cpu; - bool pending_reset; struct amdgpu_xgmi_ras *ras; }; @@ -305,6 +315,9 @@ struct amdgpu_gmc { struct amdgpu_mem_partition_info *mem_partitions; uint8_t num_mem_partitions; const struct amdgpu_gmc_funcs *gmc_funcs; + enum amdgpu_memory_partition requested_nps_mode; + uint32_t supported_nps_modes; + uint32_t reset_flags; struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; @@ -447,13 +460,17 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); -uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev); void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev); int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, struct amdgpu_mem_partition_info *mem_ranges, - int exp_ranges); + uint8_t *exp_ranges); + +int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, + int nps_mode); +void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); +bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 00d6211e0fbf..f0765ccde668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -225,15 +225,6 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) kfree(i2c); } -/* Add the default buses */ -void amdgpu_i2c_init(struct amdgpu_device *adev) -{ - if (amdgpu_hw_i2c) - DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); - - amdgpu_atombios_i2c_init(adev); -} - /* remove all the buses */ void amdgpu_i2c_fini(struct amdgpu_device *adev) { @@ -247,22 +238,6 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev) } } -/* Add additional buses */ -void amdgpu_i2c_add(struct amdgpu_device *adev, - const struct amdgpu_i2c_bus_rec *rec, - const char *name) -{ - struct drm_device *dev = adev_to_drm(adev); - int i; - - for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { - if (!adev->i2c_bus[i]) { - adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name); - return; - } - } -} - /* looks up bus based on id */ struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h index 63c2ff7499e1..21e3d1dad0a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h @@ -28,11 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, const struct amdgpu_i2c_bus_rec *rec, const char *name); void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c); -void amdgpu_i2c_init(struct amdgpu_device *adev); void amdgpu_i2c_fini(struct amdgpu_device *adev); -void amdgpu_i2c_add(struct amdgpu_device *adev, - const struct amdgpu_i2c_bus_rec *rec, - const char *name); struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, const struct amdgpu_i2c_bus_rec *i2c_bus); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 92d27d32de41..8e712a11aba5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -342,15 +342,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, * @ring: ring we want to submit job to * @job: job who wants to use the VMID * @id: resulting VMID - * @fence: fence to wait for if no id could be grabbed * * Try to reuse a VMID for this submission. */ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_vmid **id, - struct dma_fence **fence) + struct amdgpu_vmid **id) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->vm_hub; @@ -429,7 +427,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !id) goto error; } else { - r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence); + r = amdgpu_vmid_grab_used(vm, ring, job, &id); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 4766e99dd98f..263ce1811cc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -33,33 +33,17 @@ #include "isp_v4_1_0.h" #include "isp_v4_1_1.h" -static int isp_sw_init(void *handle) -{ - return 0; -} - -static int isp_sw_fini(void *handle) -{ - return 0; -} - /** * isp_hw_init - start and test isp block * - * @handle: handle for amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int isp_hw_init(void *handle) +static int isp_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_isp *isp = &adev->isp; - const struct amdgpu_ip_block *ip_block = - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP); - - if (!ip_block) - return -EINVAL; - if (isp->funcs->hw_init != NULL) return isp->funcs->hw_init(isp); @@ -69,13 +53,12 @@ static int isp_hw_init(void *handle) /** * isp_hw_fini - stop the hardware block * - * @handle: handle for amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* */ -static int isp_hw_fini(void *handle) +static int isp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_isp *isp = &adev->isp; + struct amdgpu_isp *isp = &ip_block->adev->isp; if (isp->funcs->hw_fini != NULL) return isp->funcs->hw_fini(isp); @@ -83,16 +66,6 @@ static int isp_hw_fini(void *handle) return -ENODEV; } -static int isp_suspend(void *handle) -{ - return 0; -} - -static int isp_resume(void *handle) -{ - return 0; -} - static int isp_load_fw_by_psp(struct amdgpu_device *adev) { const struct common_firmware_header *hdr; @@ -122,9 +95,10 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev) return r; } -static int isp_early_init(void *handle) +static int isp_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_isp *isp = &adev->isp; switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { @@ -154,16 +128,6 @@ static bool isp_is_idle(void *handle) return true; } -static int isp_wait_for_idle(void *handle) -{ - return 0; -} - -static int isp_soft_reset(void *handle) -{ - return 0; -} - static int isp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -179,16 +143,9 @@ static int isp_set_powergating_state(void *handle, static const struct amd_ip_funcs isp_ip_funcs = { .name = "isp_ip", .early_init = isp_early_init, - .late_init = NULL, - .sw_init = isp_sw_init, - .sw_fini = isp_sw_fini, .hw_init = isp_hw_init, .hw_fini = isp_hw_fini, - .suspend = isp_suspend, - .resume = isp_resume, .is_idle = isp_is_idle, - .wait_for_idle = isp_wait_for_idle, - .soft_reset = isp_soft_reset, .set_clockgating_state = isp_set_clockgating_state, .set_powergating_state = isp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 16f2605ac50b..b9d08bc96581 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -42,7 +42,7 @@ static void amdgpu_job_do_core_dump(struct amdgpu_device *adev, for (i = 0; i < adev->num_ip_blocks; i++) if (adev->ip_blocks[i].version->funcs->dump_ip_state) adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)adev); + ->dump_ip_state((void *)&adev->ip_blocks[i]); dev_info(adev->dev, "Dumping IP State Completed\n"); amdgpu_coredump(adev, true, false, job); @@ -137,6 +137,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) /* attempt a per ring reset */ if (amdgpu_gpu_recovery && ring->funcs->reset) { + dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); /* stop the scheduler, but don't mess with the * bad job yet because if ring reset fails * we'll fall back to full GPU reset. 
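The hunk above encodes an escalation policy for job timeouts: try the cheap per-ring reset first, and fall back to a full GPU reset only if that fails. A minimal, self-contained sketch of that decision flow follows; it compiles in userspace with stubbed hardware calls (handle_timeout, ring_reset_stub and full_gpu_reset_stub are illustrative names, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Pretend hardware call; returns false to simulate a failed ring reset. */
static bool ring_reset_stub(const char *ring)
{
	printf("Starting %s ring reset\n", ring);
	return false;
}

static void full_gpu_reset_stub(void)
{
	printf("Falling back to full GPU reset\n");
}

static void handle_timeout(const char *ring, bool gpu_recovery)
{
	/* Cheap path first: per-ring reset, as in amdgpu_job_timedout() */
	if (gpu_recovery && ring_reset_stub(ring))
		return;
	printf("Ring %s reset failure\n", ring);
	full_gpu_reset_stub();	/* last resort, per the comment above */
}

int main(void)
{
	handle_timeout("gfx_0.0.0", true);
	return 0;
}

With the stub forced to fail, the output reproduces the two dev_err() breadcrumbs this patch adds around the reset attempt.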
@@ -149,9 +150,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_inc(&ring->adev->gpu_reset_counter); amdgpu_fence_driver_force_completion(ring); if (amdgpu_ring_sched_ready(ring)) - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); goto exit; } + dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name); } if (amdgpu_device_should_recover_gpu(ring->adev)) { @@ -356,10 +358,10 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, if (r) goto error; - if (!fence && job->gang_submit) + if (job->gang_submit) fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); - while (!fence && job->vm && !job->vmid) { + if (!fence && job->vm && !job->vmid) { r = amdgpu_vmid_grab(job->vm, ring, job, &fence); if (r) { dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6df99cb00d9a..04eb51674596 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -47,7 +47,7 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) adev->jpeg.indirect_sram = true; for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; if (adev->jpeg.indirect_sram) { @@ -73,7 +73,7 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; amdgpu_bo_free_kernel( @@ -110,7 +110,7 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) unsigned int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) @@ -342,3 +342,111 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, return psp_execute_ip_fw_load(&adev->psp, &ucode); } + +/* + * debugfs to enable/disable jpeg job submission to a specific core. 
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i, j; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (val & (1ULL << ((i * adev->jpeg.num_jpeg_rings) + j))) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + } + /* publish the sched.ready flag update so it takes effect immediately across SMP */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i, j; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (ring->sched.ready) + mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j); + } + } + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops, + amdgpu_debugfs_jpeg_sched_mask_get, + amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->jpeg.num_jpeg_inst > 1) && !(adev->jpeg.num_jpeg_rings > 1)) + return; + sprintf(name, "amdgpu_jpeg_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_jpeg_sched_mask_fops); +#endif +} + +static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset); +} + +static DEVICE_ATTR(jpeg_reset_mask, 0444, + amdgpu_get_jpeg_reset_mask, NULL); + +int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->jpeg.num_jpeg_inst) { + r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->jpeg.num_jpeg_inst) + device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index f9cdd873ac9b..3eb4a4653fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -128,6 +128,7 @@ struct amdgpu_jpeg { uint16_t inst_mask; uint8_t num_inst_per_aid; bool indirect_sram; + uint32_t supported_reset; }; int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); @@ -149,5 +150,8 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, enum AMDGPU_UCODE_ID ucode_id); +void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev); +int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ 
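The gfx, compute and jpeg sched_mask debugfs files added in this series share one pattern: each ring gets a bit in a flat mask (for jpeg, bit i * num_jpeg_rings + j), a write is rejected unless it sets at least one bit inside the valid mask (so you cannot disable every ring at once), and each ring's sched.ready flag is then toggled from its bit. Below is a small self-contained model of that bookkeeping; NUM_INST and NUM_RINGS are stand-ins for the driver's instance and ring counts, not real fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_INST  2	/* stand-in for adev->jpeg.num_jpeg_inst */
#define NUM_RINGS 4	/* stand-in for adev->jpeg.num_jpeg_rings */

static bool ready[NUM_INST][NUM_RINGS];

/* Mirrors the _set handler: reject masks that name no valid ring. */
static int sched_mask_set(uint64_t val)
{
	uint64_t mask = (1ULL << (NUM_INST * NUM_RINGS)) - 1;
	int i, j;

	if ((val & mask) == 0)
		return -1;
	for (i = 0; i < NUM_INST; ++i)
		for (j = 0; j < NUM_RINGS; ++j)
			ready[i][j] = val & (1ULL << (i * NUM_RINGS + j));
	return 0;
}

/* Mirrors the _get handler: rebuild the mask from the ready flags. */
static uint64_t sched_mask_get(void)
{
	uint64_t mask = 0;
	int i, j;

	for (i = 0; i < NUM_INST; ++i)
		for (j = 0; j < NUM_RINGS; ++j)
			if (ready[i][j])
				mask |= 1ULL << (i * NUM_RINGS + j);
	return mask;
}

int main(void)
{
	if (sched_mask_set(0x3) == 0)	/* enable instance 0, rings 0-1 */
		printf("mask = 0x%llx\n", (unsigned long long)sched_mask_get());
	return 0;
}

Note the 1ULL shifts: with several instances times several rings the bit index can pass 31, which is why the jpeg handler builds its mask with 1ULL as well.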
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 18ee60378727..3ca03b5e0f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -348,6 +348,24 @@ static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgp return ret; } +static bool amdgpu_mca_bank_should_dump(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + struct mca_bank_entry *entry) +{ + bool ret; + + switch (type) { + case AMDGPU_MCA_ERROR_TYPE_CE: + ret = amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]); + break; + case AMDGPU_MCA_ERROR_TYPE_UE: + default: + ret = true; + break; + } + + return ret; +} + static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set, struct ras_query_context *qctx) { @@ -373,7 +391,8 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc amdgpu_mca_bank_set_add_entry(mca_set, &entry); - amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); + if (amdgpu_mca_bank_should_dump(adev, type, &entry)) + amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 7d4b540340e0..59ec20b07a6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -104,7 +104,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) return 0; r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_VRAM, &adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, &adev->mes.event_log_cpu_addr); @@ -192,17 +192,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev) (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]]; } - r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs); - if (r) { - dev_err(adev->dev, - "(%d) read_val_offs alloc failed\n", r); - goto error; - } - adev->mes.read_val_gpu_addr = - adev->wb.gpu_addr + (adev->mes.read_val_offs * 4); - adev->mes.read_val_ptr = - (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs]; - r = amdgpu_mes_doorbell_init(adev); if (r) goto error; @@ -223,8 +212,6 @@ error: amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } - if (adev->mes.read_val_ptr) - amdgpu_device_wb_free(adev, adev->mes.read_val_offs); idr_destroy(&adev->mes.pasid_idr); idr_destroy(&adev->mes.gang_id_idr); @@ -249,8 +236,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev) amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } - if (adev->mes.read_val_ptr) - amdgpu_device_wb_free(adev, adev->mes.read_val_offs); amdgpu_mes_doorbell_free(adev); @@ -905,7 +890,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, queue_input.me_id = ring->me; queue_input.pipe_id = ring->pipe; queue_input.queue_id = ring->queue; - queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0; queue_input.wptr_addr = ring->wptr_gpu_addr; queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; @@ -921,10 +906,19 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) { struct mes_misc_op_input op_input; int r, val = 0; + uint32_t addr_offset = 0; + uint64_t read_val_gpu_addr; + uint32_t *read_val_ptr; + if (amdgpu_device_wb_get(adev, &addr_offset)) { + DRM_ERROR("critical bug! 
too many mes readers\n"); + goto error; + } + read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4); + read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset]; op_input.op = MES_MISC_OP_READ_REG; op_input.read_reg.reg_offset = reg; - op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr; + op_input.read_reg.buffer_addr = read_val_gpu_addr; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes rreg is not supported!\n"); @@ -935,9 +929,11 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) if (r) DRM_ERROR("failed to read reg (0x%x)\n", reg); else - val = *(adev->mes.read_val_ptr); + val = *(read_val_ptr); error: + if (addr_offset) + amdgpu_device_wb_free(adev, addr_offset); return val; } @@ -1594,6 +1590,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) char ucode_prefix[30]; char fw_name[50]; bool need_retry = false; + u32 *ucode_ptr; int r; amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, @@ -1631,6 +1628,10 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) adev->mes.data_start_addr[pipe] = le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); + ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data + + sizeof(union amdgpu_firmware_header)); + adev->mes.fw_version[pipe] = + le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { int ucode, ucode_data; @@ -1677,6 +1678,29 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) return is_supported; } +/* Fix me -- node_id is used to identify the correct MES instances in the future */ +int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable) +{ + struct mes_misc_op_input op_input = {0}; + int r; + + op_input.op = MES_MISC_OP_CHANGE_CONFIG; + op_input.change_config.option.limit_single_process = enable ? 
1 : 0; + + if (!adev->mes.funcs->misc_op) { + dev_err(adev->dev, "mes change config is not supported!\n"); + r = -EINVAL; + goto error; + } + + r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + if (r) + dev_err(adev->dev, "failed to change_config.\n"); + +error: + return r; +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 96788c0f42f1..c6f93cbd6739 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -40,6 +40,7 @@ #define AMDGPU_MES_VERSION_MASK 0x00000fff #define AMDGPU_MES_API_VERSION_MASK 0x00fff000 #define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000 +#define AMDGPU_MES_MSCRATCH_SIZE 0x8000 enum amdgpu_mes_priority_level { AMDGPU_MES_PRIORITY_LEVEL_LOW = 0, @@ -75,6 +76,7 @@ struct amdgpu_mes { uint32_t sched_version; uint32_t kiq_version; + uint32_t fw_version[AMDGPU_MAX_MES_PIPES]; bool enable_legacy_queue_map; uint32_t total_max_queue; @@ -119,9 +121,6 @@ struct amdgpu_mes { uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES]; uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES]; uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES]; - uint32_t read_val_offs; - uint64_t read_val_gpu_addr; - uint32_t *read_val_ptr; uint32_t saved_flags; @@ -310,6 +309,7 @@ enum mes_misc_opcode { MES_MISC_OP_WRM_REG_WAIT, MES_MISC_OP_WRM_REG_WR_WAIT, MES_MISC_OP_SET_SHADER_DEBUGGER, + MES_MISC_OP_CHANGE_CONFIG, }; struct mes_misc_op_input { @@ -348,6 +348,21 @@ struct mes_misc_op_input { uint32_t tcp_watch_cntl[4]; uint32_t trap_en; } set_shader_debugger; + + struct { + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + }; + uint32_t all; + } option; + struct { + uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; + } change_config; }; }; @@ -518,4 +533,7 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes) } bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev); + +int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable); + #endif /* __AMDGPU_MES_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index f61d117b0caf..79c2f807b9fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -101,6 +101,7 @@ struct amdgpu_nbio_funcs { int (*get_compute_partition_mode)(struct amdgpu_device *adev); u32 (*get_memory_partition_mode)(struct amdgpu_device *adev, u32 *supp_modes); + bool (*is_nps_switch_requested)(struct amdgpu_device *adev); u64 (*get_pcie_replay_count)(struct amdgpu_device *adev); void (*set_reg_remap)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 971419e3a9bb..6852d50caa89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -40,6 +40,7 @@ #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_vram_mgr.h" +#include "amdgpu_vm.h" /** * DOC: amdgpu_object @@ -1171,54 +1172,71 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, } void amdgpu_bo_get_memory(struct amdgpu_bo *bo, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int sz) { + const unsigned int domain_to_pl[] = { + [ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM, + 
[ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT, + [ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM, + [ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS, + [ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS, + [ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA, + [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL, + }; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; + struct drm_gem_object *obj = &bo->tbo.base; uint64_t size = amdgpu_bo_size(bo); - struct drm_gem_object *obj; - bool shared; + unsigned int type; + + if (!res) { + /* + * If no backing store use one of the preferred domain for basic + * stats. We take the MSB since that should give a reasonable + * view. + */ + BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT || + TTM_PL_VRAM < TTM_PL_SYSTEM); + type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK); + if (!type) + return; + type--; + if (drm_WARN_ON_ONCE(&adev->ddev, + type >= ARRAY_SIZE(domain_to_pl))) + return; + type = domain_to_pl[type]; + } else { + type = res->mem_type; + } - /* Abort if the BO doesn't currently have a backing store */ - if (!res) + if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz)) return; - obj = &bo->tbo.base; - shared = drm_gem_object_is_shared_for_memory_stats(obj); - - switch (res->mem_type) { - case TTM_PL_VRAM: - stats->vram += size; - if (amdgpu_res_cpu_visible(adev, res)) - stats->visible_vram += size; - if (shared) - stats->vram_shared += size; - break; - case TTM_PL_TT: - stats->gtt += size; - if (shared) - stats->gtt_shared += size; - break; - case TTM_PL_SYSTEM: - default: - stats->cpu += size; - if (shared) - stats->cpu_shared += size; - break; + /* DRM stats common fields: */ + + if (drm_gem_object_is_shared_for_memory_stats(obj)) + stats[type].drm.shared += size; + else + stats[type].drm.private += size; + + if (res) { + stats[type].drm.resident += size; + + if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP)) + stats[type].drm.active += size; + else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) + stats[type].drm.purgeable += size; } + /* amdgpu specific stats: */ + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) { - stats->requested_vram += size; - if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - stats->requested_visible_vram += size; - - if (res->mem_type != TTM_PL_VRAM) { - stats->evicted_vram += size; - if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - stats->evicted_visible_vram += size; - } + stats[TTM_PL_VRAM].requested += size; + if (type != TTM_PL_VRAM) + stats[TTM_PL_VRAM].evicted += size; } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) { - stats->requested_gtt += size; + stats[TTM_PL_TT].requested += size; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 717e47b46167..be6769852ece 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -139,33 +139,6 @@ struct amdgpu_bo_vm { struct amdgpu_vm_bo_base entries[]; }; -struct amdgpu_mem_stats { - /* current VRAM usage, includes visible VRAM */ - uint64_t vram; - /* current shared VRAM usage, includes visible VRAM */ - uint64_t vram_shared; - /* current visible VRAM usage */ - uint64_t visible_vram; - /* current GTT usage */ - uint64_t gtt; - /* current shared GTT usage */ - uint64_t gtt_shared; - /* current system memory usage */ - uint64_t cpu; - /* current shared system memory usage */ - uint64_t cpu_shared; - /* sum of evicted buffers, includes visible VRAM */ - uint64_t evicted_vram; - /* sum 
of evicted buffers due to CPU access */ - uint64_t evicted_visible_vram; - /* how much userspace asked for, includes vis.VRAM */ - uint64_t requested_vram; - /* how much userspace asked for */ - uint64_t requested_visible_vram; - /* how much userspace asked for */ - uint64_t requested_gtt; -}; - static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) { return container_of(tbo, struct amdgpu_bo, tbo); @@ -328,7 +301,8 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, - struct amdgpu_mem_stats *stats); + struct amdgpu_mem_stats *stats, + unsigned int size); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0b28b2cf1517..17cf10c0b72b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -159,9 +159,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp) return ret; } -static int psp_early_init(void *handle) +static int psp_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; psp->autoload_supported = true; @@ -421,9 +421,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, return ret; } -static int psp_sw_init(void *handle) +static int psp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; int ret; struct psp_runtime_boot_cfg_entry boot_cfg_entry; @@ -527,9 +527,9 @@ failed1: return ret; } -static int psp_sw_fini(void *handle) +static int psp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; struct psp_gfx_cmd_resp *cmd = psp->cmd; @@ -639,6 +639,8 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) return "AUTOLOAD_RLC"; case GFX_CMD_ID_BOOT_CFG: return "BOOT_CFG"; + case GFX_CMD_ID_CONFIG_SQ_PERFMON: + return "CONFIG_SQ_PERFMON"; default: return "UNKNOWN CMD"; } @@ -1043,6 +1045,31 @@ static int psp_rl_load(struct amdgpu_device *adev) return ret; } +int psp_memory_partition(struct psp_context *psp, int mode) +{ + struct psp_gfx_cmd_resp *cmd; + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; + cmd->cmd.cmd_memory_part.mode = mode; + + dev_info(psp->adev->dev, + "Requesting %d memory partition change through PSP", mode); + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + dev_err(psp->adev->dev, + "PSP request failed to change to NPS%d mode\n", mode); + + release_psp_cmd_buf(psp); + + return ret; +} + int psp_spatial_partition(struct psp_context *psp, int mode) { struct psp_gfx_cmd_resp *cmd; @@ -1807,6 +1834,9 @@ int psp_ras_initialize(struct psp_context *psp) ras_cmd->ras_in_message.init_flags.xcc_mask = adev->gfx.xcc_mask; ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + ras_cmd->ras_in_message.init_flags.nps_mode = + 
adev->gmc.gmc_funcs->query_mem_partition_mode(adev); ret = psp_ta_load(psp, &psp->ras_context.context); @@ -2264,6 +2294,19 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp) } } +bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return false; + + if (psp->funcs && psp->funcs->is_reload_needed) + return psp->funcs->is_reload_needed(psp); + + return false; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -2958,10 +3001,10 @@ failed: return ret; } -static int psp_hw_init(void *handle) +static int psp_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; mutex_lock(&adev->firmware.mutex); /* @@ -2987,9 +3030,9 @@ failed: return -EINVAL; } -static int psp_hw_fini(void *handle) +static int psp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; if (psp->ta_fw) { @@ -3011,10 +3054,10 @@ static int psp_hw_fini(void *handle) return 0; } -static int psp_suspend(void *handle) +static int psp_suspend(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; if (adev->gmc.xgmi.num_physical_nodes > 1 && @@ -3074,10 +3117,10 @@ out: return ret; } -static int psp_resume(void *handle) +static int psp_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; dev_info(adev->dev, "PSP is resuming...\n"); @@ -3523,6 +3566,36 @@ out: return err; } +static bool is_ta_fw_applicable(struct psp_context *psp, + const struct psp_fw_bin_desc *desc) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t fw_version; + + switch (desc->fw_type) { + case TA_FW_TYPE_PSP_XGMI: + case TA_FW_TYPE_PSP_XGMI_AUX: + /* for now, AUX TA only exists on 13.0.6 ta bin, + * from v20.00.0x.14 + */ + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == + IP_VERSION(13, 0, 6)) { + fw_version = le32_to_cpu(desc->fw_version); + + if (adev->flags & AMD_IS_APU && + (fw_version & 0xff) >= 0x14) + return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; + else + return desc->fw_type == TA_FW_TYPE_PSP_XGMI; + } + break; + default: + break; + } + + return true; +} + static int parse_ta_bin_descriptor(struct psp_context *psp, const struct psp_fw_bin_desc *desc, const struct ta_firmware_header_v2_0 *ta_hdr) @@ -3532,6 +3605,9 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, if (!psp || !desc || !ta_hdr) return -EINVAL; + if (!is_ta_fw_applicable(psp, desc)) + return 0; + ucode_start_addr = (uint8_t *)ta_hdr + le32_to_cpu(desc->offset_bytes) + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); @@ -3544,6 +3620,7 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, psp->asd_context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_XGMI: + case TA_FW_TYPE_PSP_XGMI_AUX: psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; @@ -3736,8 +3813,44 @@ out: return err; 
} +int psp_config_sq_perfmon(struct psp_context *psp, + uint32_t xcp_id, bool core_override_enable, + bool reg_override_enable, bool perfmon_override_enable) +{ + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (xcp_id > MAX_XCP) { + dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id); + return -EINVAL; + } + + if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) { + dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n", + amdgpu_ip_version(psp->adev, MP0_HWIP, 0)); + return -EINVAL; + } + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON; + cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id); + cmd->cmd.config_sq_perfmon.core_override = core_override_enable; + cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable; + cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n", + xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable); + + release_psp_cmd_buf(psp); + return ret; +} + static int psp_set_clockgating_state(void *handle, - enum amd_clockgating_state state) + enum amd_clockgating_state state) { return 0; } @@ -4019,17 +4132,12 @@ const struct attribute_group amdgpu_flash_attr_group = { const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, - .late_init = NULL, .sw_init = psp_sw_init, .sw_fini = psp_sw_fini, .hw_init = psp_hw_init, .hw_fini = psp_hw_fini, .suspend = psp_suspend, .resume = psp_resume, - .is_idle = NULL, - .check_soft_reset = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, .set_clockgating_state = psp_set_clockgating_state, .set_powergating_state = psp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index e8abbbcb4326..567cb1f924ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -139,6 +139,7 @@ struct psp_funcs { int (*fatal_error_recovery_quirk)(struct psp_context *psp); bool (*get_ras_capability)(struct psp_context *psp); bool (*is_aux_sos_load_required)(struct psp_context *psp); + bool (*is_reload_needed)(struct psp_context *psp); }; struct ta_funcs { @@ -552,9 +553,15 @@ int psp_load_fw_list(struct psp_context *psp, void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); int psp_spatial_partition(struct psp_context *psp, int mode); +int psp_memory_partition(struct psp_context *psp, int mode); int is_psp_fw_valid(struct psp_bin_desc bin); int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); bool amdgpu_psp_get_ras_capability(struct psp_context *psp); + +int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable); +bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a1395c5fff1..1bc95b0cdbb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1214,6 +1214,42 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, } } +static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev, + struct ras_query_if *query_if, + struct ras_err_data 
*err_data, + struct ras_query_context *qctx) +{ + unsigned long new_ue, new_ce, new_de; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head); + const char *blk_name = get_ras_block_str(&query_if->head); + u64 event_id = qctx->evid.event_id; + + new_ce = err_data->ce_count - obj->err_data.ce_count; + new_ue = err_data->ue_count - obj->err_data.ue_count; + new_de = err_data->de_count - obj->err_data.de_count; + + if (new_ce) { + RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors " + "detected in %s block\n", + new_ce, + blk_name); + } + + if (new_ue) { + RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors " + "detected in %s block\n", + new_ue, + blk_name); + } + + if (new_de) { + RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors " + "detected in %s block\n", + new_de, + blk_name); + } +} + static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) { struct ras_err_node *err_node; @@ -1237,6 +1273,15 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s } } +static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj, + struct ras_err_data *err_data) +{ + /* Host reports absolute counts */ + obj->err_data.ue_count = err_data->ue_count; + obj->err_data.ce_count = err_data->ce_count; + obj->err_data.de_count = err_data->de_count; +} + static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk) { struct ras_common_if head; @@ -1323,7 +1368,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) return -EINVAL; - if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { + return amdgpu_virt_req_ras_err_count(adev, blk, err_data); + } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { amdgpu_ras_get_ecc_info(adev, err_data); } else { @@ -1405,14 +1452,22 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev, if (ret) goto out_fini_err_data; - amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { + amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); + } else { + /* Host provides absolute error counts. First generate the report + * using the previous VF internal count against new host count. + * Then Update VF internal count. 
+ */ + amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx); + amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data); + } info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; info->de_count = obj->err_data.de_count; - amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); - out_fini_err_data: amdgpu_ras_error_data_fini(&err_data); @@ -2605,6 +2660,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; reset_context.src = AMDGPU_RESET_SRC_RAS; + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); /* Perform full reset in fatal error mode */ if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) @@ -3146,7 +3202,42 @@ static int amdgpu_ras_page_retirement_thread(void *param) return 0; } -int amdgpu_ras_recovery_init(struct amdgpu_device *adev) +int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret; + + if (!con || amdgpu_sriov_vf(adev)) + return 0; + + ret = amdgpu_ras_eeprom_init(&con->eeprom_control); + + if (ret) + return ret; + + /* HW not usable */ + if (amdgpu_ras_is_rma(adev)) + return -EHWPOISON; + + if (con->eeprom_control.ras_num_recs) { + ret = amdgpu_ras_load_bad_pages(adev); + if (ret) + return ret; + + amdgpu_dpm_send_hbm_bad_pages_num( + adev, con->eeprom_control.ras_num_recs); + + if (con->update_channel_flag == true) { + amdgpu_dpm_send_hbm_bad_channel_flag( + adev, con->eeprom_control.bad_channel_bitmap); + con->update_channel_flag = false; + } + } + + return ret; +} + +int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data **data; @@ -3181,31 +3272,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); - /* Todo: During test the SMU might fail to read the eeprom through I2C - * when the GPU is pending on XGMI reset during probe time - * (Mostly after second bus reset), skip it now - */ - if (adev->gmc.xgmi.pending_reset) - return 0; - ret = amdgpu_ras_eeprom_init(&con->eeprom_control); - /* - * This calling fails when is_rma is true or - * ret != 0. 
- */ - if (amdgpu_ras_is_rma(adev) || ret) - goto free; - - if (con->eeprom_control.ras_num_recs) { - ret = amdgpu_ras_load_bad_pages(adev); + if (init_bp_info) { + ret = amdgpu_ras_init_badpage_info(adev); if (ret) goto free; - - amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); - - if (con->update_channel_flag == true) { - amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); - con->update_channel_flag = false; - } } mutex_init(&con->page_rsv_lock); @@ -3438,6 +3508,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) if (!amdgpu_ras_asic_supported(adev)) return; + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_get_ras_capability(adev)) + goto init_ras_enabled_flag; + } + /* query ras capability from psp */ if (amdgpu_psp_get_ras_capability(&adev->psp)) goto init_ras_enabled_flag; @@ -3910,7 +3985,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) } /* Guest side doesn't need init ras feature */ - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev)) return 0; list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { @@ -4294,8 +4369,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; } - if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + int hive_ras_recovery = 0; + + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); + } + /* In the case of multiple GPUs, after a GPU has started + * resetting all GPUs on hive, other GPUs do not need to + * trigger GPU reset again. + */ + if (!hive_ras_recovery) + amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); + else + atomic_set(&ras->in_recovery, 0); + } else { + flush_work(&ras->recovery_work); amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); + } + return 0; } @@ -4358,11 +4452,14 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, return false; } - if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) + if (amdgpu_sriov_vf(adev)) { + *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY; + } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) { *error_query_mode = (con->is_aca_debug_mode) ? 
AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; - else + } else { *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; + } return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 669720a9c60a..6db772ecfee4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -365,6 +365,7 @@ enum amdgpu_ras_error_query_mode { AMDGPU_RAS_INVALID_ERROR_QUERY = 0, AMDGPU_RAS_DIRECT_ERROR_QUERY = 1, AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2, + AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3, }; /* ras error status register fields */ @@ -736,8 +737,8 @@ struct amdgpu_ras_block_hw_ops { * 8: feature disable */ - -int amdgpu_ras_recovery_init(struct amdgpu_device *adev); +int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev); +int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info); void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 66c1a868c0e1..24dae7cdbe95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -26,6 +26,156 @@ #include "sienna_cichlid.h" #include "smu_v13_0_10.h" +static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev) +{ + int i; + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].status.hw) + continue; + /* displays are handled in phase1 */ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) + continue; + + /* XXX handle errors */ + amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + adev->ip_blocks[i].status.hw = false; + } + + /* VCN FW shared region is in framebuffer, there are some flags
+ */ + amdgpu_vcn_save_vcpu_bo(adev); + + return 0; +} + +static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev; + int r; + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + amdgpu_unregister_gpu_instance(tmp_adev); + r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, + "xgmi reset on init: prepare for reset failed"); + return r; + } + } + + return r; +} + +static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev = NULL; + int r; + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r) + return r; + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + if (!tmp_adev->kfd.init_complete) { + kgd2kfd_init_zone_device(tmp_adev); + amdgpu_amdkfd_device_init(tmp_adev); + amdgpu_amdkfd_drm_client_create(tmp_adev); + } + } + + return r; +} + +static int amdgpu_reset_xgmi_reset_on_init_perform_reset( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev = NULL; + int r; + + dev_dbg(adev->dev, "xgmi roi - hw reset\n"); + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + mutex_lock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = + amdgpu_asic_reset_method(adev); + } + r = 0; + /* Mode1 reset needs to be triggered on all devices together */ + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + /* For XGMI run all resets in parallel to speed up the process */ + if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + if (r) { + dev_err(tmp_adev->dev, + "xgmi reset on init: reset failed with error, %d", + r); + break; + } + } + + /* For XGMI wait for all resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + mutex_unlock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE; + } + + return r; +} + +int amdgpu_reset_do_xgmi_reset_on_init( + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *adev; + int r; + + if (!reset_device_list || list_empty(reset_device_list) || + list_is_singular(reset_device_list)) + return -EINVAL; + + adev = list_first_entry(reset_device_list, struct amdgpu_device, + reset_list); + r = amdgpu_reset_prepare_hwcontext(adev, reset_context); + if (r) + return r; + + r = amdgpu_reset_perform_reset(adev, reset_context); + + return r; +} + +struct amdgpu_reset_handler xgmi_reset_on_init_handler = { + .reset_method = AMD_RESET_METHOD_ON_INIT, + .prepare_env = NULL, + .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt, + .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset, + .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt, 
+ .restore_env = NULL, + .do_reset = NULL, +}; + int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 1cb920abc2fe..f8628bc898df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -153,4 +153,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf, for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \ (handler = (*reset_ctl->reset_handlers)[i]); \ ++i) + +extern struct amdgpu_reset_handler xgmi_reset_on_init_handler; +int amdgpu_reset_do_xgmi_reset_on_init( + struct amdgpu_reset_context *reset_context); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 690976665cf6..a6e28fe3f8d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -108,10 +108,22 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) */ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - int i; + uint32_t occupied, chunk1, chunk2; - for (i = 0; i < count; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + occupied = ring->wptr & ring->buf_mask; + chunk1 = ring->buf_mask + 1 - occupied; + chunk1 = (chunk1 >= count) ? count : chunk1; + chunk2 = count - chunk1; + + if (chunk1) + memset32(&ring->ring[occupied], ring->funcs->nop, chunk1); + + if (chunk2) + memset32(ring->ring, ring->funcs->nop, chunk2); + + ring->wptr += count; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count; } /** @@ -141,6 +153,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) { uint32_t count; + if (ring->count_dw < 0) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + /* We pad to match fetch size */ count = ring->funcs->align_mask + 1 - (ring->wptr & ring->funcs->align_mask); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index f93f51002201..36fc9578c53c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -246,7 +246,7 @@ struct amdgpu_ring { struct drm_gpu_scheduler sched; struct amdgpu_bo *ring_obj; - volatile uint32_t *ring; + uint32_t *ring; unsigned rptr_offs; u64 rptr_gpu_addr; volatile u32 *rptr_cpu_addr; @@ -288,7 +288,7 @@ struct amdgpu_ring { u64 cond_exe_gpu_addr; volatile u32 *cond_exe_cpu_addr; unsigned int set_q_mode_offs; - volatile u32 *set_q_mode_ptr; + u32 *set_q_mode_ptr; u64 set_q_mode_token; unsigned vm_hub; unsigned vm_inv_eng; @@ -377,8 +377,6 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) { - if (ring->count_dw <= 0) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); ring->ring[ring->wptr++ & ring->buf_mask] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; @@ -388,13 +386,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) { unsigned occupied, chunk1, chunk2; - void *dst; - - if (unlikely(ring->count_dw < count_dw)) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); occupied = ring->wptr & ring->buf_mask; - dst = (void *)&ring->ring[occupied]; chunk1 = ring->buf_mask + 1 - occupied; chunk1 = (chunk1 >= count_dw) ? 
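/* Editor's worked example of the two-chunk wraparound arithmetic used by
 * both amdgpu_ring_insert_nop() above and this helper. Assume a 1024-dword
 * ring (buf_mask = 0x3ff), wptr = 1020, count = 16 (numbers illustrative):
 *
 *	occupied = 1020 & 0x3ff = 1020
 *	chunk1   = 1024 - 1020  = 4    dwords written at the tail
 *	chunk2   = 16 - 4       = 12   dwords written from slot 0
 *
 * memset32()/memcpy() then replace the old per-dword loop, and wptr
 * advances to (1020 + 16) & ptr_mask.
 */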
count_dw : chunk1; chunk2 = count_dw - chunk1; @@ -402,12 +395,11 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, chunk2 <<= 2; if (chunk1) - memcpy(dst, src, chunk1); + memcpy(&ring->ring[occupied], src, chunk1); if (chunk2) { src += chunk1; - dst = (void *)ring->ring; - memcpy(dst, src, chunk2); + memcpy(ring->ring, src, chunk2); } ring->wptr += count_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 183a976ba29d..8c89b69edc20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -343,3 +343,114 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) return 0; } + +/* + * debugfs to enable/disable sdma job submission to specific core. + */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->sdma.num_instances) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; ++i) { + ring = &adev->sdma.instance[i].ring; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->sdma.num_instances; ++i) { + ring = &adev->sdma.instance[i].ring; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops, + amdgpu_debugfs_sdma_sched_mask_get, + amdgpu_debugfs_sdma_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->sdma.num_instances > 1)) + return; + sprintf(name, "amdgpu_sdma_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_sdma_sched_mask_fops); +#endif +} + +static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset); +} + +static DEVICE_ATTR(sdma_reset_mask, 0444, + amdgpu_get_sdma_reset_mask, NULL); + +int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (!amdgpu_gpu_recovery) + return r; + + if (adev->sdma.num_instances) { + r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_gpu_recovery) + return; + + if (adev->sdma.num_instances) + device_remove_file(adev->dev, &dev_attr_sdma_reset_mask); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 087ce0f6fa07..2db58b5812a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -116,6 +116,7 @@ struct amdgpu_sdma { struct ras_common_if
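/* Editor's usage sketch for the amdgpu_sdma_sched_mask debugfs file added
 * above (path assumes the usual debugfs mount and DRM minor 0;
 * illustrative only):
 *
 *	# cat /sys/kernel/debug/dri/0/amdgpu_sdma_sched_mask
 *	3
 *	# echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_sdma_sched_mask
 *
 * Bit i mirrors adev->sdma.instance[i].ring.sched.ready, so the write
 * above routes new jobs to SDMA instance 0 only; a value with no bit set
 * inside (1 << num_instances) - 1 is rejected with -EINVAL.
 */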
*ras_if; struct amdgpu_sdma_ras *ras; uint32_t *ip_dump; + uint32_t supported_reset; }; /* @@ -175,5 +176,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate); int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); - +void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev); +int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 74adb983ab03..9f922ec50ea2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -812,7 +812,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, /* Map SG to device */ r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); if (r) - goto release_sg; + goto release_sg_table; /* convert SG to linear array of pages and dma addresses */ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, @@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, return 0; +release_sg_table: + sg_free_table(ttm->sg); release_sg: kfree(ttm->sg); ttm->sg = NULL; @@ -1849,6 +1851,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); + dma_set_max_seg_size(adev->dev, UINT_MAX); /* No others user of address space so set it to 0 */ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 138d80017f35..2852a6064c9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,6 +34,7 @@ #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) #define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) #define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) +#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4e23419b92d4..4150ec0aa10d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -163,6 +163,7 @@ enum ta_fw_type { TA_FW_TYPE_PSP_DTM, TA_FW_TYPE_PSP_RAP, TA_FW_TYPE_PSP_SECUREDISPLAY, + TA_FW_TYPE_PSP_XGMI_AUX, TA_FW_TYPE_MAX_INDEX, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index bb7b9b2eaac1..896f3609b0ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -318,6 +318,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; + if (amdgpu_sriov_vf(adev)) + return r; + if (amdgpu_ras_is_supported(adev, ras_block->block)) { r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index 6162582d0aa2..bd2d3863c3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -765,9 +765,9 @@ static int umsch_mm_init(struct amdgpu_device *adev) } -static int umsch_mm_early_init(void *handle) +static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch 
(amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 5): @@ -784,9 +784,9 @@ static int umsch_mm_early_init(void *handle) return 0; } -static int umsch_mm_late_init(void *handle) +static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend) return 0; @@ -794,9 +794,9 @@ static int umsch_mm_late_init(void *handle) return umsch_mm_test(adev); } -static int umsch_mm_sw_init(void *handle) +static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = umsch_mm_init(adev); @@ -815,9 +815,9 @@ static int umsch_mm_sw_init(void *handle) return 0; } -static int umsch_mm_sw_fini(void *handle) +static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; release_firmware(adev->umsch_mm.fw); adev->umsch_mm.fw = NULL; @@ -839,9 +839,9 @@ static int umsch_mm_sw_fini(void *handle) return 0; } -static int umsch_mm_hw_init(void *handle) +static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = umsch_mm_load_microcode(&adev->umsch_mm); @@ -857,9 +857,9 @@ static int umsch_mm_hw_init(void *handle) return 0; } -static int umsch_mm_hw_fini(void *handle) +static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; umsch_mm_ring_stop(&adev->umsch_mm); @@ -873,18 +873,14 @@ static int umsch_mm_hw_fini(void *handle) return 0; } -static int umsch_mm_suspend(void *handle) +static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return umsch_mm_hw_fini(adev); + return umsch_mm_hw_fini(ip_block); } -static int umsch_mm_resume(void *handle) +static int umsch_mm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return umsch_mm_hw_init(adev); + return umsch_mm_hw_init(ip_block); } void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm) @@ -997,8 +993,6 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = { .hw_fini = umsch_mm_hw_fini, .suspend = umsch_mm_suspend, .resume = umsch_mm_resume, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 43f44cc201cb..aecb78e0519f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -294,21 +294,12 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t return ret; } -int amdgpu_vcn_suspend(struct amdgpu_device *adev) +int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev) { unsigned int size; void *ptr; int i, idx; - bool in_ras_intr = amdgpu_ras_intr_triggered(); - - cancel_delayed_work_sync(&adev->vcn.idle_work); - - /* err_event_athub will corrupt VCPU buffer, so we need to - * restore fw data and clear buffer in amdgpu_vcn_resume() */ - if (in_ras_intr) - return 0; - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if 
(adev->vcn.harvest_config & (1 << i)) continue; @@ -327,9 +318,24 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) drm_dev_exit(idx); } } + return 0; } +int amdgpu_vcn_suspend(struct amdgpu_device *adev) +{ + bool in_ras_intr = amdgpu_ras_intr_triggered(); + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + /* err_event_athub will corrupt VCPU buffer, so we need to + * restore fw data and clear buffer in amdgpu_vcn_resume() */ + if (in_ras_intr) + return 0; + + return amdgpu_vcn_save_vcpu_bo(adev); +} + int amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned int size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 2a1f3dbb14d3..765b809d48a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -518,5 +518,6 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, enum AMDGPU_UCODE_ID ucode_id); +int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b6397d3229e1..c704e9803e11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -523,6 +523,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->unique_id = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; + adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all; + adev->virt.ras_telemetry_en_caps.all = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all; break; default: dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); @@ -703,6 +706,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); } else if (adev->mman.drv_vram_usage_va) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -710,6 +715,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); } amdgpu_virt_read_pf2vf_data(adev); @@ -1144,3 +1151,185 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) return xnack_mode; } + +bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_sriov_ras_caps_en(adev)) + return false; + + if (adev->virt.ras_en_caps.bits.block_umc) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC); + if (adev->virt.ras_en_caps.bits.block_sdma) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA); + if (adev->virt.ras_en_caps.bits.block_gfx) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX); + if (adev->virt.ras_en_caps.bits.block_mmhub) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB); + if (adev->virt.ras_en_caps.bits.block_athub) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB); + if (adev->virt.ras_en_caps.bits.block_pcie_bif) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF); + if 
(adev->virt.ras_en_caps.bits.block_hdp) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP); + if (adev->virt.ras_en_caps.bits.block_xgmi_wafl) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL); + if (adev->virt.ras_en_caps.bits.block_df) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF); + if (adev->virt.ras_en_caps.bits.block_smn) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN); + if (adev->virt.ras_en_caps.bits.block_sem) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM); + if (adev->virt.ras_en_caps.bits.block_mp0) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0); + if (adev->virt.ras_en_caps.bits.block_mp1) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1); + if (adev->virt.ras_en_caps.bits.block_fuse) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE); + if (adev->virt.ras_en_caps.bits.block_mca) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA); + if (adev->virt.ras_en_caps.bits.block_vcn) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN); + if (adev->virt.ras_en_caps.bits.block_jpeg) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG); + if (adev->virt.ras_en_caps.bits.block_ih) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH); + if (adev->virt.ras_en_caps.bits.block_mpio) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO); + + if (adev->virt.ras_en_caps.bits.poison_propogation_mode) + con->poison_supported = true; /* Poison is handled by host */ + + return true; +} + +static inline enum amd_sriov_ras_telemetry_gpu_block +amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) { + switch (block) { + case AMDGPU_RAS_BLOCK__UMC: + return RAS_TELEMETRY_GPU_BLOCK_UMC; + case AMDGPU_RAS_BLOCK__SDMA: + return RAS_TELEMETRY_GPU_BLOCK_SDMA; + case AMDGPU_RAS_BLOCK__GFX: + return RAS_TELEMETRY_GPU_BLOCK_GFX; + case AMDGPU_RAS_BLOCK__MMHUB: + return RAS_TELEMETRY_GPU_BLOCK_MMHUB; + case AMDGPU_RAS_BLOCK__ATHUB: + return RAS_TELEMETRY_GPU_BLOCK_ATHUB; + case AMDGPU_RAS_BLOCK__PCIE_BIF: + return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF; + case AMDGPU_RAS_BLOCK__HDP: + return RAS_TELEMETRY_GPU_BLOCK_HDP; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL; + case AMDGPU_RAS_BLOCK__DF: + return RAS_TELEMETRY_GPU_BLOCK_DF; + case AMDGPU_RAS_BLOCK__SMN: + return RAS_TELEMETRY_GPU_BLOCK_SMN; + case AMDGPU_RAS_BLOCK__SEM: + return RAS_TELEMETRY_GPU_BLOCK_SEM; + case AMDGPU_RAS_BLOCK__MP0: + return RAS_TELEMETRY_GPU_BLOCK_MP0; + case AMDGPU_RAS_BLOCK__MP1: + return RAS_TELEMETRY_GPU_BLOCK_MP1; + case AMDGPU_RAS_BLOCK__FUSE: + return RAS_TELEMETRY_GPU_BLOCK_FUSE; + case AMDGPU_RAS_BLOCK__MCA: + return RAS_TELEMETRY_GPU_BLOCK_MCA; + case AMDGPU_RAS_BLOCK__VCN: + return RAS_TELEMETRY_GPU_BLOCK_VCN; + case AMDGPU_RAS_BLOCK__JPEG: + return RAS_TELEMETRY_GPU_BLOCK_JPEG; + case AMDGPU_RAS_BLOCK__IH: + return RAS_TELEMETRY_GPU_BLOCK_IH; + case AMDGPU_RAS_BLOCK__MPIO: + return RAS_TELEMETRY_GPU_BLOCK_MPIO; + default: + dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block); + return RAS_TELEMETRY_GPU_BLOCK_COUNT; + } +} + +static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev, + struct amdsriov_ras_telemetry *host_telemetry) +{ + struct amd_sriov_ras_telemetry_error_count *tmp = NULL; + uint32_t checksum, used_size; + + checksum = host_telemetry->header.checksum; + used_size = host_telemetry->header.used_size; + + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + return 0; + + tmp = kmalloc(used_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + memcpy(tmp, 
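/* Editor's note on the snapshot-then-validate sequence around this
 * memcpy(): the telemetry buffer lives in VRAM shared with the host and
 * can change underneath the guest, so the driver copies used_size bytes
 * into a private buffer first, verifies the checksum over that stable
 * copy, and only then commits it to adev->virt.count_cache (capped to the
 * cache size). Validating the shared buffer in place would race with
 * host updates.
 */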
&host_telemetry->body.error_count, used_size); + + if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0)) + goto out; + + memcpy(&adev->virt.count_cache, tmp, + min(used_size, sizeof(adev->virt.count_cache))); +out: + kfree(tmp); + + return 0; +} + +static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update) +{ + struct amdgpu_virt *virt = &adev->virt; + + /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host + * will ignore incoming guest messages. Ratelimit the guest messages to + * prevent guest self DOS. + */ + if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) { + if (!virt->ops->req_ras_err_count(adev)) + amdgpu_virt_cache_host_error_counts(adev, + adev->virt.fw_reserve.ras_telemetry); + } + + return 0; +} + +/* Bypass ACA interface and query ECC counts directly from host */ +int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, + struct ras_err_data *err_data) +{ + enum amd_sriov_ras_telemetry_gpu_block sriov_block; + + sriov_block = amdgpu_ras_block_to_sriov(adev, block); + + if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT || + !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block)) + return -EOPNOTSUPP; + + /* Host Access may be lost during reset, just return last cached data. */ + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_req_ras_err_count_internal(adev, false); + up_read(&adev->reset_domain->sem); + } + + err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count; + err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count; + err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count; + + return 0; +} + +int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev) +{ + unsigned long ue_count, ce_count; + + if (amdgpu_sriov_ras_telemetry_en(adev)) { + amdgpu_virt_req_ras_err_count_internal(adev, true); + amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b650a2032c42..5381b8d596e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -95,6 +95,7 @@ struct amdgpu_virt_ops { void (*ras_poison_handler)(struct amdgpu_device *adev, enum amdgpu_ras_block block); bool (*rcvd_ras_intr)(struct amdgpu_device *adev); + int (*req_ras_err_count)(struct amdgpu_device *adev); }; /* @@ -103,6 +104,7 @@ struct amdgpu_virt_ops { struct amdgpu_virt_fw_reserve { struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; + void *ras_telemetry; unsigned int checksum_key; }; @@ -136,6 +138,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7), /* MES info */ AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8), + AMDGIM_FEATURE_RAS_CAPS = (1 << 9), + AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -276,6 +280,12 @@ struct amdgpu_virt { uint32_t autoload_ucode_id; struct mutex rlcg_reg_lock; + + union amd_sriov_ras_caps ras_en_caps; + union amd_sriov_ras_caps ras_telemetry_en_caps; + + struct ratelimit_state ras_telemetry_rs; + struct amd_sriov_ras_telemetry_error_count count_cache; }; struct amdgpu_video_codec_info; @@ -320,6 +330,15 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_vf_mmio_access_protection(adev) \ ((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) +#define amdgpu_sriov_ras_caps_en(adev) \ +((adev)->virt.gim_feature &
AMDGIM_FEATURE_RAS_CAPS) + +#define amdgpu_sriov_ras_telemetry_en(adev) \ +(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry) + +#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \ +(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk)) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) @@ -383,4 +402,8 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, bool write, u32 *rlcg_flag); u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id); +bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev); +int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, + struct ras_err_data *err_data); +int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d4c2afafbb73..8bf28d336807 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -493,10 +493,10 @@ const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static int amdgpu_vkms_sw_init(void *handle) +static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); @@ -536,9 +536,9 @@ static int amdgpu_vkms_sw_init(void *handle) return 0; } -static int amdgpu_vkms_sw_fini(void *handle) +static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) @@ -555,9 +555,9 @@ static int amdgpu_vkms_sw_fini(void *handle) return 0; } -static int amdgpu_vkms_hw_init(void *handle) +static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_SI @@ -600,31 +600,31 @@ static int amdgpu_vkms_hw_init(void *handle) return 0; } -static int amdgpu_vkms_hw_fini(void *handle) +static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int amdgpu_vkms_suspend(void *handle) +static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = drm_mode_config_helper_suspend(adev_to_drm(adev)); if (r) return r; - return amdgpu_vkms_hw_fini(handle); + + return 0; } -static int amdgpu_vkms_resume(void *handle) +static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_vkms_hw_init(handle); + r = amdgpu_vkms_hw_init(ip_block); if (r) return r; - return drm_mode_config_helper_resume(adev_to_drm(adev)); + return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev)); } static bool amdgpu_vkms_is_idle(void *handle) @@ -632,16 +632,6 @@ static bool amdgpu_vkms_is_idle(void *handle) return true; } -static int amdgpu_vkms_wait_for_idle(void *handle) -{ - return 0; -} - -static int amdgpu_vkms_soft_reset(void 
*handle) -{ - return 0; -} - static int amdgpu_vkms_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -656,8 +646,6 @@ static int amdgpu_vkms_set_powergating_state(void *handle, static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .name = "amdgpu_vkms", - .early_init = NULL, - .late_init = NULL, .sw_init = amdgpu_vkms_sw_init, .sw_fini = amdgpu_vkms_sw_fini, .hw_init = amdgpu_vkms_hw_init, @@ -665,12 +653,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .suspend = amdgpu_vkms_suspend, .resume = amdgpu_vkms_resume, .is_idle = amdgpu_vkms_is_idle, - .wait_for_idle = amdgpu_vkms_wait_for_idle, - .soft_reset = amdgpu_vkms_soft_reset, .set_clockgating_state = amdgpu_vkms_set_clockgating_state, .set_powergating_state = amdgpu_vkms_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6005280f5f38..8d9bf7a0857f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1083,7 +1083,8 @@ error_free: } static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int size) { struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo *bo = bo_va->base.bo; @@ -1099,34 +1100,35 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va, !dma_resv_trylock(bo->tbo.base.resv)) return; - amdgpu_bo_get_memory(bo, stats); + amdgpu_bo_get_memory(bo, stats, size); if (!amdgpu_vm_is_bo_always_valid(vm, bo)) dma_resv_unlock(bo->tbo.base.resv); } void amdgpu_vm_get_memory(struct amdgpu_vm *vm, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int size) { struct amdgpu_bo_va *bo_va, *tmp; spin_lock(&vm->status_lock); list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); spin_unlock(&vm->status_lock); } @@ -1159,7 +1161,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, int r; amdgpu_sync_create(&sync); - if (clear || !bo) { + if (clear) { mem = NULL; /* Implicitly sync to command submissions in the same VM before @@ -1174,6 +1176,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, if (r) goto error_free; } + } else if (!bo) { + mem = NULL; + + /* PRT map operations don't need to sync to anything. 
*/ } else { struct drm_gem_object *obj = &bo->tbo.base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 52dd7cdfdc81..5d119ac26c4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -42,7 +42,6 @@ struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; struct amdgpu_bo_vm; -struct amdgpu_mem_stats; /* * GPUVM handling @@ -322,6 +321,16 @@ struct amdgpu_vm_fault_info { unsigned int vmhub; }; +struct amdgpu_mem_stats { + struct drm_memory_stats drm; + + /* buffers that requested this placement */ + uint64_t requested; + /* buffers that requested this placement + * but are currently evicted */ + uint64_t evicted; +}; + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; @@ -567,7 +576,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_memory(struct amdgpu_vm *vm, - struct amdgpu_mem_stats *stats); + struct amdgpu_mem_stats *stats, + unsigned int size); int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_vm *vmbo, bool immediate); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 5acd20ff5979..3e6f9dfb61bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -295,9 +295,9 @@ int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) return 0; } -static int vpe_early_init(void *handle) +static int vpe_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { @@ -356,9 +356,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe) return 0; } -static int vpe_sw_init(void *handle) +static int vpe_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; int ret; @@ -377,18 +377,26 @@ static int vpe_sw_init(void *handle) ret = vpe_init_microcode(vpe); if (ret) goto out; + + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vpe.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vpe.ring); + ret = amdgpu_vpe_sysfs_reset_mask_init(adev); + if (ret) + goto out; out: return ret; } -static int vpe_sw_fini(void *handle) +static int vpe_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; release_firmware(vpe->fw); vpe->fw = NULL; + amdgpu_vpe_sysfs_reset_mask_fini(adev); vpe_ring_fini(vpe); amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, @@ -398,9 +406,9 @@ static int vpe_sw_fini(void *handle) return 0; } -static int vpe_hw_init(void *handle) +static int vpe_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; int ret; @@ -421,9 +429,9 @@ static int vpe_hw_init(void *handle) return 0; } -static int vpe_hw_fini(void *handle) +static int vpe_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; 
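/* Editor's usage sketch for the new per-IP reset-mask attributes
 * (vpe_reset_mask here, sdma_reset_mask earlier in this series). Output
 * is illustrative; amdgpu_show_reset_mask() decodes whatever bits the IP
 * stored in supported_reset:
 *
 *	$ cat /sys/class/drm/card0/device/vpe_reset_mask
 *	full
 *
 * The file is only created when the IP has instances, and for SDMA only
 * when amdgpu_gpu_recovery is enabled.
 */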
vpe_ring_stop(vpe); @@ -434,20 +442,18 @@ static int vpe_hw_fini(void *handle) return 0; } -static int vpe_suspend(void *handle) +static int vpe_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vpe.idle_work); - return vpe_hw_fini(adev); + return vpe_hw_fini(ip_block); } -static int vpe_resume(void *handle) +static int vpe_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vpe_hw_init(adev); + return vpe_hw_init(ip_block); } static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) @@ -867,6 +873,41 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring) schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); } +static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset); +} + +static DEVICE_ATTR(vpe_reset_mask, 0444, + amdgpu_get_vpe_reset_mask, NULL); + +int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->vpe.num_instances) { + r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->vpe.num_instances) + device_remove_file(adev->dev, &dev_attr_vpe_reset_mask); +} + static const struct amdgpu_ring_funcs vpe_ring_funcs = { .type = AMDGPU_RING_TYPE_VPE, .align_mask = 0xf, @@ -908,14 +949,12 @@ static void vpe_set_ring_funcs(struct amdgpu_device *adev) const struct amd_ip_funcs vpe_ip_funcs = { .name = "vpe_v6_1", .early_init = vpe_early_init, - .late_init = NULL, .sw_init = vpe_sw_init, .sw_fini = vpe_sw_fini, .hw_init = vpe_hw_init, .hw_fini = vpe_hw_fini, .suspend = vpe_suspend, .resume = vpe_resume, - .soft_reset = NULL, .set_clockgating_state = vpe_set_clockgating_state, .set_powergating_state = vpe_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h index 231d86d0953e..695da740a97e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h @@ -79,6 +79,7 @@ struct amdgpu_vpe { uint32_t num_instances; bool collaborate_mode; + uint32_t supported_reset; }; int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev); @@ -86,6 +87,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe); int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe); +void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev); +int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev); #define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0) #define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? 
(vpe)->funcs->ring_start((vpe)) : 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index a6d456ec6aeb..e209b5e101df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -433,3 +433,292 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev, } } +#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \ + static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \ + struct amdgpu_xcp_res_details *xcp_res, char *buf) \ + { \ + return sysfs_emit(buf, "%d\n", xcp_res->_name); \ + } + +struct amdgpu_xcp_res_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf); +}; + +#define XCP_CFG_SYSFS_RES_ATTR(_name) \ + struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \ + .attr = { .name = __stringify(_name), .mode = 0400 }, \ + .show = amdgpu_xcp_res_sysfs_##_name##_show, \ + } + +XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst) +XCP_CFG_SYSFS_RES_ATTR(num_inst); +XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared) +XCP_CFG_SYSFS_RES_ATTR(num_shared); + +#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr + +static struct attribute *xcp_cfg_res_sysfs_attrs[] = { + &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst), + &XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL +}; + +static const char *xcp_desc[] = { + [AMDGPU_SPX_PARTITION_MODE] = "SPX", + [AMDGPU_DPX_PARTITION_MODE] = "DPX", + [AMDGPU_TPX_PARTITION_MODE] = "TPX", + [AMDGPU_QPX_PARTITION_MODE] = "QPX", + [AMDGPU_CPX_PARTITION_MODE] = "CPX", +}; + +static const char *nps_desc[] = { + [UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN", + [AMDGPU_NPS1_PARTITION_MODE] = "NPS1", + [AMDGPU_NPS2_PARTITION_MODE] = "NPS2", + [AMDGPU_NPS3_PARTITION_MODE] = "NPS3", + [AMDGPU_NPS4_PARTITION_MODE] = "NPS4", + [AMDGPU_NPS6_PARTITION_MODE] = "NPS6", + [AMDGPU_NPS8_PARTITION_MODE] = "NPS8", +}; + +ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs); + +#define to_xcp_attr(x) \ + container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr) +#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj) + +static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct amdgpu_xcp_res_sysfs_attribute *attribute; + struct amdgpu_xcp_res_details *xcp_res; + + attribute = to_xcp_attr(attr); + xcp_res = to_xcp_res(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(xcp_res, buf); +} + +static const struct sysfs_ops xcp_cfg_res_sysfs_ops = { + .show = xcp_cfg_res_sysfs_attr_show, +}; + +static const struct kobj_type xcp_cfg_res_sysfs_ktype = { + .sysfs_ops = &xcp_cfg_res_sysfs_ops, + .default_groups = xcp_cfg_res_sysfs_groups, +}; + +const char *xcp_res_names[] = { + [AMDGPU_XCP_RES_XCC] = "xcc", + [AMDGPU_XCP_RES_DMA] = "dma", + [AMDGPU_XCP_RES_DEC] = "dec", + [AMDGPU_XCP_RES_JPEG] = "jpeg", +}; + +static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info) + return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg); + + return -EOPNOTSUPP; +} + +#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj) +static ssize_t supported_xcp_configs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr; + int size = 0, mode; + char *sep = ""; + + if (!xcp_mgr || !xcp_mgr->supp_xcp_modes) + return sysfs_emit(buf, "Not 
supported\n"); + + for_each_inst(mode, xcp_mgr->supp_xcp_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]); + sep = ", "; + } + + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t supported_nps_configs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + int size = 0, mode; + char *sep = ""; + + if (!xcp_cfg || !xcp_cfg->compatible_nps_modes) + return sysfs_emit(buf, "Not supported\n"); + + for_each_inst(mode, xcp_cfg->compatible_nps_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]); + sep = ", "; + } + + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t xcp_config_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + + return sysfs_emit(buf, "%s\n", + amdgpu_gfx_compute_mode_desc(xcp_cfg->mode)); +} + +static ssize_t xcp_config_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t size) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + int mode, r; + + if (!strncasecmp("SPX", buf, strlen("SPX"))) + mode = AMDGPU_SPX_PARTITION_MODE; + else if (!strncasecmp("DPX", buf, strlen("DPX"))) + mode = AMDGPU_DPX_PARTITION_MODE; + else if (!strncasecmp("TPX", buf, strlen("TPX"))) + mode = AMDGPU_TPX_PARTITION_MODE; + else if (!strncasecmp("QPX", buf, strlen("QPX"))) + mode = AMDGPU_QPX_PARTITION_MODE; + else if (!strncasecmp("CPX", buf, strlen("CPX"))) + mode = AMDGPU_CPX_PARTITION_MODE; + else + return -EINVAL; + + r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg); + + if (r) + return r; + + xcp_cfg->mode = mode; + return size; +} + +static struct kobj_attribute xcp_cfg_sysfs_mode = + __ATTR_RW_MODE(xcp_config, 0644); + +static void xcp_cfg_sysfs_release(struct kobject *kobj) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + + kfree(xcp_cfg); +} + +static const struct kobj_type xcp_cfg_sysfs_ktype = { + .release = xcp_cfg_sysfs_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +static struct kobj_attribute supp_part_sysfs_mode = + __ATTR_RO(supported_xcp_configs); + +static struct kobj_attribute supp_nps_sysfs_mode = + __ATTR_RO(supported_nps_configs); + +static const struct attribute *xcp_attrs[] = { + &supp_part_sysfs_mode.attr, + &xcp_cfg_sysfs_mode.attr, + NULL, +}; + +void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev) +{ + struct amdgpu_xcp_res_details *xcp_res; + struct amdgpu_xcp_cfg *xcp_cfg; + int i, r, j, rid, mode; + + if (!adev->xcp_mgr) + return; + + xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL); + if (!xcp_cfg) + return; + xcp_cfg->xcp_mgr = adev->xcp_mgr; + + r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype, + &adev->dev->kobj, "compute_partition_config"); + if (r) + goto err1; + + r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs); + if (r) + goto err1; + + if (adev->gmc.supported_nps_modes != 0) { + r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + if (r) { + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + goto err1; + } + } + + mode = (xcp_cfg->xcp_mgr->mode == + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ? 
+ AMDGPU_SPX_PARTITION_MODE :
+ xcp_cfg->xcp_mgr->mode;
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+ if (r) {
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ goto err1;
+ }
+
+ xcp_cfg->mode = mode;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ rid = xcp_res->id;
+ r = kobject_init_and_add(&xcp_res->kobj,
+ &xcp_cfg_res_sysfs_ktype,
+ &xcp_cfg->kobj, "%s",
+ xcp_res_names[rid]);
+ if (r)
+ goto err;
+ }
+
+ adev->xcp_mgr->xcp_cfg = xcp_cfg;
+ return;
+err:
+ for (j = 0; j < i; j++) {
+ xcp_res = &xcp_cfg->xcp_res[j];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+err1:
+ kobject_put(&xcp_cfg->kobj);
+}
+
+void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ kobject_put(&xcp_cfg->kobj);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 32775260556f..b63f53242c57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -56,6 +56,30 @@ enum AMDGPU_XCP_STATE {
 AMDGPU_XCP_RESUME,
};
+enum amdgpu_xcp_res_id {
+ AMDGPU_XCP_RES_XCC,
+ AMDGPU_XCP_RES_DMA,
+ AMDGPU_XCP_RES_DEC,
+ AMDGPU_XCP_RES_JPEG,
+ AMDGPU_XCP_RES_MAX,
+};
+
+struct amdgpu_xcp_res_details {
+ enum amdgpu_xcp_res_id id;
+ u8 num_inst;
+ u8 num_shared;
+ struct kobject kobj;
+};
+
+struct amdgpu_xcp_cfg {
+ u8 mode;
+ struct amdgpu_xcp_res_details xcp_res[AMDGPU_XCP_RES_MAX];
+ u8 num_res;
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
+ u16 compatible_nps_modes;
+};
+
 struct amdgpu_xcp_ip_funcs {
 int (*prepare_suspend)(void *handle, uint32_t inst_mask);
 int (*suspend)(void *handle, uint32_t inst_mask);
@@ -97,6 +121,9 @@ struct amdgpu_xcp_mgr {
 /* Used to determine KFD memory size limits per XCP */
 unsigned int num_xcp_per_mem_partition;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ uint32_t supp_xcp_modes;
+ uint32_t avail_xcp_modes;
};
 struct amdgpu_xcp_mgr_funcs {
@@ -108,7 +135,9 @@ struct amdgpu_xcp_mgr_funcs {
 struct amdgpu_xcp_ip *ip);
 int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
 struct amdgpu_xcp *xcp, uint8_t *mem_id);
-
+ int (*get_xcp_res_info)(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg);
 int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
 int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
 int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -146,6 +175,9 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
 void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
 struct amdgpu_ctx_entity *entity);
+void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
+
 #define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
 ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
 (adev)->xcp_mgr->funcs->select_scheds ?
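The per-resource nodes created above embed a struct kobject in each
amdgpu_xcp_res_details and recover the containing object inside the shared
show() callback with container_of(). A minimal stand-alone sketch of that
dispatch pattern, using illustrative stand-in types rather than the kernel's
(any C99 compiler):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attr {
	const char *name;
};

struct res_details {
	int num_inst;
	struct attr attr;	/* embedded, like the kobject above */
};

/* generic callback: receives only the attribute, recovers its container */
static void show(struct attr *a)
{
	struct res_details *res = container_of(a, struct res_details, attr);

	printf("%s: %d\n", a->name, res->num_inst);
}

int main(void)
{
	struct res_details r = { .num_inst = 4, .attr = { .name = "num_inst" } };

	show(&r.attr);	/* prints "num_inst: 4" */
	return 0;
}

This is the same recovery that lets the single xcp_cfg_res_sysfs_attr_show()
serve every attribute of every resource kobject.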
\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 7de449fae1e3..b47422b0b5b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -667,6 +667,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) task_barrier_init(&hive->tb); hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; hive->hi_req_gpu = NULL; + atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); /* * hive pstate on boot is high in vega20 so we have to go to low @@ -800,6 +801,23 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, return -EINVAL; } +bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + int i; + + /* Sharing should always be enabled for non-SRIOV. */ + if (!amdgpu_sriov_vf(adev)) + return true; + + for (i = 0 ; i < top->num_nodes; ++i) + if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) + return !!top->nodes[i].is_sharing_enabled; + + return false; +} + /* * Devices that support extended data require the entire hive to initialize with * the shared memory buffer flag set. @@ -860,8 +878,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return 0; - if (!adev->gmc.xgmi.pending_reset && - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { ret = psp_xgmi_initialize(&adev->psp, false, true); if (ret) { dev_err(adev->dev, @@ -907,8 +924,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) task_barrier_add_task(&hive->tb); - if (!adev->gmc.xgmi.pending_reset && - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { /* update node list for other device in the hive */ if (tmp_adev != adev) { @@ -985,7 +1001,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } } - if (!ret && !adev->gmc.xgmi.pending_reset) + if (!ret) ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive); exit_unlock: @@ -1500,3 +1516,117 @@ int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev) return 0; } + +static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work) +{ + struct amdgpu_hive_info *hive = + container_of(work, struct amdgpu_hive_info, reset_on_init_work); + struct amdgpu_reset_context reset_context; + struct amdgpu_device *tmp_adev; + struct list_head device_list; + int r; + + mutex_lock(&hive->hive_lock); + + INIT_LIST_HEAD(&device_list); + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + list_add_tail(&tmp_adev->reset_list, &device_list); + + tmp_adev = list_first_entry(&device_list, struct amdgpu_device, + reset_list); + amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); + + reset_context.method = AMD_RESET_METHOD_ON_INIT; + reset_context.reset_req_dev = tmp_adev; + reset_context.hive = hive; + reset_context.reset_device_list = &device_list; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); + + amdgpu_reset_do_xgmi_reset_on_init(&reset_context); + mutex_unlock(&hive->hive_lock); + amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); + + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + r = amdgpu_ras_init_badpage_info(tmp_adev); + if (r && r != -EHWPOISON) + 
dev_err(tmp_adev->dev,
+ "error during bad page data initialization");
+ }
+}
+
+static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
+{
+ INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
+ amdgpu_reset_domain_schedule(hive->reset_domain,
+ &hive->reset_on_init_work);
+}
+
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+ bool reset_scheduled;
+ int num_devs;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ return -EINVAL;
+
+ mutex_lock(&hive->hive_lock);
+ num_devs = atomic_read(&hive->number_devices);
+ reset_scheduled = false;
+ if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
+ amdgpu_xgmi_schedule_reset_on_init(hive);
+ reset_scheduled = true;
+ }
+
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+
+ if (reset_scheduled)
+ flush_work(&hive->reset_on_init_work);
+
+ return 0;
+}
+
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode)
+{
+ struct amdgpu_device *tmp_adev;
+ int cur_nps_mode, r = 0;
+
+ /* This is expected to be called only during driver unload. The
+ * request needs to be placed only once for all devices in the hive.
+ * If one of them fails, revert the request for the devices that were
+ * already successful. After placing the request, set the hive mode to
+ * UNKNOWN so that other devices don't request anymore.
+ */
+ mutex_lock(&hive->hive_lock);
+ if (atomic_read(&hive->requested_nps_mode) ==
+ UNKNOWN_MEMORY_PARTITION_MODE) {
+ dev_dbg(adev->dev, "Unexpected entry for hive NPS change");
+ mutex_unlock(&hive->hive_lock);
+ return 0;
+ }
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, req_nps_mode);
+ if (r)
+ break;
+ }
+ if (r) {
+ /* Request back current mode if one of the requests failed */
+ cur_nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
+ list_for_each_entry_continue_reverse(
+ tmp_adev, &hive->device_list, gmc.xgmi.head)
+ adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, cur_nps_mode);
+ }
+ /* Set to UNKNOWN so that other devices don't request anymore */
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
+ mutex_unlock(&hive->hive_lock);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index a3bfc16de6d4..8cc7ab38db7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -45,6 +45,8 @@ struct amdgpu_hive_info {
 struct amdgpu_reset_domain *reset_domain;
 atomic_t ras_recovery;
 struct ras_event_manager event_mgr;
+ struct work_struct reset_on_init_work;
+ atomic_t requested_nps_mode;
};
 struct amdgpu_pcs_ras_field {
@@ -64,6 +66,8 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
 struct amdgpu_device *peer_adev);
 int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
 struct amdgpu_device *peer_adev);
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
 uint64_t addr);
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
@@ -75,5 +79,10 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
 adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
 }
 int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
+
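The helper above pushes the NPS request to every device in the hive and, on
the first failure, uses list_for_each_entry_continue_reverse() to withdraw
the request from the devices that had already accepted it. A stand-alone
sketch of that apply-then-roll-back pattern over a plain array; the
request_mode() stub and the failing-device index are hypothetical:

#include <stdio.h>

/* pretend device 2 rejects the new mode */
static int request_mode(int dev, int mode)
{
	return dev == 2 ? -1 : 0;
}

static int request_all(int ndev, int new_mode, int cur_mode)
{
	int i, r = 0;

	for (i = 0; i < ndev; i++) {
		r = request_mode(i, new_mode);
		if (r)
			break;
	}
	if (!r)
		return 0;	/* every device accepted */

	/* walk back over the devices that had already accepted */
	while (--i >= 0)
		request_mode(i, cur_mode);
	return r;
}

int main(void)
{
	printf("result: %d\n", request_all(4, 1, 0));	/* reverts 0 and 1 */
	return 0;
}

+int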
amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, + int req_nps_mode); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 6e9eeaeb3de1..b4f9c2f4e92c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -28,17 +28,21 @@ #define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 #define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB #define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 - +#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 +#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2 +#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64 /* * layout - * 0 64KB 65KB 66KB - * | VBIOS | PF2VF | VF2PF | Bad Page | ... - * | 64KB | 1KB | 1KB | + * 0 64KB 65KB 66KB 68KB 132KB + * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... + * | 64KB | 1KB | 1KB | 2KB | 64KB | ... */ + #define AMD_SRIOV_MSG_SIZE_KB 1 #define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB #define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) #define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB) /* * PF2VF history log: @@ -86,30 +90,59 @@ enum amd_sriov_ucode_engine_id { union amd_sriov_msg_feature_flags { struct { - uint32_t error_log_collect : 1; - uint32_t host_load_ucodes : 1; - uint32_t host_flr_vramlost : 1; - uint32_t mm_bw_management : 1; - uint32_t pp_one_vf_mode : 1; - uint32_t reg_indirect_acc : 1; - uint32_t av1_support : 1; - uint32_t vcn_rb_decouple : 1; - uint32_t mes_info_enable : 1; - uint32_t reserved : 23; + uint32_t error_log_collect : 1; + uint32_t host_load_ucodes : 1; + uint32_t host_flr_vramlost : 1; + uint32_t mm_bw_management : 1; + uint32_t pp_one_vf_mode : 1; + uint32_t reg_indirect_acc : 1; + uint32_t av1_support : 1; + uint32_t vcn_rb_decouple : 1; + uint32_t mes_info_dump_enable : 1; + uint32_t ras_caps : 1; + uint32_t ras_telemetry : 1; + uint32_t reserved : 21; } flags; uint32_t all; }; union amd_sriov_reg_access_flags { struct { - uint32_t vf_reg_access_ih : 1; - uint32_t vf_reg_access_mmhub : 1; - uint32_t vf_reg_access_gc : 1; - uint32_t reserved : 29; + uint32_t vf_reg_access_ih : 1; + uint32_t vf_reg_access_mmhub : 1; + uint32_t vf_reg_access_gc : 1; + uint32_t reserved : 29; } flags; uint32_t all; }; +union amd_sriov_ras_caps { + struct { + uint64_t block_umc : 1; + uint64_t block_sdma : 1; + uint64_t block_gfx : 1; + uint64_t block_mmhub : 1; + uint64_t block_athub : 1; + uint64_t block_pcie_bif : 1; + uint64_t block_hdp : 1; + uint64_t block_xgmi_wafl : 1; + uint64_t block_df : 1; + uint64_t block_smn : 1; + uint64_t block_sem : 1; + uint64_t block_mp0 : 1; + uint64_t block_mp1 : 1; + uint64_t block_fuse : 1; + uint64_t block_mca : 1; + uint64_t block_vcn : 1; + uint64_t block_jpeg : 1; + uint64_t block_ih : 1; + uint64_t block_mpio : 1; + uint64_t poison_propogation_mode : 1; + uint64_t reserved : 44; + } bits; + uint64_t all; +}; + union amd_sriov_msg_os_info { struct { uint32_t windows : 1; @@ -158,7 +191,7 @@ struct amd_sriov_msg_pf2vf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49) +#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (55) struct amd_sriov_msg_pf2vf_info { /* header contains size and version */ struct amd_sriov_msg_pf2vf_info_header header; @@ -211,6 +244,12 @@ struct 
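The offset macros above chain, so the new telemetry region starts at
66 KB + 2 KB = 68 KB and, being 64 KB long, ends at 132 KB, matching the
updated layout comment. A stand-alone arithmetic check (C11, constants
copied out of the header under shortened names):

#include <assert.h>
#include <stdio.h>

#define VBIOS_SIZE_KB		64
#define MSG_SIZE_KB		1
#define BAD_PAGE_SIZE_KB	2
#define TELEMETRY_SIZE_KB	64
#define PF2VF_OFFSET_KB		VBIOS_SIZE_KB			/* 64 */
#define VF2PF_OFFSET_KB		(PF2VF_OFFSET_KB + MSG_SIZE_KB)	/* 65 */
#define BAD_PAGE_OFFSET_KB	(VF2PF_OFFSET_KB + MSG_SIZE_KB)	/* 66 */
#define TELEMETRY_OFFSET_KB	(BAD_PAGE_OFFSET_KB + BAD_PAGE_SIZE_KB)

int main(void)
{
	static_assert(TELEMETRY_OFFSET_KB == 68, "layout drifted");
	printf("telemetry: %d KB .. %d KB\n", TELEMETRY_OFFSET_KB,
	       TELEMETRY_OFFSET_KB + TELEMETRY_SIZE_KB);	/* 68 .. 132 */
	return 0;
}

struct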
amd_sriov_msg_pf2vf_info { uint32_t pcie_atomic_ops_support_flags; /* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */ uint32_t gpu_capacity; + /* vf bdf on host pci tree for debug only */ + uint32_t bdf_on_host; + uint32_t more_bp; //Reserved for future use. + union amd_sriov_ras_caps ras_en_caps; + union amd_sriov_ras_caps ras_telemetry_en_caps; + /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; } __packed; @@ -283,8 +322,12 @@ enum amd_sriov_mailbox_request_message { MB_REQ_MSG_REL_GPU_FINI_ACCESS, MB_REQ_MSG_REQ_GPU_RESET_ACCESS, MB_REQ_MSG_REQ_GPU_INIT_DATA, + MB_REQ_MSG_PSP_VF_CMD_RELAY, MB_REQ_MSG_LOG_VF_ERROR = 200, + MB_REQ_MSG_READY_TO_RESET = 201, + MB_REQ_MSG_RAS_POISON = 202, + MB_REQ_RAS_ERROR_COUNT = 203, }; /* mailbox message send from host to guest */ @@ -297,10 +340,60 @@ enum amd_sriov_mailbox_response_message { MB_RES_MSG_FAIL, MB_RES_MSG_QUERY_ALIVE, MB_RES_MSG_GPU_INIT_DATA_READY, + MB_RES_MSG_RAS_ERROR_COUNT_READY = 11, MB_RES_MSG_TEXT_MESSAGE = 255 }; +enum amd_sriov_ras_telemetry_gpu_block { + RAS_TELEMETRY_GPU_BLOCK_UMC = 0, + RAS_TELEMETRY_GPU_BLOCK_SDMA = 1, + RAS_TELEMETRY_GPU_BLOCK_GFX = 2, + RAS_TELEMETRY_GPU_BLOCK_MMHUB = 3, + RAS_TELEMETRY_GPU_BLOCK_ATHUB = 4, + RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF = 5, + RAS_TELEMETRY_GPU_BLOCK_HDP = 6, + RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL = 7, + RAS_TELEMETRY_GPU_BLOCK_DF = 8, + RAS_TELEMETRY_GPU_BLOCK_SMN = 9, + RAS_TELEMETRY_GPU_BLOCK_SEM = 10, + RAS_TELEMETRY_GPU_BLOCK_MP0 = 11, + RAS_TELEMETRY_GPU_BLOCK_MP1 = 12, + RAS_TELEMETRY_GPU_BLOCK_FUSE = 13, + RAS_TELEMETRY_GPU_BLOCK_MCA = 14, + RAS_TELEMETRY_GPU_BLOCK_VCN = 15, + RAS_TELEMETRY_GPU_BLOCK_JPEG = 16, + RAS_TELEMETRY_GPU_BLOCK_IH = 17, + RAS_TELEMETRY_GPU_BLOCK_MPIO = 18, + RAS_TELEMETRY_GPU_BLOCK_COUNT = 19, +}; + +struct amd_sriov_ras_telemetry_header { + uint32_t checksum; + uint32_t used_size; + uint32_t reserved[2]; +}; + +struct amd_sriov_ras_telemetry_error_count { + struct { + uint32_t ce_count; + uint32_t ue_count; + uint32_t de_count; + uint32_t ce_overflow_count; + uint32_t ue_overflow_count; + uint32_t de_overflow_count; + uint32_t reserved[6]; + } block[RAS_TELEMETRY_GPU_BLOCK_COUNT]; +}; + +struct amdsriov_ras_telemetry { + struct amd_sriov_ras_telemetry_header header; + + union { + struct amd_sriov_ras_telemetry_error_count error_count; + } body; +}; + /* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */ enum amd_sriov_gpu_init_data_version { GPU_INIT_DATA_READY_V1 = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index ccfd2a4b4acc..e157d6d857b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -447,6 +447,72 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x return 0; } +static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int max_res[AMDGPU_XCP_RES_MAX] = {}; + bool res_lt_xcp; + int num_xcp, i; + u16 nps_modes; + + if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + return -EINVAL; + + max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); + max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; + max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; + max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcp 
= 1;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ num_xcp = 2;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ num_xcp = 3;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xcp = 4;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xcp_cfg->compatible_nps_modes =
+ (adev->gmc.supported_nps_modes & nps_modes);
+ xcp_cfg->num_res = ARRAY_SIZE(max_res);
+
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ res_lt_xcp = max_res[i] < num_xcp;
+ xcp_cfg->xcp_res[i].id = i;
+ xcp_cfg->xcp_res[i].num_inst =
+ res_lt_xcp ? 1 : max_res[i] / num_xcp;
+ xcp_cfg->xcp_res[i].num_inst =
+ i == AMDGPU_XCP_RES_JPEG ?
+ xcp_cfg->xcp_res[i].num_inst *
+ adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
+ xcp_cfg->xcp_res[i].num_shared =
+ res_lt_xcp ? num_xcp / max_res[i] : 1;
+ }
+
+ return 0;
+}
+
 static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
@@ -530,6 +596,57 @@ static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr,
 return ret;
}
+static void
+__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ /* this mode seems to exist only in the emulation phase */
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ int mode;
+
+ xcp_mgr->avail_xcp_modes = 0;
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
+ xcp_mgr->avail_xcp_modes |= BIT(mode);
+ }
+}
+
 static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 int mode, int *num_xcps)
{
@@ -578,6 +695,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
 ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ if (!ret)
+ __aqua_vanjaram_update_available_partition_mode(xcp_mgr);
 unlock:
 if (flags & AMDGPU_XCP_OPS_KFD)
 amdgpu_amdkfd_unlock_kfd(adev);
@@ -656,9 +775,11 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
 .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
 .query_partition_mode = &aqua_vanjaram_query_partition_mode,
 .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
+ .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
 .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
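In aqua_vanjaram_get_xcp_res_info() above, a resource with fewer instances
than partitions gets one shared instance (num_shared = partitions /
instances), while anything else is divided up (num_inst = instances /
partitions). A minimal sketch of that split rule, leaving out the JPEG
per-ring multiplication:

#include <stdio.h>

static void split(int max_res, int num_xcp, int *num_inst, int *num_shared)
{
	if (max_res < num_xcp) {
		*num_inst = 1;			/* one instance, shared */
		*num_shared = num_xcp / max_res;
	} else {
		*num_inst = max_res / num_xcp;	/* dedicated instances */
		*num_shared = 1;
	}
}

int main(void)
{
	int inst, shared;

	split(4, 4, &inst, &shared);	/* e.g. four decoders across QPX */
	printf("QPX: %d inst, shared by %d\n", inst, shared);	/* 1, 1 */
	split(4, 8, &inst, &shared);	/* four decoders, eight CPX parts */
	printf("CPX: %d inst, shared by %d\n", inst, shared);	/* 1, 2 */
	return 0;
}

 .select_scheds =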
&aqua_vanjaram_select_scheds, - .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list + .update_partition_sched_list = + &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) @@ -673,6 +794,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) if (ret) return ret; + __aqua_vanjaram_update_supported_modes(adev->xcp_mgr); /* TODO: Default memory node affinity init */ return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index cf1d5d462b67..e2cb1f080e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1985,9 +1985,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .query_video_codecs = &cik_query_video_codecs, }; -static int cik_common_early_init(void *handle) +static int cik_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->smc_rreg = &cik_smc_rreg; adev->smc_wreg = &cik_smc_wreg; @@ -2124,19 +2124,9 @@ static int cik_common_early_init(void *handle) return 0; } -static int cik_common_sw_init(void *handle) +static int cik_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int cik_common_sw_fini(void *handle) -{ - return 0; -} - -static int cik_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* move the golden regs per IP block */ cik_init_golden_registers(adev); @@ -2148,23 +2138,14 @@ static int cik_common_hw_init(void *handle) return 0; } -static int cik_common_hw_fini(void *handle) +static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int cik_common_suspend(void *handle) +static int cik_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_common_hw_fini(adev); -} - -static int cik_common_resume(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_common_hw_init(adev); + return cik_common_hw_init(ip_block); } static bool cik_common_is_idle(void *handle) @@ -2172,12 +2153,9 @@ static bool cik_common_is_idle(void *handle) return true; } -static int cik_common_wait_for_idle(void *handle) -{ - return 0; -} -static int cik_common_soft_reset(void *handle) + +static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX hard reset?? 
*/ return 0; @@ -2198,20 +2176,13 @@ static int cik_common_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_common_ip_funcs = { .name = "cik_common", .early_init = cik_common_early_init, - .late_init = NULL, - .sw_init = cik_common_sw_init, - .sw_fini = cik_common_sw_fini, .hw_init = cik_common_hw_init, .hw_fini = cik_common_hw_fini, - .suspend = cik_common_suspend, .resume = cik_common_resume, .is_idle = cik_common_is_idle, - .wait_for_idle = cik_common_wait_for_idle, .soft_reset = cik_common_soft_reset, .set_clockgating_state = cik_common_set_clockgating_state, .set_powergating_state = cik_common_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version cik_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 576baa9dbb0e..1da17755ad53 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -283,9 +283,9 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int cik_ih_early_init(void *handle) +static int cik_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -297,10 +297,10 @@ static int cik_ih_early_init(void *handle) return 0; } -static int cik_ih_sw_init(void *handle) +static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -311,9 +311,9 @@ static int cik_ih_sw_init(void *handle) return r; } -static int cik_ih_sw_fini(void *handle) +static int cik_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -321,34 +321,28 @@ static int cik_ih_sw_fini(void *handle) return 0; } -static int cik_ih_hw_init(void *handle) +static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return cik_ih_irq_init(adev); } -static int cik_ih_hw_fini(void *handle) +static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cik_ih_irq_disable(adev); + cik_ih_irq_disable(ip_block->adev); return 0; } -static int cik_ih_suspend(void *handle) +static int cik_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_ih_hw_fini(adev); + return cik_ih_hw_fini(ip_block); } -static int cik_ih_resume(void *handle) +static int cik_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_ih_hw_init(adev); + return cik_ih_hw_init(ip_block); } static bool cik_ih_is_idle(void *handle) @@ -362,11 +356,11 @@ static bool cik_ih_is_idle(void *handle) return true; } -static int cik_ih_wait_for_idle(void *handle) +static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -378,9 
+372,9 @@ static int cik_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cik_ih_soft_reset(void *handle) +static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -423,7 +417,6 @@ static int cik_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_ih_ip_funcs = { .name = "cik_ih", .early_init = cik_ih_early_init, - .late_init = NULL, .sw_init = cik_ih_sw_init, .sw_fini = cik_ih_sw_fini, .hw_init = cik_ih_hw_init, @@ -435,8 +428,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = { .soft_reset = cik_ih_soft_reset, .set_clockgating_state = cik_ih_set_clockgating_state, .set_powergating_state = cik_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cik_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 952737de9411..ede1a028d48d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -54,7 +54,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev); static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); -static int cik_sdma_soft_reset(void *handle); +static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin"); MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin"); @@ -918,9 +918,9 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, } } -static int cik_sdma_early_init(void *handle) +static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; @@ -937,10 +937,10 @@ static int cik_sdma_early_init(void *handle) return 0; } -static int cik_sdma_sw_init(void *handle) +static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, i; /* SDMA trap event */ @@ -977,9 +977,9 @@ static int cik_sdma_sw_init(void *handle) return r; } -static int cik_sdma_sw_fini(void *handle) +static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -989,10 +989,10 @@ static int cik_sdma_sw_fini(void *handle) return 0; } -static int cik_sdma_hw_init(void *handle) +static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = cik_sdma_start(adev); if (r) @@ -1001,9 +1001,9 @@ static int cik_sdma_hw_init(void *handle) return r; } -static int cik_sdma_hw_fini(void *handle) +static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cik_ctx_switch_enable(adev, false); cik_sdma_enable(adev, false); @@ -1011,20 +1011,16 @@ static int cik_sdma_hw_fini(void *handle) return 0; } -static int cik_sdma_suspend(void *handle) +static int 
cik_sdma_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_sdma_hw_fini(adev); + return cik_sdma_hw_fini(ip_block); } -static int cik_sdma_resume(void *handle) +static int cik_sdma_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cik_sdma_soft_reset(handle); + cik_sdma_soft_reset(ip_block); - return cik_sdma_hw_init(adev); + return cik_sdma_hw_init(ip_block); } static bool cik_sdma_is_idle(void *handle) @@ -1039,11 +1035,11 @@ static bool cik_sdma_is_idle(void *handle) return true; } -static int cik_sdma_wait_for_idle(void *handle) +static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1056,10 +1052,10 @@ static int cik_sdma_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cik_sdma_soft_reset(void *handle) +static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; /* sdma0 */ @@ -1217,7 +1213,6 @@ static int cik_sdma_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_sdma_ip_funcs = { .name = "cik_sdma", .early_init = cik_sdma_early_init, - .late_init = NULL, .sw_init = cik_sdma_sw_init, .sw_fini = cik_sdma_sw_fini, .hw_init = cik_sdma_hw_init, @@ -1229,8 +1224,6 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = { .soft_reset = cik_sdma_soft_reset, .set_clockgating_state = cik_sdma_set_clockgating_state, .set_powergating_state = cik_sdma_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 072643787384..d72973bd570d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -274,9 +274,9 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int cz_ih_early_init(void *handle) +static int cz_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -288,10 +288,10 @@ static int cz_ih_early_init(void *handle) return 0; } -static int cz_ih_sw_init(void *handle) +static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -302,9 +302,9 @@ static int cz_ih_sw_init(void *handle) return r; } -static int cz_ih_sw_fini(void *handle) +static int cz_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -312,10 +312,10 @@ static int cz_ih_sw_fini(void *handle) return 0; } -static int cz_ih_hw_init(void *handle) +static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; r = cz_ih_irq_init(adev); if (r) @@ -324,27 +324,21 @@ static int cz_ih_hw_init(void *handle) return 0; } -static int cz_ih_hw_fini(void *handle) +static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cz_ih_irq_disable(adev); + cz_ih_irq_disable(ip_block->adev); return 0; } -static int cz_ih_suspend(void *handle) +static int cz_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cz_ih_hw_fini(adev); + return cz_ih_hw_fini(ip_block); } -static int cz_ih_resume(void *handle) +static int cz_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cz_ih_hw_init(adev); + return cz_ih_hw_init(ip_block); } static bool cz_ih_is_idle(void *handle) @@ -358,11 +352,11 @@ static bool cz_ih_is_idle(void *handle) return true; } -static int cz_ih_wait_for_idle(void *handle) +static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -374,10 +368,10 @@ static int cz_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cz_ih_soft_reset(void *handle) +static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -421,7 +415,6 @@ static int cz_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs cz_ih_ip_funcs = { .name = "cz_ih", .early_init = cz_ih_early_init, - .late_init = NULL, .sw_init = cz_ih_sw_init, .sw_fini = cz_ih_sw_fini, .hw_init = cz_ih_hw_init, @@ -433,8 +426,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = { .soft_reset = cz_ih_soft_reset, .set_clockgating_state = cz_ih_set_clockgating_state, .set_powergating_state = cz_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cz_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 70c1399f738d..5098c50d54c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2738,9 +2738,9 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v10_0_early_init(void *handle) +static int dce_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; @@ -2765,10 +2765,10 @@ static int dce_v10_0_early_init(void *handle) return 0; } -static int dce_v10_0_sw_init(void *handle) +static int dce_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2844,9 +2844,9 @@ static int dce_v10_0_sw_init(void *handle) return 0; } -static int dce_v10_0_sw_fini(void *handle) +static int dce_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2862,10 +2862,10 @@ static int dce_v10_0_sw_fini(void *handle) return 0; } -static int dce_v10_0_hw_init(void *handle) +static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v10_0_init_golden_registers(adev); @@ -2887,10 +2887,10 @@ static int dce_v10_0_hw_init(void *handle) return 0; } -static int dce_v10_0_hw_fini(void *handle) +static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v10_0_hpd_fini(adev); @@ -2905,9 +2905,9 @@ static int dce_v10_0_hw_fini(void *handle) return 0; } -static int dce_v10_0_suspend(void *handle) +static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2917,18 +2917,18 @@ static int dce_v10_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v10_0_hw_fini(handle); + return dce_v10_0_hw_fini(ip_block); } -static int dce_v10_0_resume(void *handle) +static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v10_0_hw_init(handle); + ret = dce_v10_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2948,22 +2948,17 @@ static bool dce_v10_0_is_idle(void *handle) return true; } -static int dce_v10_0_wait_for_idle(void *handle) +static bool dce_v10_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static bool dce_v10_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return dce_v10_0_is_display_hung(adev); } -static int dce_v10_0_soft_reset(void *handle) +static int dce_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3322,7 +3317,6 @@ static int dce_v10_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .name = "dce_v10_0", .early_init = dce_v10_0_early_init, - .late_init = NULL, .sw_init = dce_v10_0_sw_init, .sw_fini = dce_v10_0_sw_fini, .hw_init = dce_v10_0_hw_init, @@ -3330,13 +3324,10 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .suspend = dce_v10_0_suspend, .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, - .wait_for_idle = dce_v10_0_wait_for_idle, .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f154c24499c8..c5680ff4ab9f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2851,9 +2851,9 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v11_0_early_init(void *handle) +static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; @@ -2891,10 +2891,10 @@ static int dce_v11_0_early_init(void *handle) return 0; } -static int dce_v11_0_sw_init(void *handle) +static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2971,9 +2971,9 @@ static int dce_v11_0_sw_init(void *handle) return 0; } -static int dce_v11_0_sw_fini(void *handle) +static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2989,10 +2989,10 @@ static int dce_v11_0_sw_fini(void *handle) return 0; } -static int dce_v11_0_hw_init(void *handle) +static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v11_0_init_golden_registers(adev); @@ -3025,10 +3025,10 @@ static int dce_v11_0_hw_init(void *handle) return 0; } -static int dce_v11_0_hw_fini(void *handle) +static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v11_0_hpd_fini(adev); @@ -3043,9 +3043,9 @@ static int dce_v11_0_hw_fini(void *handle) return 0; } -static int dce_v11_0_suspend(void *handle) +static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -3055,18 +3055,18 @@ static int dce_v11_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v11_0_hw_fini(handle); + return dce_v11_0_hw_fini(ip_block); } -static int dce_v11_0_resume(void *handle) +static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v11_0_hw_init(handle); + ret = dce_v11_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -3086,15 +3086,10 @@ static bool dce_v11_0_is_idle(void *handle) return true; } -static int dce_v11_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v11_0_soft_reset(void *handle) +static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v11_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3454,7 +3449,6 @@ static int 
dce_v11_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .name = "dce_v11_0", .early_init = dce_v11_0_early_init, - .late_init = NULL, .sw_init = dce_v11_0_sw_init, .sw_fini = dce_v11_0_sw_fini, .hw_init = dce_v11_0_hw_init, @@ -3462,12 +3456,9 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .suspend = dce_v11_0_suspend, .resume = dce_v11_0_resume, .is_idle = dce_v11_0_is_idle, - .wait_for_idle = dce_v11_0_wait_for_idle, .soft_reset = dce_v11_0_soft_reset, .set_clockgating_state = dce_v11_0_set_clockgating_state, .set_powergating_state = dce_v11_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index a7fcb135827f..eb7de9122d99 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2633,9 +2633,9 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v6_0_early_init(void *handle) +static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg; @@ -2664,11 +2664,11 @@ static int dce_v6_0_early_init(void *handle) return 0; } -static int dce_v6_0_sw_init(void *handle) +static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; bool ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2743,9 +2743,9 @@ static int dce_v6_0_sw_init(void *handle) return r; } -static int dce_v6_0_sw_fini(void *handle) +static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2760,10 +2760,10 @@ static int dce_v6_0_sw_fini(void *handle) return 0; } -static int dce_v6_0_hw_init(void *handle) +static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* disable vga render */ dce_v6_0_set_vga_render_state(adev, false); @@ -2783,10 +2783,10 @@ static int dce_v6_0_hw_init(void *handle) return 0; } -static int dce_v6_0_hw_fini(void *handle) +static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v6_0_hpd_fini(adev); @@ -2801,9 +2801,9 @@ static int dce_v6_0_hw_fini(void *handle) return 0; } -static int dce_v6_0_suspend(void *handle) +static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2812,18 +2812,18 @@ static int dce_v6_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v6_0_hw_fini(handle); + return dce_v6_0_hw_fini(ip_block); } -static int dce_v6_0_resume(void *handle) +static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = 
(struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v6_0_hw_init(handle); + ret = dce_v6_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2843,12 +2843,7 @@ static bool dce_v6_0_is_idle(void *handle) return true; } -static int dce_v6_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v6_0_soft_reset(void *handle) +static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n"); return 0; @@ -3144,7 +3139,6 @@ static int dce_v6_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .name = "dce_v6_0", .early_init = dce_v6_0_early_init, - .late_init = NULL, .sw_init = dce_v6_0_sw_init, .sw_fini = dce_v6_0_sw_fini, .hw_init = dce_v6_0_hw_init, @@ -3152,12 +3146,9 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .suspend = dce_v6_0_suspend, .resume = dce_v6_0_resume, .is_idle = dce_v6_0_is_idle, - .wait_for_idle = dce_v6_0_wait_for_idle, .soft_reset = dce_v6_0_soft_reset, .set_clockgating_state = dce_v6_0_set_clockgating_state, .set_powergating_state = dce_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 77ac3f114d24..04b79ff87f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2644,9 +2644,9 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v8_0_early_init(void *handle) +static int dce_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; @@ -2680,10 +2680,10 @@ static int dce_v8_0_early_init(void *handle) return 0; } -static int dce_v8_0_sw_init(void *handle) +static int dce_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2764,9 +2764,9 @@ static int dce_v8_0_sw_init(void *handle) return 0; } -static int dce_v8_0_sw_fini(void *handle) +static int dce_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2782,10 +2782,10 @@ static int dce_v8_0_sw_fini(void *handle) return 0; } -static int dce_v8_0_hw_init(void *handle) +static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* disable vga render */ dce_v8_0_set_vga_render_state(adev, false); @@ -2805,10 +2805,10 @@ static int dce_v8_0_hw_init(void *handle) return 0; } -static int dce_v8_0_hw_fini(void *handle) +static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v8_0_hpd_fini(adev); @@ -2823,9 +2823,9 @@ static int dce_v8_0_hw_fini(void *handle) return 0; 
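The change repeated across cik, cik_ih, cik_sdma, cz_ih, and the dce blocks
in this series is mechanical: every IP callback stops taking an opaque
void *handle that it must cast, and instead receives the typed ip block,
with suspend/resume forwarding the block straight to hw_fini/hw_init. A
stand-alone sketch of the before/after shape, using simplified stand-in
types rather than the driver's:

#include <stdio.h>

struct device { const char *name; };
struct ip_block { struct device *dev; };

/* old shape: every hook re-derived the device from an opaque handle */
static int hw_fini_old(void *handle)
{
	struct device *dev = (struct device *)handle;

	printf("fini %s\n", dev->name);
	return 0;
}

/* new shape: a typed argument, and suspend simply forwards the block */
static int hw_fini(struct ip_block *block)
{
	printf("fini %s\n", block->dev->name);
	return 0;
}

static int suspend(struct ip_block *block)
{
	return hw_fini(block);	/* no cast, no re-derivation */
}

int main(void)
{
	struct device d = { "dce" };
	struct ip_block b = { &d };

	hw_fini_old(&d);
	return suspend(&b);
}

Besides dropping the casts, the typed argument gives each hook direct access
to per-block state rather than just the device pointer.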
} -static int dce_v8_0_suspend(void *handle) +static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2835,18 +2835,18 @@ static int dce_v8_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v8_0_hw_fini(handle); + return dce_v8_0_hw_fini(ip_block); } -static int dce_v8_0_resume(void *handle) +static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v8_0_hw_init(handle); + ret = dce_v8_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2866,15 +2866,10 @@ static bool dce_v8_0_is_idle(void *handle) return true; } -static int dce_v8_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v8_0_soft_reset(void *handle) +static int dce_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v8_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3232,7 +3227,6 @@ static int dce_v8_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .name = "dce_v8_0", .early_init = dce_v8_0_early_init, - .late_init = NULL, .sw_init = dce_v8_0_sw_init, .sw_fini = dce_v8_0_sw_fini, .hw_init = dce_v8_0_hw_init, @@ -3240,12 +3234,9 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .suspend = dce_v8_0_suspend, .resume = dce_v8_0_resume, .is_idle = dce_v8_0_is_idle, - .wait_for_idle = dce_v8_0_wait_for_idle, .soft_reset = dce_v8_0_soft_reset, .set_clockgating_state = dce_v8_0_set_clockgating_state, .set_powergating_state = dce_v8_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 45ed97038df0..24dce803a829 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3677,13 +3677,19 @@ static int gfx_v10_0_set_powergating_state(void *handle, enum amd_powergating_state state); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -4683,11 +4689,11 @@ static void 
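In gfx10_kiq_set_resources() above, the two SET_RESOURCES dwords that
previously carried the always-zero GWS masks now carry the cleaner-shader
address, pre-shifted right by 8 (the encoding assumes a 256-byte-aligned
address) and split into low/high halves. A round-trip check of that packing,
with an arbitrary aligned example address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_addr = 0x123456789a00ULL;	/* 256-byte aligned */
	uint64_t mc = gpu_addr >> 8;		/* what the packet carries */
	uint32_t lo = (uint32_t)mc;
	uint32_t hi = (uint32_t)(mc >> 32);
	uint64_t back = (((uint64_t)hi << 32) | lo) << 8;

	printf("dwords %08" PRIx32 " %08" PRIx32 ", addr %#" PRIx64 "\n",
	       lo, hi, back);
	return back == gpu_addr ? 0 : 1;	/* exits 0: lossless */
}

static void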
gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v10_0_sw_init(void *handle) +static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): @@ -4726,6 +4732,11 @@ static int gfx_v10_0_sw_init(void *handle) adev->gfx.mec.num_queue_per_pipe = 8; break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } /* KIQ event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, @@ -4814,6 +4825,11 @@ static int gfx_v10_0_sw_init(void *handle) } } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { @@ -4842,6 +4858,10 @@ static int gfx_v10_0_sw_init(void *handle) gfx_v10_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -4866,10 +4886,10 @@ static void gfx_v10_0_me_fini(struct amdgpu_device *adev) (void **)&adev->gfx.me.me_fw_ptr); } -static int gfx_v10_0_sw_fini(void *handle) +static int gfx_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -4881,6 +4901,8 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev, 0); + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v10_0_pfp_fini(adev); gfx_v10_0_ce_fini(adev); gfx_v10_0_me_fini(adev); @@ -4891,6 +4913,7 @@ static int gfx_v10_0_sw_fini(void *handle) gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev); gfx_v10_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -6374,7 +6397,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -6412,7 +6435,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ + /* Set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -7366,14 +7389,17 @@ static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG, data); } -static int gfx_v10_0_hw_init(void *handle) +static int gfx_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev); + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { /** * For gfx 10, rlc firmware loading relies on smu firmware is @@ -7418,9 +7444,9 @@ static int gfx_v10_0_hw_init(void *handle) return r; } -static int gfx_v10_0_hw_fini(void *handle) +static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -7431,7 +7457,7 @@ static int gfx_v10_0_hw_fini(void *handle) * otherwise the gfxoff disallowing will be failed to set. */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1)) - gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE); + gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { @@ -7456,14 +7482,14 @@ static int gfx_v10_0_hw_fini(void *handle) return 0; } -static int gfx_v10_0_suspend(void *handle) +static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v10_0_hw_fini(handle); + return gfx_v10_0_hw_fini(ip_block); } -static int gfx_v10_0_resume(void *handle) +static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v10_0_hw_init(handle); + return gfx_v10_0_hw_init(ip_block); } static bool gfx_v10_0_is_idle(void *handle) @@ -7477,11 +7503,11 @@ static bool gfx_v10_0_is_idle(void *handle) return true; } -static int gfx_v10_0_wait_for_idle(void *handle) +static int gfx_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -7495,11 +7521,11 @@ static int gfx_v10_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v10_0_soft_reset(void *handle) +static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); @@ -7678,9 +7704,9 @@ static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v10_0_early_init(void *handle) +static int gfx_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v10_0_gfx_funcs; @@ -7722,9 +7748,9 @@ static int gfx_v10_0_early_init(void *handle) return gfx_v10_0_init_microcode(adev); } -static int gfx_v10_0_late_init(void *handle) +static int gfx_v10_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -9402,8 +9428,6 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring) static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -9414,8 +9438,7 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, 
uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) @@ -9568,9 +9591,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, return amdgpu_ring_test_ring(ring); } -static void gfx_v10_ip_print(void *handle, struct drm_printer *p) +static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); @@ -9632,9 +9655,9 @@ static void gfx_v10_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v10_ip_dump(void *handle) +static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); @@ -9699,6 +9722,13 @@ static void gfx_v10_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { .name = "gfx_v10_0", .early_init = gfx_v10_0_early_init, @@ -9749,7 +9779,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2 + /* SWITCH_BUFFER */ - 8, /* gfx_v10_0_emit_mem_sync */ + 8 + /* gfx_v10_0_emit_mem_sync */ + 2, /* gfx_v10_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -9772,6 +9803,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kgq, + .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -9791,7 +9825,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v10_0_emit_mem_sync */ + 8 + /* gfx_v10_0_emit_mem_sync */ + 2, /* gfx_v10_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -9809,6 +9844,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kcq, + .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d3e8be82a172..2ae058a224f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -46,6 +46,7 @@ #include "clearstate_gfx11.h" #include "v11_structs.h" #include "gfx_v11_0.h" +#include "gfx_v11_0_cleaner_shader.h" #include "gfx_v11_0_3.h" #include "nbio_v4_3.h" #include "mes_v11_0.h" @@ -293,14 +294,20 @@ static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -483,8 +490,6 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -495,8 +500,7 @@ static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) @@ -1536,11 +1540,11 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v11_0_sw_init(void *handle) +static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): @@ -1575,6 +1579,29 @@ static int gfx_v11_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 2280 && + adev->gfx.pfp_fw_version >= 2370 && + adev->gfx.mec_fw_version >= 2450 && + adev->mes.fw_version[0] >= 99) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; + default: + adev->gfx.enable_cleaner_shader = false; + 
break; + } + /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) @@ -1666,6 +1693,24 @@ static int gfx_v11_0_sw_init(void *handle) } } + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + if ((adev->gfx.me_fw_version >= 2280) && + (adev->gfx.mec_fw_version >= 2410)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } + break; + default: + break; + } + if (!adev->enable_mes_kiq) { r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); if (r) { @@ -1700,6 +1745,10 @@ static int gfx_v11_0_sw_init(void *handle) gfx_v11_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -1732,10 +1781,10 @@ static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.rlc_autoload_ptr); } -static int gfx_v11_0_sw_fini(void *handle) +static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -1749,6 +1798,8 @@ static int gfx_v11_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev, 0); } + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v11_0_pfp_fini(adev); gfx_v11_0_me_fini(adev); gfx_v11_0_rlc_fini(adev); @@ -1759,6 +1810,8 @@ static int gfx_v11_0_sw_fini(void *handle) gfx_v11_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); kfree(adev->gfx.ip_dump_gfx_queues); @@ -1893,8 +1946,10 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. These should be enabled by FW for target VMIDs. 
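+	 * (GDS = Global Data Share, GWS = Global Wave Sync, OA = Ordered Append —
+	 * the standard amdgpu expansions; noted here for readers of this hunk.)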
+ */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); @@ -3555,7 +3610,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -3593,7 +3648,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ + /* Set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -4568,10 +4623,13 @@ static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); } -static int gfx_v11_0_hw_init(void *handle) +static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { if (adev->gfx.imu.funcs) { @@ -4665,9 +4723,9 @@ static int gfx_v11_0_hw_init(void *handle) return r; } -static int gfx_v11_0_hw_fini(void *handle) +static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4703,14 +4761,14 @@ static int gfx_v11_0_hw_fini(void *handle) return 0; } -static int gfx_v11_0_suspend(void *handle) +static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v11_0_hw_fini(handle); + return gfx_v11_0_hw_fini(ip_block); } -static int gfx_v11_0_resume(void *handle) +static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v11_0_hw_init(handle); + return gfx_v11_0_hw_init(ip_block); } static bool gfx_v11_0_is_idle(void *handle) @@ -4724,11 +4782,11 @@ static bool gfx_v11_0_is_idle(void *handle) return true; } -static int gfx_v11_0_wait_for_idle(void *handle) +static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4774,12 +4832,12 @@ int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, return 0; } -static int gfx_v11_0_soft_reset(void *handle) +static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; int r, i, j, k; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); @@ -4905,10 +4963,10 @@ static int gfx_v11_0_soft_reset(void *handle) return 
gfx_v11_0_cp_resume(adev); } -static bool gfx_v11_0_check_soft_reset(void *handle) +static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; long tmo = msecs_to_jiffies(1000); @@ -4929,12 +4987,13 @@ static bool gfx_v11_0_check_soft_reset(void *handle) return false; } -static int gfx_v11_0_post_soft_reset(void *handle) +static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; /** * GFX soft reset will impact MES, need resume MES when do GFX soft reset */ - return amdgpu_mes_resume((struct amdgpu_device *)handle); + return amdgpu_mes_resume(adev); } static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) @@ -4995,9 +5054,9 @@ static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v11_0_early_init(void *handle) +static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v11_0_gfx_funcs; @@ -5018,9 +5077,9 @@ static int gfx_v11_0_early_init(void *handle) return gfx_v11_0_init_microcode(adev); } -static int gfx_v11_0_late_init(void *handle) +static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -6639,9 +6698,9 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return amdgpu_ring_test_ring(ring); } -static void gfx_v11_ip_print(void *handle, struct drm_printer *p) +static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); @@ -6703,9 +6762,9 @@ static void gfx_v11_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v11_ip_dump(void *handle) +static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); @@ -6769,6 +6828,13 @@ static void gfx_v11_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { .name = "gfx_v11_0", .early_init = gfx_v11_0_early_init, @@ -6818,7 +6884,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 22 + /* SET_Q_PREEMPTION_MODE */ 8 + 8 + /* FENCE x2 */ - 8, /* gfx_v11_0_emit_mem_sync */ + 8 + /* gfx_v11_0_emit_mem_sync */ + 2, /* gfx_v11_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ .emit_ib = gfx_v11_0_ring_emit_ib_gfx, .emit_fence = gfx_v11_0_ring_emit_fence, @@ -6841,6 +6908,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .soft_recovery = gfx_v11_0_ring_soft_recovery, 
.emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kgq, + .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { @@ -6861,7 +6931,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v11_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v11_0_emit_mem_sync */ + 8 + /* gfx_v11_0_emit_mem_sync */ + 2, /* gfx_v11_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ .emit_ib = gfx_v11_0_ring_emit_ib_compute, .emit_fence = gfx_v11_0_ring_emit_fence, @@ -6879,6 +6950,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kcq, + .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm new file mode 100644 index 000000000000..9b90b66368c7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +// This shader is to clean LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 192 Dword cleaner shader.
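+// (The register clears rely on the MOVREL mechanism: v_movreld_b32 and
+// s_movreld_b32 write the register selected by the destination index plus M0,
+// so the loops below step M0 down by 8 for VGPRs and by 4 for SGPRs to sweep
+// the whole register range.)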
+// To turn this shader program on for compilation, change this to main and lower shader main to main_1
+
+// Navi3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+shader main
+  asic(GFX11)
+  type(CS)
+  wave_size(32)
+// Note: original source code from SQ team
+
+// Takes about 2500 clocks to run.
+//   (theoretical fastest = 1024clks vgpr + 640lds = 1660 clks)
+//
+  S_BARRIER
+
+  //
+  // CLEAR VGPRs
+  //
+  s_mov_b32     m0, 0x00000058   // Loop 96/8=12 times (loop unrolled for performance)
+
+label_0005:
+  v_movreld_b32 v0, 0
+  v_movreld_b32 v1, 0
+  v_movreld_b32 v2, 0
+  v_movreld_b32 v3, 0
+  v_movreld_b32 v4, 0
+  v_movreld_b32 v5, 0
+  v_movreld_b32 v6, 0
+  v_movreld_b32 v7, 0
+  s_sub_u32     m0, m0, 8
+  s_cbranch_scc0 label_0005
+  //
+  //
+
+  s_mov_b32     s2, 0x80000000   // Bit31 is first_wave
+  s_and_b32     s2, s2, s0       // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+  s_cbranch_scc0 label_0023      // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+  // CLEAR LDS
+  //
+  s_mov_b32 exec_lo, 0xffffffff
+  s_mov_b32 exec_hi, 0xffffffff
+  v_mbcnt_lo_u32_b32 v1, exec_hi, 0   // Set V1 to thread-ID (0..63)
+  v_mbcnt_hi_u32_b32 v1, exec_lo, v1  // Set V1 to thread-ID (0..63)
+  v_mul_u32_u24 v1, 0x00000008, v1    // * 8, so each thread is a double-dword (8-byte) address
+  s_mov_b32     s2, 0x0000003f        // 64 loop iterations
+  s_mov_b32     m0, 0xffffffff
+  // Clear all of LDS space
+  // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+  ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+  ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+  v_add_co_u32  v1, vcc, 0x00000400, v1
+  s_sub_u32     s2, s2, 1
+  s_cbranch_scc0 label_001F
+  //
+  // CLEAR SGPRs
+  //
+label_0023:
+  s_mov_b32     m0, 0x00000068   // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+  s_movreld_b32 s0, 0
+  s_movreld_b32 s1, 0
+  s_movreld_b32 s2, 0
+  s_movreld_b32 s3, 0
+  s_sub_u32     m0, m0, 4
+  s_cbranch_scc0 label_sgpr_loop
+
+  //clear vcc
+  s_mov_b64 vcc, 0               //clear vcc
+  s_mov_b32 flat_scratch_lo, 0   //clear flat scratch lo SGPR
+  s_mov_b32 flat_scratch_hi, 0   //clear flat scratch hi SGPR
+  s_mov_b64 ttmp0, 0             //Clear ttmp0 and ttmp1
+  s_mov_b64 ttmp2, 0             //Clear ttmp2 and ttmp3
+  s_mov_b64 ttmp4, 0             //Clear ttmp4 and ttmp5
+  s_mov_b64 ttmp6, 0             //Clear ttmp6 and ttmp7
+  s_mov_b64 ttmp8, 0             //Clear ttmp8 and ttmp9
+  s_mov_b64 ttmp10, 0            //Clear ttmp10 and ttmp11
+  s_mov_b64 ttmp12, 0            //Clear ttmp12 and ttmp13
+  s_mov_b64 ttmp14, 0            //Clear ttmp14 and ttmp15
+
+  s_endpgm
+
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h new file mode 100644 index 000000000000..3218cc04f543 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* Define the cleaner shader gfx_11_0_3 */ +static const u32 gfx_11_0_3_cleaner_shader_hex[] = { + 0xb0804006, 0xbe8200ff, + 0x00000058, 0xbefd0080, + 0x7e008480, 0x7e028480, + 0x7e048480, 0x7e068480, + 0x7e088480, 0x7e0a8480, + 0x7e0c8480, 0x7e0e8480, + 0xbefd0002, 0x80828802, + 0xbfa1fff5, 0xbe8200ff, + 0x80000000, 0x8b020002, + 0xbfa10012, 0xbefe00c1, + 0xbeff00c1, 0xd71f0001, + 0x0001007f, 0xd7200001, + 0x0002027e, 0x16020288, + 0xbe8200bf, 0xbefd00c1, + 0xd9382000, 0x00020201, + 0xd9386040, 0x00040401, + 0xd7006a01, 0x000202ff, + 0x00000400, 0x80828102, + 0xbfa1fff7, 0xbefd00ff, + 0x00000068, 0xbe804280, + 0xbe814280, 0xbe824280, + 0xbe834280, 0x80fd847d, + 0xbfa1fffa, 0xbeea0180, + 0xbeec0180, 0xbeee0180, + 0xbef00180, 0xbef20180, + 0xbef40180, 0xbef60180, + 0xbef80180, 0xbefa0180, + 0xbfb00000, 0xbf9f0000, + 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0xbf9f0000, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 47b47d21f464..fe7c48f2fb2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1319,12 +1319,12 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v12_0_sw_init(void *handle) +static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; unsigned num_compute_rings; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): @@ -1346,6 +1346,12 @@ static int gfx_v12_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } + /* recalculate compute rings to use based on hardware configuration */ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe) / 2; @@ -1431,6 +1437,12 @@ static int gfx_v12_0_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!adev->enable_mes_kiq) { r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0); if (r) { @@ -1460,6 +1472,10 @@ static int gfx_v12_0_sw_init(void *handle) gfx_v12_0_alloc_ip_dump(adev); + r = 
amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -1492,10 +1508,10 @@ static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.rlc_autoload_ptr); } -static int gfx_v12_0_sw_fini(void *handle) +static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -1519,6 +1535,8 @@ static int gfx_v12_0_sw_fini(void *handle) gfx_v12_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); kfree(adev->gfx.ip_dump_gfx_queues); @@ -2601,7 +2619,7 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -3513,10 +3531,10 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) } } -static int gfx_v12_0_hw_init(void *handle) +static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { @@ -3603,9 +3621,9 @@ static int gfx_v12_0_hw_init(void *handle) return r; } -static int gfx_v12_0_hw_fini(void *handle) +static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t tmp; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -3643,14 +3661,14 @@ static int gfx_v12_0_hw_fini(void *handle) return 0; } -static int gfx_v12_0_suspend(void *handle) +static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v12_0_hw_fini(handle); + return gfx_v12_0_hw_fini(ip_block); } -static int gfx_v12_0_resume(void *handle) +static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v12_0_hw_init(handle); + return gfx_v12_0_hw_init(ip_block); } static bool gfx_v12_0_is_idle(void *handle) @@ -3664,11 +3682,11 @@ static bool gfx_v12_0_is_idle(void *handle) return true; } -static int gfx_v12_0_wait_for_idle(void *handle) +static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -3695,9 +3713,9 @@ static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev) return clock; } -static int gfx_v12_0_early_init(void *handle) +static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v12_0_gfx_funcs; @@ -3717,9 +3735,9 @@ static int gfx_v12_0_early_init(void *handle) return gfx_v12_0_init_microcode(adev); } -static int gfx_v12_0_late_init(void *handle) +static int gfx_v12_0_late_init(struct amdgpu_ip_block 
*ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -5022,8 +5040,6 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -5034,13 +5050,19 @@ static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } -static void gfx_v12_ip_print(void *handle, struct drm_printer *p) +static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + +static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) +{ + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); @@ -5102,9 +5124,9 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v12_ip_dump(void *handle) +static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); @@ -5297,7 +5319,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ - 8, /* gfx_v12_0_emit_mem_sync */ + 8 + /* gfx_v12_0_emit_mem_sync */ + 2, /* gfx_v12_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ .emit_ib = gfx_v12_0_ring_emit_ib_gfx, .emit_fence = gfx_v12_0_ring_emit_fence, @@ -5318,6 +5341,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kgq, + .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { @@ -5336,7 +5362,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v12_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v12_0_emit_mem_sync */ + 8 + /* gfx_v12_0_emit_mem_sync */ + 2, /* gfx_v12_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ .emit_ib = gfx_v12_0_ring_emit_ib_compute, .emit_fence = gfx_v12_0_ring_emit_fence, @@ -5353,6 +5380,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kcq, + .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = 
amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 564f0b9336b6..41f50bf380c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3023,9 +3023,9 @@ static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = { .start = gfx_v6_0_rlc_start }; -static int gfx_v6_0_early_init(void *handle) +static int gfx_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; @@ -3039,10 +3039,10 @@ static int gfx_v6_0_early_init(void *handle) return 0; } -static int gfx_v6_0_sw_init(void *handle) +static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); @@ -3107,10 +3107,10 @@ static int gfx_v6_0_sw_init(void *handle) return r; } -static int gfx_v6_0_sw_fini(void *handle) +static int gfx_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -3122,10 +3122,10 @@ static int gfx_v6_0_sw_fini(void *handle) return 0; } -static int gfx_v6_0_hw_init(void *handle) +static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v6_0_constants_init(adev); @@ -3142,9 +3142,9 @@ static int gfx_v6_0_hw_init(void *handle) return r; } -static int gfx_v6_0_hw_fini(void *handle) +static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v6_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev); @@ -3153,18 +3153,14 @@ static int gfx_v6_0_hw_fini(void *handle) return 0; } -static int gfx_v6_0_suspend(void *handle) +static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v6_0_hw_fini(adev); + return gfx_v6_0_hw_fini(ip_block); } -static int gfx_v6_0_resume(void *handle) +static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v6_0_hw_init(adev); + return gfx_v6_0_hw_init(ip_block); } static bool gfx_v6_0_is_idle(void *handle) @@ -3177,24 +3173,19 @@ static bool gfx_v6_0_is_idle(void *handle) return true; } -static int gfx_v6_0_wait_for_idle(void *handle) +static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v6_0_is_idle(handle)) + if (gfx_v6_0_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v6_0_soft_reset(void *handle) -{ - return 0; -} - static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -3444,7 +3435,6 @@ static void 
gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring) static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .name = "gfx_v6_0", .early_init = gfx_v6_0_early_init, - .late_init = NULL, .sw_init = gfx_v6_0_sw_init, .sw_fini = gfx_v6_0_sw_fini, .hw_init = gfx_v6_0_hw_init, @@ -3453,11 +3443,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .resume = gfx_v6_0_resume, .is_idle = gfx_v6_0_is_idle, .wait_for_idle = gfx_v6_0_wait_for_idle, - .soft_reset = gfx_v6_0_soft_reset, .set_clockgating_state = gfx_v6_0_set_clockgating_state, .set_powergating_state = gfx_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index f146806c4633..824d5913103b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2559,7 +2559,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); @@ -2876,7 +2876,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ wb_gpu_addr = ring->rptr_gpu_addr; mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_rptr_report_addr_hi = @@ -4134,9 +4134,9 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .update_spm_vmid = gfx_v7_0_update_spm_vmid }; -static int gfx_v7_0_early_init(void *handle) +static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; @@ -4151,9 +4151,9 @@ static int gfx_v7_0_early_init(void *handle) return 0; } -static int gfx_v7_0_late_init(void *handle) +static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -4343,10 +4343,10 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, return 0; } -static int gfx_v7_0_sw_init(void *handle) +static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j, k, r, ring_id; switch (adev->asic_type) { @@ -4439,9 +4439,9 @@ static int gfx_v7_0_sw_init(void *handle) return r; } -static int gfx_v7_0_sw_fini(void *handle) +static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->gfx.num_gfx_rings; i++) @@ -4465,10 +4465,10 @@ static int gfx_v7_0_sw_fini(void *handle) return 0; } -static int gfx_v7_0_hw_init(void *handle) +static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v7_0_constants_init(adev); @@ -4486,9 +4486,9 @@ static int gfx_v7_0_hw_init(void *handle) return r; } -static int gfx_v7_0_hw_fini(void *handle) +static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4499,18 +4499,14 @@ static int gfx_v7_0_hw_fini(void *handle) return 0; } -static int gfx_v7_0_suspend(void *handle) +static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v7_0_hw_fini(adev); + return gfx_v7_0_hw_fini(ip_block); } -static int gfx_v7_0_resume(void *handle) +static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v7_0_hw_init(adev); + return gfx_v7_0_hw_init(ip_block); } static bool gfx_v7_0_is_idle(void *handle) @@ -4523,11 +4519,11 @@ static bool gfx_v7_0_is_idle(void *handle) return true; } -static int gfx_v7_0_wait_for_idle(void *handle) +static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4540,11 +4536,11 @@ static int gfx_v7_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v7_0_soft_reset(void *handle) +static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5009,8 +5005,6 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .soft_reset = gfx_v7_0_soft_reset, .set_clockgating_state = gfx_v7_0_set_clockgating_state, .set_powergating_state = gfx_v7_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bc8295812cc8..b7006c41e270 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1894,12 +1894,12 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, static void gfx_v8_0_sq_irq_work_func(struct work_struct *work); -static int gfx_v8_0_sw_init(void *handle) +static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (adev->asic_type) { case CHIP_TONGA: @@ -2037,9 +2037,9 @@ static int gfx_v8_0_sw_init(void *handle) return 0; } -static int gfx_v8_0_sw_fini(void *handle) +static int gfx_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->gfx.num_gfx_rings; i++) @@ -4260,7 +4260,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled 
or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); @@ -4783,10 +4783,10 @@ static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable) gfx_v8_0_cp_compute_enable(adev, enable); } -static int gfx_v8_0_hw_init(void *handle) +static int gfx_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); @@ -4823,6 +4823,13 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); } + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that the unmap queue packets submitted above got + * processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); if (r) DRM_ERROR("KCQ disable failed\n"); @@ -4865,13 +4872,13 @@ static int gfx_v8_0_wait_for_rlc_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v8_0_wait_for_idle(void *handle) +static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v8_0_is_idle(handle)) + if (gfx_v8_0_is_idle(adev)) return 0; udelay(1); @@ -4879,9 +4886,9 @@ static int gfx_v8_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v8_0_hw_fini(void *handle) +static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4897,8 +4904,9 @@ static int gfx_v8_0_hw_fini(void *handle) pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - if (!gfx_v8_0_wait_for_idle(adev)) + if (!gfx_v8_0_wait_for_idle(ip_block)) gfx_v8_0_cp_enable(adev, false); else pr_err("cp is busy, skip halt cp\n"); @@ -4911,19 +4919,19 @@ static int gfx_v8_0_hw_fini(void *handle) return 0; } -static int
gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; @@ -5086,9 +5094,9 @@ static int gfx_v8_0_soft_reset(void *handle) return 0; } -static int gfx_v8_0_post_soft_reset(void *handle) +static int gfx_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset = 0; if ((!adev->gfx.grbm_soft_reset) && @@ -5254,9 +5262,9 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q }; -static int gfx_v8_0_early_init(void *handle) +static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; @@ -5271,9 +5279,9 @@ static int gfx_v8_0_early_init(void *handle) return 0; } -static int gfx_v8_0_late_init(void *handle) +static int gfx_v8_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -6947,8 +6955,6 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, .get_clockgating_state = gfx_v8_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 23f0573ae47b..0b6f09f2cc9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2198,12 +2198,12 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v9_0_sw_init(void *handle) +static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned int hw_prio; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { @@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle) } switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 2): + adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); + if (adev->gfx.mec_fw_version >= 88) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; default: adev->gfx.enable_cleaner_shader = false; break; @@ -2362,6 +2374,12 @@ static int gfx_v9_0_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); @@ -2390,7 +2408,7 @@ static int 
gfx_v9_0_sw_init(void *handle) gfx_v9_0_alloc_ip_dump(adev); - r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + r = amdgpu_gfx_sysfs_init(adev); if (r) return r; @@ -2398,10 +2416,10 @@ static int gfx_v9_0_sw_init(void *handle) } -static int gfx_v9_0_sw_fini(void *handle) +static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) { for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) @@ -2418,6 +2436,8 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev, 0); + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v9_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -2429,7 +2449,7 @@ static int gfx_v9_0_sw_fini(void *handle) } gfx_v9_0_free_microcode(adev); - amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -3184,6 +3204,15 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); @@ -3265,8 +3294,8 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) * confirmed that the APU gfx10/gfx11 needn't such update. 
*/ if (adev->flags & AMD_IS_APU && - adev->in_s3 && !adev->suspend_complete) { - DRM_INFO(" Will skip the CSB packet resubmit\n"); + adev->in_s3 && !pm_resume_via_firmware()) { + DRM_INFO("Will skip the CSB packet resubmit\n"); return 0; } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); @@ -3346,7 +3375,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); @@ -3393,7 +3422,15 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, - (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); @@ -3914,6 +3951,10 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return r; } + if (adev->gfx.num_gfx_rings) + gfx_v9_0_cp_gfx_enable(adev, false); + gfx_v9_0_cp_compute_enable(adev, false); + r = gfx_v9_0_kiq_resume(adev); if (r) return r; @@ -3970,10 +4011,10 @@ static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) gfx_v9_0_cp_compute_enable(adev, enable); } -static int gfx_v9_0_hw_init(void *handle) +static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, adev->gfx.cleaner_shader_ptr); @@ -3999,9 +4040,9 @@ static int gfx_v9_0_hw_init(void *handle) return r; } -static int gfx_v9_0_hw_fini(void *handle) +static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); @@ -4051,14 +4092,14 @@ static int gfx_v9_0_hw_fini(void *handle) return 0; } -static int gfx_v9_0_suspend(void *handle) +static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v9_0_hw_fini(handle); + return gfx_v9_0_hw_fini(ip_block); } -static int gfx_v9_0_resume(void *handle) +static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v9_0_hw_init(handle); + return gfx_v9_0_hw_init(ip_block); } static bool gfx_v9_0_is_idle(void *handle) @@ -4072,24 +4113,24 @@ static bool gfx_v9_0_is_idle(void *handle) return true; } -static int gfx_v9_0_wait_for_idle(void *handle) +static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v9_0_is_idle(handle)) + if 
(gfx_v9_0_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v9_0_soft_reset(void *handle) +static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); @@ -4745,9 +4786,9 @@ fail: return r; } -static int gfx_v9_0_early_init(void *handle) +static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v9_0_gfx_funcs; @@ -4771,9 +4812,9 @@ static int gfx_v9_0_early_init(void *handle) return gfx_v9_0_init_microcode(adev); } -static int gfx_v9_0_ecc_late_init(void *handle) +static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* @@ -4805,9 +4846,9 @@ static int gfx_v9_0_ecc_late_init(void *handle) return 0; } -static int gfx_v9_0_late_init(void *handle) +static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -4822,7 +4863,7 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; - r = gfx_v9_0_ecc_late_init(handle); + r = gfx_v9_0_ecc_late_init(ip_block); if (r) return r; @@ -7167,8 +7208,6 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -7179,8 +7218,7 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) @@ -7237,10 +7275,6 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (!adev->debug_exp_resets && - !adev->gfx.num_gfx_rings) - return -EINVAL; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -7316,9 +7350,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, return amdgpu_ring_test_ring(ring); } -static void gfx_v9_ip_print(void *handle, struct drm_printer *p) +static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9); @@ -7356,9 +7390,9 @@ static void gfx_v9_ip_print(void *handle, struct drm_printer *p) } -static void gfx_v9_ip_dump(void *handle) +static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h index 36c0292b5110..0b6bd09b7529 100644 
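Both insert_nop reworks in this series (gfx v9.0 above and gfx v9.4.3 below) keep the same packing rule while handing the payload fill to amdgpu_ring_insert_nop(): a single NOP is one bare header dword, and anything longer becomes one PACKET3_NOP header whose count field covers the remaining dwords, capped at 0x3ffe. The sizing logic as a standalone sketch; the packet encodings are illustrative, not taken from the generated register headers:

#include <stdint.h>

#define NOP_DWORD       0xffff1000u     /* illustrative single-NOP encoding */
/* illustrative PM4 type-3 NOP header: opcode in bits 8..15, count in 16..29 */
#define PKT3_NOP(cnt)   (0xc0001000u | ((uint32_t)(cnt) << 16))

/* Fill buf[0..num_nop-1] with padding; num_nop >= 1. */
static void emit_nops(uint32_t *buf, uint32_t num_nop)
{
        uint32_t i, cnt;

        if (num_nop == 1) {
                buf[0] = NOP_DWORD;     /* the header alone already is a NOP */
                return;
        }

        /* PM4 counts payload dwords minus one: num_nop - 1 payload -> num_nop - 2 */
        cnt = num_nop - 2;
        buf[0] = PKT3_NOP(cnt < 0x3ffe ? cnt : 0x3ffe);
        for (i = 1; i < num_nop; i++)
                buf[i] = NOP_DWORD;     /* payload dwords are skipped by the CP */
}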
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2024 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,3 +24,45 @@ static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = { /* Add the cleaner shader code here */ }; + +/* Define the cleaner shader gfx_9_4_2 */ +static const u32 gfx_9_4_2_cleaner_shader_hex[] = { + 0xbf068100, 0xbf84003b, + 0xbf8a0000, 0xb07c0000, + 0xbe8200ff, 0x00000078, + 0xbf110802, 0x7e000280, + 0x7e020280, 0x7e040280, + 0x7e060280, 0x7e080280, + 0x7e0a0280, 0x7e0c0280, + 0x7e0e0280, 0x80828802, + 0xbe803202, 0xbf84fff5, + 0xbf9c0000, 0xbe8200ff, + 0x80000000, 0x86020102, + 0xbf840011, 0xbefe00c1, + 0xbeff00c1, 0xd28c0001, + 0x0001007f, 0xd28d0001, + 0x0002027e, 0x10020288, + 0xbe8200bf, 0xbefc00c1, + 0xd89c2000, 0x00020201, + 0xd89c6040, 0x00040401, + 0x320202ff, 0x00000400, + 0x80828102, 0xbf84fff8, + 0xbefc00ff, 0x0000005c, + 0xbf800000, 0xbe802c80, + 0xbe812c80, 0xbe822c80, + 0xbe832c80, 0x80fc847c, + 0xbf84fffa, 0xbee60080, + 0xbee70080, 0xbeea0180, + 0xbeec0180, 0xbeee0180, + 0xbef00180, 0xbef20180, + 0xbef40180, 0xbef60180, + 0xbef80180, 0xbefa0180, + 0xbf810000, 0xbf8d0001, + 0xbefc00ff, 0x0000005c, + 0xbf800000, 0xbe802c80, + 0xbe812c80, 0xbe822c80, + 0xbe832c80, 0x80fc847c, + 0xbf84fffa, 0xbee60080, + 0xbee70080, 0xbeea01ff, + 0x000000ee, 0xbf810000, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm new file mode 100644 index 000000000000..35b8cf9070bd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +// This shader is to clean LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 192 Dword cleaner shader. +// To turn this shader program on for compilation, change this to main and the lower shader main to main_1 + +// MI200 : Clear SGPRs, VGPRs and LDS +// Uses two kernels launched separately: +// 1.
Clean VGPRs, LDS, and lower SGPRs +// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU +// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD +// Waves in the workgroup share the 64KB of LDS +// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383 +// Each wave clears 128 VGPRs, so all 512 in the SIMD +// The first wave of the workgroup clears its 64KB of LDS +// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup +// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared. +// 2. Clean remaining SGPRs +// Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU +// Waves are allocating 96 SGPRs +// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223. +// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799 +// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER +// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched CP will send unhalt command +// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799 + +shader main + asic(MI200) + type(CS) + wave_size(64) +// Note: original source code from SQ team + +// (theoretical fastest = ~512clks vgpr + 1536 lds + ~128 sgpr = 2176 clks) + + s_cmp_eq_u32 s0, 1 // Bit0 is set, sgpr0 is set then clear VGPRs and LDS as FW set COMPUTE_USER_DATA_3 + s_cbranch_scc0 label_0023 // Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s3 == 1) + S_BARRIER + + s_movk_i32 m0, 0x0000 + s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance) + // + // CLEAR VGPRs + // + s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing +label_0005: + v_mov_b32 v0, 0 + v_mov_b32 v1, 0 + v_mov_b32 v2, 0 + v_mov_b32 v3, 0 + v_mov_b32 v4, 0 + v_mov_b32 v5, 0 + v_mov_b32 v6, 0 + v_mov_b32 v7, 0 + s_sub_u32 s2, s2, 8 + s_set_gpr_idx_idx s2 + s_cbranch_scc0 label_0005 + s_set_gpr_idx_off + + // + // + + s_mov_b32 s2, 0x80000000 // Bit31 is first_wave + s_and_b32 s2, s2, s1 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set + s_cbranch_scc0 label_clean_sgpr_1 // Clean LDS if it's the first wave of ThreadGroup/WorkGroup + // CLEAR LDS + // + s_mov_b32 exec_lo, 0xffffffff + s_mov_b32 exec_hi, 0xffffffff + v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63) + v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63) + v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte) + s_mov_b32 s2, 0x00000003f // 64 loop iterations + s_mov_b32 m0, 0xffffffff + // Clear all of LDS space + // Each FirstWave of WorkGroup clears 64kbyte block + +label_001F: + ds_write2_b64 v1, v[2:3], v[2:3] offset1:32 + ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96 + v_add_co_u32 v1, vcc, 0x00000400, v1 + s_sub_u32 s2, s2, 1 + s_cbranch_scc0 label_001F + // + // CLEAR SGPRs + // +label_clean_sgpr_1: + s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance) + s_nop 0 +label_sgpr_loop: + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop + + //clear vcc, flat scratch + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 vcc, 0 //clear vcc + s_mov_b64 ttmp0, 0
//Clear ttmp0 and ttmp1 + s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3 + s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5 + s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7 + s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9 + s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11 + s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13 + s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15 +s_endpgm + +label_0023: + + s_sethalt 1 + + s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance) + s_nop 0 +label_sgpr_loop1: + + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop1 + + //clear vcc, flat scratch + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 vcc, 0xee //clear vcc + +s_endpgm +end + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c100845409f7..e2b3dda57030 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1049,10 +1049,10 @@ static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v9_4_3_sw_init(void *handle) +static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id, xcc_id, num_xcc; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): @@ -1157,6 +1157,19 @@ static int gfx_v9_4_3_sw_init(void *handle) return r; } + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + if (adev->gfx.mec_fw_version >= 155) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; + } + break; + default: + break; + } r = gfx_v9_4_3_gpu_early_init(adev); if (r) return r; @@ -1165,26 +1178,19 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; - - if (!amdgpu_sriov_vf(adev)) { - r = amdgpu_gfx_sysfs_init(adev); - if (r) - return r; - } - - gfx_v9_4_3_alloc_ip_dump(adev); - - r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + r = amdgpu_gfx_sysfs_init(adev); if (r) return r; + gfx_v9_4_3_alloc_ip_dump(adev); + return 0; } -static int gfx_v9_4_3_sw_fini(void *handle) +static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block) { int i, num_xcc; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) @@ -1201,9 +1207,7 @@ static int gfx_v9_4_3_sw_fini(void *handle) gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); gfx_v9_4_3_free_microcode(adev); - if (!amdgpu_sriov_vf(adev)) - amdgpu_gfx_sysfs_fini(adev); - amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -1247,8 +1251,10 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. 
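The sw_init hunk above advertises finer-grained reset support for GC 9.4.3/9.4.4 only when the MEC firmware is at least version 155, OR-ing the per-queue and per-pipe bits onto the baseline derived from the ring. The shape of that capability mask in a standalone sketch; the bit values are stand-ins for amdgpu's real AMDGPU_RESET_TYPE_* flags:

#include <stdint.h>

#define RESET_TYPE_SOFT         (1u << 0)       /* stand-in flag values */
#define RESET_TYPE_FULL         (1u << 1)
#define RESET_TYPE_PER_QUEUE    (1u << 2)
#define RESET_TYPE_PER_PIPE     (1u << 3)

static uint32_t compute_supported_reset(uint32_t base_mask,
                                        unsigned int mec_fw_version)
{
        uint32_t mask = base_mask;      /* e.g. RESET_TYPE_SOFT | RESET_TYPE_FULL */

        if (mec_fw_version >= 155) {    /* threshold taken from the hunk above */
                mask |= RESET_TYPE_PER_QUEUE;
                mask |= RESET_TYPE_PER_PIPE;
        }
        return mask;
}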
These should be enabled by FW for target VMIDs. + */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0); @@ -2343,10 +2349,10 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } -static int gfx_v9_4_3_hw_init(void *handle) +static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, adev->gfx.cleaner_shader_ptr); @@ -2367,9 +2373,9 @@ static int gfx_v9_4_3_hw_init(void *handle) return r; } -static int gfx_v9_4_3_hw_fini(void *handle) +static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, num_xcc; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -2384,14 +2390,14 @@ static int gfx_v9_4_3_hw_fini(void *handle) return 0; } -static int gfx_v9_4_3_suspend(void *handle) +static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v9_4_3_hw_fini(handle); + return gfx_v9_4_3_hw_fini(ip_block); } -static int gfx_v9_4_3_resume(void *handle) +static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v9_4_3_hw_init(handle); + return gfx_v9_4_3_hw_init(ip_block); } static bool gfx_v9_4_3_is_idle(void *handle) @@ -2408,24 +2414,24 @@ static bool gfx_v9_4_3_is_idle(void *handle) return true; } -static int gfx_v9_4_3_wait_for_idle(void *handle) +static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v9_4_3_is_idle(handle)) + if (gfx_v9_4_3_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v9_4_3_soft_reset(void *handle) +static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); @@ -2509,9 +2515,9 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v9_4_3_early_init(void *handle) +static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); @@ -2527,9 +2533,9 @@ static int gfx_v9_4_3_early_init(void *handle) return gfx_v9_4_3_init_microcode(adev); } -static int gfx_v9_4_3_late_init(void *handle) +static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -3056,9 +3062,6 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint32_t value = 0; - if (!adev->debug_exp_resets) - return; - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); value = 
REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); @@ -3574,9 +3577,6 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int r; - if (!adev->debug_exp_resets) - return -EINVAL; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -4567,8 +4567,6 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -4579,13 +4577,12 @@ static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_no amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } -static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) +static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k; uint32_t xcc_id, xcc_offset, inst_offset; uint32_t num_xcc, reg, num_inst; @@ -4643,9 +4640,9 @@ static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v9_4_3_ip_dump(void *handle) +static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k; uint32_t num_xcc, reg, num_inst; uint32_t xcc_id, xcc_offset, inst_offset; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 9784a2892185..697599c46240 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -175,7 +175,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); @@ -630,9 +633,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) } -static int gmc_v10_0_early_init(void *handle) +static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v10_0_set_mmhub_funcs(adev); gmc_v10_0_set_gfxhub_funcs(adev); @@ -651,9 +654,9 @@ static int gmc_v10_0_early_init(void *handle) return 0; } -static int gmc_v10_0_late_init(void *handle) +static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -769,10 +772,10 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v10_0_sw_init(void *handle) +static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfxhub.funcs->init(adev); @@ -920,9 +923,9 @@ static void 
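The gmc v10 interrupt-path hunk just above (repeated for v11, v12 and v9 below) replaces the SR-IOV check with a check on the value itself: a status of zero means the register could not be read or latched nothing useful, so decoding it would only print noise. A minimal sketch of the gate, with invented field offsets standing in for the real vmhub decode:

#include <stdint.h>
#include <stdio.h>

/* stand-in for hub->vmhub_funcs->print_l2_protection_fault_status() */
static void print_l2_fault_status(uint32_t status)
{
        printf("L2 fault status: 0x%08x (cid=%u, rw=%u)\n",
               status, (status >> 9) & 0xffu, (status >> 18) & 1u);
}

static void handle_fault(uint32_t status)
{
        if (status != 0)        /* only decode when the read returned data */
                print_l2_fault_status(status);
}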
gmc_v10_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v10_0_sw_fini(void *handle) +static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v10_0_gart_fini(adev); @@ -985,9 +988,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v10_0_hw_init(void *handle) +static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; @@ -1032,9 +1035,9 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v10_0_hw_fini(void *handle) +static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v10_0_gart_disable(adev); @@ -1053,25 +1056,22 @@ static int gmc_v10_0_hw_fini(void *handle) return 0; } -static int gmc_v10_0_suspend(void *handle) +static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v10_0_hw_fini(adev); + gmc_v10_0_hw_fini(ip_block); return 0; } -static int gmc_v10_0_resume(void *handle) +static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v10_0_hw_init(adev); + r = gmc_v10_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1082,17 +1082,12 @@ static bool gmc_v10_0_is_idle(void *handle) return true; } -static int gmc_v10_0_wait_for_idle(void *handle) +static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v10.*/ return 0; } -static int gmc_v10_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v10_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1154,7 +1149,6 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = { .resume = gmc_v10_0_resume, .is_idle = gmc_v10_0_is_idle, .wait_for_idle = gmc_v10_0_wait_for_idle, - .soft_reset = gmc_v10_0_soft_reset, .set_clockgating_state = gmc_v10_0_set_clockgating_state, .set_powergating_state = gmc_v10_0_set_powergating_state, .get_clockgating_state = gmc_v10_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 2797fd84432b..f893ab4c14df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -144,7 +144,10 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); } @@ -601,9 +604,9 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) } } -static int gmc_v11_0_early_init(void *handle) +static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; 
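gmc v10 above (and v11/v12 below) delete a soft_reset stub that only returned 0, and the v6/v7/v8 tables drop their explicit .dump_ip_state = NULL and .print_ip_state = NULL initializers. Both cleanups suggest the ip-funcs callbacks are treated as optional, with the caller skipping NULL entries. A sketch of that dispatch convention with stand-in types:

#include <stddef.h>

struct ip_funcs {
        int (*hw_init)(void *ctx);
        int (*soft_reset)(void *ctx);   /* optional: may be left NULL */
};

static int ip_do_soft_reset(const struct ip_funcs *f, void *ctx)
{
        if (!f->soft_reset)
                return 0;       /* missing callback behaves like the old stub */
        return f->soft_reset(ctx);
}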
gmc_v11_0_set_gfxhub_funcs(adev); gmc_v11_0_set_mmhub_funcs(adev); @@ -622,9 +625,9 @@ static int gmc_v11_0_early_init(void *handle) return 0; } -static int gmc_v11_0_late_init(void *handle) +static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -729,10 +732,10 @@ static int gmc_v11_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v11_0_sw_init(void *handle) +static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->mmhub.funcs->init(adev); @@ -849,9 +852,9 @@ static void gmc_v11_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v11_0_sw_fini(void *handle) +static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v11_0_gart_fini(adev); @@ -908,9 +911,9 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v11_0_hw_init(void *handle) +static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; @@ -940,9 +943,9 @@ static void gmc_v11_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v11_0_hw_fini(void *handle) +static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ @@ -961,25 +964,22 @@ static int gmc_v11_0_hw_fini(void *handle) return 0; } -static int gmc_v11_0_suspend(void *handle) +static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v11_0_hw_fini(adev); + gmc_v11_0_hw_fini(ip_block); return 0; } -static int gmc_v11_0_resume(void *handle) +static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v11_0_hw_init(adev); + r = gmc_v11_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -990,17 +990,12 @@ static bool gmc_v11_0_is_idle(void *handle) return true; } -static int gmc_v11_0_wait_for_idle(void *handle) +static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v11.*/ return 0; } -static int gmc_v11_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v11_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1041,7 +1036,6 @@ const struct amd_ip_funcs gmc_v11_0_ip_funcs = { .resume = gmc_v11_0_resume, .is_idle = gmc_v11_0_is_idle, .wait_for_idle = gmc_v11_0_wait_for_idle, - .soft_reset = gmc_v11_0_soft_reset, .set_clockgating_state = gmc_v11_0_set_clockgating_state, .set_powergating_state = gmc_v11_0_set_powergating_state, .get_clockgating_state = gmc_v11_0_get_clockgating_state, diff --git 
a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index edcb5351f8cc..d22b027fd0bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -137,7 +137,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); } @@ -604,9 +607,9 @@ static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev) } } -static int gmc_v12_0_early_init(void *handle) +static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v12_0_set_gfxhub_funcs(adev); gmc_v12_0_set_mmhub_funcs(adev); @@ -624,9 +627,9 @@ static int gmc_v12_0_early_init(void *handle) return 0; } -static int gmc_v12_0_late_init(void *handle) +static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -731,10 +734,10 @@ static int gmc_v12_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v12_0_sw_init(void *handle) +static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->mmhub.funcs->init(adev); @@ -841,9 +844,9 @@ static void gmc_v12_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v12_0_sw_fini(void *handle) +static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v12_0_gart_fini(adev); @@ -894,10 +897,10 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v12_0_hw_init(void *handle) +static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* The sequence of these two function calls matters.*/ gmc_v12_0_init_golden_registers(adev); @@ -924,9 +927,9 @@ static void gmc_v12_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v12_0_hw_fini(void *handle) +static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ @@ -945,25 +948,22 @@ static int gmc_v12_0_hw_fini(void *handle) return 0; } -static int gmc_v12_0_suspend(void *handle) +static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v12_0_hw_fini(adev); + gmc_v12_0_hw_fini(ip_block); return 0; } -static int gmc_v12_0_resume(void *handle) +static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = 
gmc_v12_0_hw_init(adev); + r = gmc_v12_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -974,17 +974,12 @@ static bool gmc_v12_0_is_idle(void *handle) return true; } -static int gmc_v12_0_wait_for_idle(void *handle) +static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v11.*/ return 0; } -static int gmc_v12_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v12_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1025,7 +1020,6 @@ const struct amd_ip_funcs gmc_v12_0_ip_funcs = { .resume = gmc_v12_0_resume, .is_idle = gmc_v12_0_is_idle, .wait_for_idle = gmc_v12_0_wait_for_idle, - .soft_reset = gmc_v12_0_soft_reset, .set_clockgating_state = gmc_v12_0_set_clockgating_state, .set_powergating_state = gmc_v12_0_set_powergating_state, .get_clockgating_state = gmc_v12_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d36725666b54..ca000b3d1afc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -43,7 +43,7 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v6_0_wait_for_idle(void *handle); +static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); @@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin"); static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; + struct amdgpu_ip_block *ip_block; - gmc_v6_0_wait_for_idle((void *)adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v6_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -213,6 +218,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { int i, j; + struct amdgpu_ip_block *ip_block; + /* Initialize HDP */ for (i = 0, j = 0; i < 32; i++, j += 0x6) { @@ -224,7 +231,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v6_0_wait_for_idle((void *)adev)) + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + if (gmc_v6_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); if (adev->mode_info.num_crtc) { @@ -251,7 +262,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v6_0_wait_for_idle((void *)adev)) + if (gmc_v6_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } @@ -762,9 +773,9 @@ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v6_0_early_init(void *handle) +static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v6_0_set_gmc_funcs(adev); gmc_v6_0_set_irq_funcs(adev); @@ -772,9 +783,9 @@ static int gmc_v6_0_early_init(void *handle) return 0; } -static int gmc_v6_0_late_init(void *handle) +static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block) { - 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -799,10 +810,10 @@ static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static int gmc_v6_0_sw_init(void *handle) +static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -876,9 +887,9 @@ static int gmc_v6_0_sw_init(void *handle) return 0; } -static int gmc_v6_0_sw_fini(void *handle) +static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -889,10 +900,10 @@ static int gmc_v6_0_sw_fini(void *handle) return 0; } -static int gmc_v6_0_hw_init(void *handle) +static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v6_0_mc_program(adev); @@ -914,9 +925,9 @@ static int gmc_v6_0_hw_init(void *handle) return 0; } -static int gmc_v6_0_hw_fini(void *handle) +static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v6_0_gart_disable(adev); @@ -924,21 +935,19 @@ static int gmc_v6_0_hw_fini(void *handle) return 0; } -static int gmc_v6_0_suspend(void *handle) +static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v6_0_hw_fini(adev); + gmc_v6_0_hw_fini(ip_block); return 0; } -static int gmc_v6_0_resume(void *handle) +static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - r = gmc_v6_0_hw_init(adev); + r = gmc_v6_0_hw_init(ip_block); if (r) return r; @@ -950,6 +959,7 @@ static int gmc_v6_0_resume(void *handle) static bool gmc_v6_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | @@ -959,13 +969,13 @@ static bool gmc_v6_0_is_idle(void *handle) return true; } -static int gmc_v6_0_wait_for_idle(void *handle) +static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gmc_v6_0_is_idle(handle)) + if (gmc_v6_0_is_idle(adev)) return 0; udelay(1); } @@ -973,9 +983,10 @@ static int gmc_v6_0_wait_for_idle(void *handle) } -static int gmc_v6_0_soft_reset(void *handle) +static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -992,7 +1003,8 @@ static int gmc_v6_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v6_0_mc_stop(adev); - if (gmc_v6_0_wait_for_idle(adev)) + + if (gmc_v6_0_wait_for_idle(ip_block)) 
dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); tmp = RREG32(mmSRBM_SOFT_RESET); @@ -1109,8 +1121,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = { .soft_reset = gmc_v6_0_soft_reset, .set_clockgating_state = gmc_v6_0_set_clockgating_state, .set_powergating_state = gmc_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 994432fb57ea..07f45f1a503a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -52,7 +52,7 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v7_0_wait_for_idle(void *handle); +static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); @@ -921,9 +921,9 @@ static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v7_0_early_init(void *handle) +static int gmc_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v7_0_set_gmc_funcs(adev); gmc_v7_0_set_irq_funcs(adev); @@ -940,9 +940,9 @@ static int gmc_v7_0_early_init(void *handle) return 0; } -static int gmc_v7_0_late_init(void *handle) +static int gmc_v7_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -968,10 +968,10 @@ static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static int gmc_v7_0_sw_init(void *handle) +static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -1060,9 +1060,9 @@ static int gmc_v7_0_sw_init(void *handle) return 0; } -static int gmc_v7_0_sw_fini(void *handle) +static int gmc_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1074,10 +1074,10 @@ static int gmc_v7_0_sw_fini(void *handle) return 0; } -static int gmc_v7_0_hw_init(void *handle) +static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v7_0_init_golden_registers(adev); @@ -1101,9 +1101,9 @@ static int gmc_v7_0_hw_init(void *handle) return 0; } -static int gmc_v7_0_hw_fini(void *handle) +static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v7_0_gart_disable(adev); @@ -1111,25 +1111,22 @@ static int gmc_v7_0_hw_fini(void *handle) return 0; } -static int gmc_v7_0_suspend(void *handle) +static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v7_0_hw_fini(adev); + gmc_v7_0_hw_fini(ip_block); return 0; } -static int 
gmc_v7_0_resume(void *handle) +static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v7_0_hw_init(adev); + r = gmc_v7_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1146,11 +1143,11 @@ static bool gmc_v7_0_is_idle(void *handle) return true; } -static int gmc_v7_0_wait_for_idle(void *handle) +static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1167,9 +1164,9 @@ static int gmc_v7_0_wait_for_idle(void *handle) } -static int gmc_v7_0_soft_reset(void *handle) +static int gmc_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1351,8 +1348,6 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .soft_reset = gmc_v7_0_soft_reset, .set_clockgating_state = gmc_v7_0_set_clockgating_state, .set_powergating_state = gmc_v7_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 86488c052f82..12d5967ecd45 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -53,7 +53,7 @@ static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v8_0_wait_for_idle(void *handle); +static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); @@ -170,8 +170,13 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gmc_v8_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; + struct amdgpu_ip_block *ip_block; - gmc_v8_0_wait_for_idle(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v8_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -426,6 +431,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; u32 tmp; int i, j; @@ -439,7 +445,11 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v8_0_wait_for_idle((void *)adev)) + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); if (adev->mode_info.num_crtc) { @@ -474,7 +484,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v8_0_wait_for_idle((void *)adev)) + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -1027,9 +1037,9 @@ static int 
gmc_v8_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v8_0_early_init(void *handle) +static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v8_0_set_gmc_funcs(adev); gmc_v8_0_set_irq_funcs(adev); @@ -1046,9 +1056,9 @@ static int gmc_v8_0_early_init(void *handle) return 0; } -static int gmc_v8_0_late_init(void *handle) +static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -1076,10 +1086,10 @@ static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) #define mmMC_SEQ_MISC0_FIJI 0xA71 -static int gmc_v8_0_sw_init(void *handle) +static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -1173,9 +1183,9 @@ static int gmc_v8_0_sw_init(void *handle) return 0; } -static int gmc_v8_0_sw_fini(void *handle) +static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1187,10 +1197,10 @@ static int gmc_v8_0_sw_fini(void *handle) return 0; } -static int gmc_v8_0_hw_init(void *handle) +static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v8_0_init_golden_registers(adev); @@ -1222,9 +1232,9 @@ static int gmc_v8_0_hw_init(void *handle) return 0; } -static int gmc_v8_0_hw_fini(void *handle) +static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v8_0_gart_disable(adev); @@ -1232,25 +1242,22 @@ static int gmc_v8_0_hw_fini(void *handle) return 0; } -static int gmc_v8_0_suspend(void *handle) +static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v8_0_hw_fini(adev); + gmc_v8_0_hw_fini(ip_block); return 0; } -static int gmc_v8_0_resume(void *handle) +static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v8_0_hw_init(adev); + r = gmc_v8_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1267,11 +1274,11 @@ static bool gmc_v8_0_is_idle(void *handle) return true; } -static int gmc_v8_0_wait_for_idle(void *handle) +static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1289,10 +1296,10 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static bool gmc_v8_0_check_soft_reset(void *handle) +static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__VMC_BUSY_MASK) @@ -1316,23 +1323,23 @@ static bool gmc_v8_0_check_soft_reset(void *handle) return false; } -static int gmc_v8_0_pre_soft_reset(void *handle) +static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->gmc.srbm_soft_reset) return 0; gmc_v8_0_mc_stop(adev); - if (gmc_v8_0_wait_for_idle(adev)) + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); return 0; } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->gmc.srbm_soft_reset) @@ -1361,9 +1368,9 @@ static int gmc_v8_0_soft_reset(void *handle) return 0; } -static int gmc_v8_0_post_soft_reset(void *handle) +static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->gmc.srbm_soft_reset) return 0; @@ -1715,8 +1722,6 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, .get_clockgating_state = gmc_v8_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7a45f3fdc734..50c5da3020cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -672,6 +672,12 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) return 0; + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (!status) + return 0; + if (!amdgpu_sriov_vf(adev)) WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); @@ -1390,14 +1396,44 @@ gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) } static enum amdgpu_memory_partition +gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev) +{ + switch (adev->gmc.num_mem_partitions) { + case 0: + return UNKNOWN_MEMORY_PARTITION_MODE; + case 1: + return AMDGPU_NPS1_PARTITION_MODE; + case 2: + return AMDGPU_NPS2_PARTITION_MODE; + case 4: + return AMDGPU_NPS4_PARTITION_MODE; + default: + return AMDGPU_NPS1_PARTITION_MODE; + } + + return AMDGPU_NPS1_PARTITION_MODE; +} + +static enum amdgpu_memory_partition gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) - return AMDGPU_NPS1_PARTITION_MODE; + return gmc_v9_0_query_vf_memory_partition(adev); return gmc_v9_0_get_memory_partition(adev, NULL); } +static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) +{ + if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && + adev->nbio.funcs->is_nps_switch_requested(adev)) { + adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS; + return true; + } + + return false; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, @@ -1409,6 +1445,8 @@ static const struct 
amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, + .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition, + .need_reset_on_init = &gmc_v9_0_need_reset_on_init, }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -1548,9 +1586,31 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) adev->gmc.xgmi.ras = &xgmi_ras; } -static int gmc_v9_0_early_init(void *handle) +static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->gmc.supported_nps_modes = 0; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return; + + /*TODO: Check PSP version also which supports NPS switch. Otherwise keep + * supported modes as 0. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + adev->gmc.supported_nps_modes = + BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + default: + break; + } +} + +static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; /* * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined @@ -1604,9 +1664,9 @@ static int gmc_v9_0_early_init(void *handle) return 0; } -static int gmc_v9_0_late_init(void *handle) +static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -1903,6 +1963,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, switch (mode) { case UNKNOWN_MEMORY_PARTITION_MODE: + adev->gmc.num_mem_partitions = 0; + break; case AMDGPU_NPS1_PARTITION_MODE: adev->gmc.num_mem_partitions = 1; break; @@ -1922,7 +1984,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, /* Use NPS range info, if populated */ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, - adev->gmc.num_mem_partitions); + &adev->gmc.num_mem_partitions); if (!r) { l = 0; for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { @@ -1932,6 +1994,11 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, } } else { + if (!adev->gmc.num_mem_partitions) { + dev_err(adev->dev, + "Not able to detect NPS mode, fall back to NPS1"); + adev->gmc.num_mem_partitions = 1; + } /* Fallback to sw based calculation */ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; size /= adev->gmc.num_mem_partitions; @@ -1990,10 +2057,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) adev->gmc.vram_width = 128 * 64; } -static int gmc_v9_0_sw_init(void *handle) +static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned long inst_mask = adev->aid_mask; adev->gfxhub.funcs->init(adev); @@ -2168,6 +2235,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; + gmc_v9_0_init_nps_details(adev); /* * number of VMs * VMID 0 is reserved for System @@ -2201,9 +2269,9 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -static int gmc_v9_0_sw_fini(void *handle) +static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device 
*adev = ip_block->adev; if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) @@ -2311,9 +2379,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v9_0_hw_init(void *handle) +static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool value; int i, r; @@ -2396,9 +2464,9 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v9_0_hw_fini(void *handle) +static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v9_0_gart_disable(adev); @@ -2416,32 +2484,44 @@ static int gmc_v9_0_hw_fini(void *handle) if (adev->mmhub.funcs->update_power_gating) adev->mmhub.funcs->update_power_gating(adev, false); - amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); + /* + * For minimal init, late_init is not called, hence VM fault/RAS irqs + * are not enabled. + */ + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { + amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - if (adev->gmc.ecc_irq.funcs && - amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + if (adev->gmc.ecc_irq.funcs && + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + } return 0; } -static int gmc_v9_0_suspend(void *handle) +static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gmc_v9_0_hw_fini(adev); + return gmc_v9_0_hw_fini(ip_block); } -static int gmc_v9_0_resume(void *handle) +static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v9_0_hw_init(adev); + /* If a reset is done for NPS mode switch, read the memory range + * information again. 
+ */ + if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { + gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; + } + + r = gmc_v9_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -2452,13 +2532,13 @@ static bool gmc_v9_0_is_idle(void *handle) return true; } -static int gmc_v9_0_wait_for_idle(void *handle) +static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v9.*/ return 0; } -static int gmc_v9_0_soft_reset(void *handle) +static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX for emulation.*/ return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 07984f7c3ae7..7f45e93c0397 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -273,9 +273,9 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int iceland_ih_early_init(void *handle) +static int iceland_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -287,10 +287,10 @@ static int iceland_ih_early_init(void *handle) return 0; } -static int iceland_ih_sw_init(void *handle) +static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -301,9 +301,9 @@ static int iceland_ih_sw_init(void *handle) return r; } -static int iceland_ih_sw_fini(void *handle) +static int iceland_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -311,34 +311,28 @@ static int iceland_ih_sw_fini(void *handle) return 0; } -static int iceland_ih_hw_init(void *handle) +static int iceland_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return iceland_ih_irq_init(adev); } -static int iceland_ih_hw_fini(void *handle) +static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - iceland_ih_irq_disable(adev); + iceland_ih_irq_disable(ip_block->adev); return 0; } -static int iceland_ih_suspend(void *handle) +static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return iceland_ih_hw_fini(adev); + return iceland_ih_hw_fini(ip_block); } -static int iceland_ih_resume(void *handle) +static int iceland_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return iceland_ih_hw_init(adev); + return iceland_ih_hw_init(ip_block); } static bool iceland_ih_is_idle(void *handle) @@ -352,11 +346,11 @@ static bool iceland_ih_is_idle(void *handle) return true; } -static int iceland_ih_wait_for_idle(void *handle) +static int iceland_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev 
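gmc_v9_0_need_reset_on_init() sets AMDGPU_GMC_INIT_RESET_NPS when the NBIO block reports a pending NPS switch, and gmc_v9_0_resume() above re-reads the memory ranges only when that flag is set, clearing it afterwards so an ordinary suspend/resume cycle skips the rescan. A toy model of this test-and-clear flag pattern (the flag value and field names below are stand-ins):

#include <stdio.h>

#define GMC_INIT_RESET_NPS (1u << 0) /* stand-in for AMDGPU_GMC_INIT_RESET_NPS */

struct gmc { unsigned int reset_flags; int num_mem_partitions; };

/* On resume, re-derive the memory ranges only if the preceding reset was
 * triggered by an NPS mode switch, then clear the flag. */
static void resume_ranges(struct gmc *gmc)
{
        if (gmc->reset_flags & GMC_INIT_RESET_NPS) {
                gmc->num_mem_partitions = 4; /* pretend we re-read NPS4 ranges */
                gmc->reset_flags &= ~GMC_INIT_RESET_NPS;
        }
}

int main(void)
{
        struct gmc gmc = { .reset_flags = GMC_INIT_RESET_NPS, .num_mem_partitions = 1 };

        resume_ranges(&gmc);
        printf("partitions=%d flags=%#x\n", gmc.num_mem_partitions, gmc.reset_flags);
        return 0;
}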
= ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -368,10 +362,10 @@ static int iceland_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int iceland_ih_soft_reset(void *handle) +static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -413,7 +407,6 @@ static int iceland_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs iceland_ih_ip_funcs = { .name = "iceland_ih", .early_init = iceland_ih_early_init, - .late_init = NULL, .sw_init = iceland_ih_sw_init, .sw_fini = iceland_ih_sw_fini, .hw_init = iceland_ih_hw_init, @@ -425,8 +418,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = { .soft_reset = iceland_ih_soft_reset, .set_clockgating_state = iceland_ih_set_clockgating_state, .set_powergating_state = iceland_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs iceland_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 18a761d6ef33..38f953fd65d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -559,19 +559,19 @@ static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs; } -static int ih_v6_0_early_init(void *handle) +static int ih_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ih_v6_0_set_interrupt_funcs(adev); ih_v6_0_set_self_irq_funcs(adev); return 0; } -static int ih_v6_0_sw_init(void *handle) +static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -614,19 +614,19 @@ static int ih_v6_0_sw_init(void *handle) return r; } -static int ih_v6_0_sw_fini(void *handle) +static int ih_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v6_0_hw_init(void *handle) +static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v6_0_irq_init(adev); if (r) @@ -635,27 +635,21 @@ static int ih_v6_0_hw_init(void *handle) return 0; } -static int ih_v6_0_hw_fini(void *handle) +static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v6_0_irq_disable(adev); + ih_v6_0_irq_disable(ip_block->adev); return 0; } -static int ih_v6_0_suspend(void *handle) +static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_0_hw_fini(adev); + return ih_v6_0_hw_fini(ip_block); } -static int ih_v6_0_resume(void *handle) +static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_0_hw_init(adev); + return ih_v6_0_hw_init(ip_block); } static bool ih_v6_0_is_idle(void *handle) @@ -664,13 
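iceland_ih_wait_for_idle() above is the canonical shape of these wait hooks: poll a status register for up to adev->usec_timeout iterations and return -ETIMEDOUT if the busy bits never clear. A self-contained sketch with a fake register that goes idle after a few reads:

#include <errno.h>
#include <stdio.h>

/* Fake register standing in for RREG32(mmSRBM_STATUS). */
static unsigned int fake_status = 1u << 17; /* "busy" on the first reads */

static unsigned int read_status(void)
{
        static int reads;

        if (++reads > 3)
                fake_status = 0; /* block goes idle after a few polls */
        return fake_status;
}

/* Poll a status register up to a timeout; succeed as soon as the busy
 * bits clear. The driver would udelay(1) between iterations. */
static int wait_for_idle(unsigned int busy_mask, int usec_timeout)
{
        int i;

        for (i = 0; i < usec_timeout; i++) {
                if (!(read_status() & busy_mask))
                        return 0;
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("wait_for_idle: %d\n", wait_for_idle(1u << 17, 10));
        return 0;
}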
+658,13 @@ static bool ih_v6_0_is_idle(void *handle) return true; } -static int ih_v6_0_wait_for_idle(void *handle) +static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v6_0_soft_reset(void *handle) +static int ih_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -785,7 +779,6 @@ static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .name = "ih_v6_0", .early_init = ih_v6_0_early_init, - .late_init = NULL, .sw_init = ih_v6_0_sw_init, .sw_fini = ih_v6_0_sw_fini, .hw_init = ih_v6_0_hw_init, @@ -798,8 +791,6 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .set_clockgating_state = ih_v6_0_set_clockgating_state, .set_powergating_state = ih_v6_0_set_powergating_state, .get_clockgating_state = ih_v6_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 2e0469feca1e..61381e0c3795 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -532,9 +532,9 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs; } -static int ih_v6_1_early_init(void *handle) +static int ih_v6_1_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -547,10 +547,10 @@ static int ih_v6_1_early_init(void *handle) return 0; } -static int ih_v6_1_sw_init(void *handle) +static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -593,19 +593,19 @@ static int ih_v6_1_sw_init(void *handle) return r; } -static int ih_v6_1_sw_fini(void *handle) +static int ih_v6_1_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v6_1_hw_init(void *handle) +static int ih_v6_1_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v6_1_irq_init(adev); if (r) @@ -614,27 +614,21 @@ static int ih_v6_1_hw_init(void *handle) return 0; } -static int ih_v6_1_hw_fini(void *handle) +static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v6_1_irq_disable(adev); + ih_v6_1_irq_disable(ip_block->adev); return 0; } -static int ih_v6_1_suspend(void *handle) +static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_1_hw_fini(adev); + return ih_v6_1_hw_fini(ip_block); } -static int ih_v6_1_resume(void *handle) +static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_1_hw_init(adev); + return ih_v6_1_hw_init(ip_block); } static bool ih_v6_1_is_idle(void *handle) @@ -643,13 +637,13 @@ static bool ih_v6_1_is_idle(void *handle) return true; } -static int ih_v6_1_wait_for_idle(void 
*handle) +static int ih_v6_1_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v6_1_soft_reset(void *handle) +static int ih_v6_1_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -768,7 +762,6 @@ static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .name = "ih_v6_1", .early_init = ih_v6_1_early_init, - .late_init = NULL, .sw_init = ih_v6_1_sw_init, .sw_fini = ih_v6_1_sw_fini, .hw_init = ih_v6_1_hw_init, @@ -781,8 +774,6 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .set_clockgating_state = ih_v6_1_set_clockgating_state, .set_powergating_state = ih_v6_1_set_powergating_state, .get_clockgating_state = ih_v6_1_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c index 6852081fcff2..d2428cf5d385 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -528,19 +528,19 @@ static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs; } -static int ih_v7_0_early_init(void *handle) +static int ih_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ih_v7_0_set_interrupt_funcs(adev); ih_v7_0_set_self_irq_funcs(adev); return 0; } -static int ih_v7_0_sw_init(void *handle) +static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -583,19 +583,19 @@ static int ih_v7_0_sw_init(void *handle) return r; } -static int ih_v7_0_sw_fini(void *handle) +static int ih_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v7_0_hw_init(void *handle) +static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v7_0_irq_init(adev); if (r) @@ -604,27 +604,21 @@ static int ih_v7_0_hw_init(void *handle) return 0; } -static int ih_v7_0_hw_fini(void *handle) +static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v7_0_irq_disable(adev); + ih_v7_0_irq_disable(ip_block->adev); return 0; } -static int ih_v7_0_suspend(void *handle) +static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v7_0_hw_fini(adev); + return ih_v7_0_hw_fini(ip_block); } -static int ih_v7_0_resume(void *handle) +static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v7_0_hw_init(adev); + return ih_v7_0_hw_init(ip_block); } static bool ih_v7_0_is_idle(void *handle) @@ -633,13 +627,13 @@ static bool ih_v7_0_is_idle(void *handle) return true; } -static int ih_v7_0_wait_for_idle(void *handle) +static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v7_0_soft_reset(void 
*handle) +static int ih_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -758,7 +752,6 @@ static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .name = "ih_v7_0", .early_init = ih_v7_0_early_init, - .late_init = NULL, .sw_init = ih_v7_0_sw_init, .sw_fini = ih_v7_0_sw_fini, .hw_init = ih_v7_0_hw_init, @@ -771,8 +764,6 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .set_clockgating_state = ih_v7_0_set_clockgating_state, .set_powergating_state = ih_v7_0_set_powergating_state, .get_clockgating_state = ih_v7_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 6e0e88076224..03b8b7cd5229 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -458,13 +458,13 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev, /** * jpeg_v1_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -int jpeg_v1_0_early_init(void *handle) +int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -478,12 +478,12 @@ int jpeg_v1_0_early_init(void *handle) /** * jpeg_v1_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -int jpeg_v1_0_sw_init(void *handle) +int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -509,13 +509,13 @@ int jpeg_v1_0_sw_init(void *handle) /** * jpeg_v1_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
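Dropping the explicit .late_init = NULL, .dump_ip_state = NULL and .print_ip_state = NULL lines from these amd_ip_funcs tables is behavior-neutral: with C designated initializers, every member that is not named is zero-initialized, so omitted function pointers are NULL either way. A small demonstration:

#include <stdio.h>

struct ip_funcs {
        const char *name;
        int (*early_init)(void);
        int (*late_init)(void);  /* intentionally not initialized below */
        int (*soft_reset)(void); /* intentionally not initialized below */
};

static int my_early_init(void) { return 0; }

/* Members not named in a designated initializer are zero-filled, so an
 * explicit ".late_init = NULL" line adds nothing. */
static const struct ip_funcs funcs = {
        .name = "demo",
        .early_init = my_early_init,
};

int main(void)
{
        printf("late_init==NULL: %d, soft_reset==NULL: %d\n",
               funcs.late_init == NULL, funcs.soft_reset == NULL);
        return 0;
}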
* * JPEG free up sw allocation */ -void jpeg_v1_0_sw_fini(void *handle) +void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_ring_fini(adev->jpeg.inst->ring_dec); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h index 9654d22e0376..097328635083 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h @@ -24,9 +24,9 @@ #ifndef __JPEG_V1_0_H__ #define __JPEG_V1_0_H__ -int jpeg_v1_0_early_init(void *handle); -int jpeg_v1_0_sw_init(void *handle); -void jpeg_v1_0_sw_fini(void *handle); +int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block); +int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block); +void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block); void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); #define JPEG_V1_REG_RANGE_START 0x8000 diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 41c0f8750dc1..d6823fb45d32 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -41,13 +41,13 @@ static int jpeg_v2_0_set_powergating_state(void *handle, /** * jpeg_v2_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v2_0_early_init(void *handle) +static int jpeg_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -61,13 +61,13 @@ static int jpeg_v2_0_early_init(void *handle) /** * jpeg_v2_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v2_0_sw_init(void *handle) +static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -104,14 +104,14 @@ static int jpeg_v2_0_sw_init(void *handle) /** * jpeg_v2_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v2_0_sw_fini(void *handle) +static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_jpeg_suspend(adev); if (r) @@ -125,12 +125,12 @@ static int jpeg_v2_0_sw_fini(void *handle) /** * jpeg_v2_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
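The comment churn in these JPEG files is kernel-doc upkeep: each /** block must carry one @name: line per parameter, so renaming handle to ip_block requires the matching doc line or scripts/kernel-doc will warn about the mismatch. The expected shape, shown on a hypothetical demo function with a stand-in type:

/* Stand-in type so the snippet is self-contained. */
struct amdgpu_ip_block { void *adev; };

/**
 * demo_hw_init - start and test the demo block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * The @ip_block line must match the parameter name exactly, which is why
 * every converted function in this series also touches its kernel-doc.
 */
static int demo_hw_init(struct amdgpu_ip_block *ip_block)
{
        return ip_block ? 0 : -1;
}

int main(void)
{
        struct amdgpu_ip_block blk = { 0 };

        return demo_hw_init(&blk);
}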
* */ -static int jpeg_v2_0_hw_init(void *handle) +static int jpeg_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -142,13 +142,13 @@ static int jpeg_v2_0_hw_init(void *handle) /** * jpeg_v2_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v2_0_hw_fini(void *handle) +static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -162,20 +162,19 @@ static int jpeg_v2_0_hw_fini(void *handle) /** * jpeg_v2_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v2_0_suspend(void *handle) +static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v2_0_hw_fini(adev); + r = jpeg_v2_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -183,20 +182,19 @@ static int jpeg_v2_0_suspend(void *handle) /** * jpeg_v2_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v2_0_resume(void *handle) +static int jpeg_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v2_0_hw_init(adev); + r = jpeg_v2_0_hw_init(ip_block); return r; } @@ -666,9 +664,9 @@ static bool jpeg_v2_0_is_idle(void *handle) UVD_JRBC_STATUS__RB_JOB_DONE_MASK); } -static int jpeg_v2_0_wait_for_idle(void *handle) +static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -744,7 +742,6 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .name = "jpeg_v2_0", .early_init = jpeg_v2_0_early_init, - .late_init = NULL, .sw_init = jpeg_v2_0_sw_init, .sw_fini = jpeg_v2_0_sw_fini, .hw_init = jpeg_v2_0_hw_init, @@ -753,14 +750,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .resume = jpeg_v2_0_resume, .is_idle = jpeg_v2_0_is_idle, .wait_for_idle = jpeg_v2_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_0_set_clockgating_state, .set_powergating_state = jpeg_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index eedb9a829d95..5063a38801d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -50,13 +50,13 @@ static int amdgpu_ih_clientid_jpeg[] = { /** * jpeg_v2_5_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v2_5_early_init(void *handle) +static int jpeg_v2_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 harvest; int i; @@ -81,15 +81,15 @@ static int jpeg_v2_5_early_init(void *handle) /** * jpeg_v2_5_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v2_5_sw_init(void *handle) +static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) @@ -153,14 +153,14 @@ static int jpeg_v2_5_sw_init(void *handle) /** * jpeg_v2_5_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v2_5_sw_fini(void *handle) +static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_jpeg_suspend(adev); if (r) @@ -174,12 +174,12 @@ static int jpeg_v2_5_sw_fini(void *handle) /** * jpeg_v2_5_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v2_5_hw_init(void *handle) +static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -202,13 +202,13 @@ static int jpeg_v2_5_hw_init(void *handle) /** * jpeg_v2_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v2_5_hw_fini(void *handle) +static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -231,20 +231,19 @@ static int jpeg_v2_5_hw_fini(void *handle) /** * jpeg_v2_5_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v2_5_suspend(void *handle) +static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v2_5_hw_fini(adev); + r = jpeg_v2_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -252,20 +251,19 @@ static int jpeg_v2_5_suspend(void *handle) /** * jpeg_v2_5_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
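jpeg_v2_5_early_init() reads the harvest fuses and records fused-off instances in harvest_config; every per-instance loop afterwards skips those bits, so harvested engines never get rings or interrupt sources. The skip idiom, reduced to a standalone loop:

#include <stdio.h>

int main(void)
{
        unsigned int harvest_config = 1u << 1; /* pretend instance 1 is fused off */
        int num_inst = 2, i;

        for (i = 0; i < num_inst; ++i) {
                /* fused-off instance: no ring, no irq source */
                if (harvest_config & (1u << i))
                        continue;
                printf("init jpeg instance %d\n", i);
        }
        return 0;
}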
* * Resume firmware and hw init JPEG block */ -static int jpeg_v2_5_resume(void *handle) +static int jpeg_v2_5_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v2_5_hw_init(adev); + r = jpeg_v2_5_hw_init(ip_block); return r; } @@ -501,9 +499,9 @@ static bool jpeg_v2_5_is_idle(void *handle) return ret; } -static int jpeg_v2_5_wait_for_idle(void *handle) +static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -615,7 +613,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .name = "jpeg_v2_5", .early_init = jpeg_v2_5_early_init, - .late_init = NULL, .sw_init = jpeg_v2_5_sw_init, .sw_fini = jpeg_v2_5_sw_fini, .hw_init = jpeg_v2_5_hw_init, @@ -624,20 +621,13 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .resume = jpeg_v2_5_resume, .is_idle = jpeg_v2_5_is_idle, .wait_for_idle = jpeg_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .name = "jpeg_v2_6", .early_init = jpeg_v2_5_early_init, - .late_init = NULL, .sw_init = jpeg_v2_5_sw_init, .sw_fini = jpeg_v2_5_sw_fini, .hw_init = jpeg_v2_5_hw_init, @@ -646,14 +636,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .resume = jpeg_v2_5_resume, .is_idle = jpeg_v2_5_is_idle, .wait_for_idle = jpeg_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index b1e7fd25afbc..10adbb7cbf53 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -42,13 +42,13 @@ static int jpeg_v3_0_set_powergating_state(void *handle, /** * jpeg_v3_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v3_0_early_init(void *handle) +static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 harvest; @@ -75,13 +75,13 @@ static int jpeg_v3_0_early_init(void *handle) /** * jpeg_v3_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
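The JPEG suspend/resume hooks are thin compositions and the conversion keeps that shape: suspend is hw_fini followed by the generic amdgpu_jpeg_suspend(), and resume is amdgpu_jpeg_resume() followed by hw_init(), so the two paths mirror each other. Sketched with placeholder helpers:

#include <stdio.h>

static int hw_fini(void)       { puts("hw_fini");       return 0; }
static int hw_init(void)       { puts("hw_init");       return 0; }
static int save_state(void)    { puts("save_state");    return 0; }
static int restore_state(void) { puts("restore_state"); return 0; }

/* Stop the hardware first, then save block state. */
static int suspend(void)
{
        int r = hw_fini();

        return r ? r : save_state();
}

/* Restore block state first, then re-run hw_init -- the mirror image. */
static int resume(void)
{
        int r = restore_state();

        return r ? r : hw_init();
}

int main(void)
{
        suspend();
        resume();
        return 0;
}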
* * Load firmware and sw initialization */ -static int jpeg_v3_0_sw_init(void *handle) +static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -118,13 +118,13 @@ static int jpeg_v3_0_sw_init(void *handle) /** * jpeg_v3_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v3_0_sw_fini(void *handle) +static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); @@ -139,12 +139,12 @@ static int jpeg_v3_0_sw_fini(void *handle) /** * jpeg_v3_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v3_0_hw_init(void *handle) +static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -156,13 +156,13 @@ static int jpeg_v3_0_hw_init(void *handle) /** * jpeg_v3_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v3_0_hw_fini(void *handle) +static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -176,20 +176,19 @@ static int jpeg_v3_0_hw_fini(void *handle) /** * jpeg_v3_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v3_0_suspend(void *handle) +static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v3_0_hw_fini(adev); + r = jpeg_v3_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -197,20 +196,19 @@ static int jpeg_v3_0_suspend(void *handle) /** * jpeg_v3_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v3_0_resume(void *handle) +static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v3_0_hw_init(adev); + r = jpeg_v3_0_hw_init(ip_block); return r; } @@ -459,9 +457,9 @@ static bool jpeg_v3_0_is_idle(void *handle) return ret; } -static int jpeg_v3_0_wait_for_idle(void *handle) +static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -535,7 +533,6 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .name = "jpeg_v3_0", .early_init = jpeg_v3_0_early_init, - .late_init = NULL, .sw_init = jpeg_v3_0_sw_init, .sw_fini = jpeg_v3_0_sw_fini, .hw_init = jpeg_v3_0_hw_init, @@ -544,14 +541,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .resume = jpeg_v3_0_resume, .is_idle = jpeg_v3_0_is_idle, .wait_for_idle = jpeg_v3_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v3_0_set_clockgating_state, .set_powergating_state = jpeg_v3_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 6c5c1a68a9b7..193dfac5dc76 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -48,13 +48,13 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring); /** * jpeg_v4_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_early_init(void *handle) +static int jpeg_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; @@ -70,13 +70,13 @@ static int jpeg_v4_0_early_init(void *handle) /** * jpeg_v4_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_sw_init(void *handle) +static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -123,6 +123,12 @@ static int jpeg_v4_0_sw_init(void *handle) r = amdgpu_jpeg_ras_sw_init(adev); if (r) return r; + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } @@ -130,19 +136,20 @@ static int jpeg_v4_0_sw_init(void *handle) /** * jpeg_v4_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
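The new sysfs reset-mask plumbing in the jpeg_v4_0_x blocks is deliberately symmetric: amdgpu_jpeg_sysfs_reset_mask_init() runs last in sw_init and amdgpu_jpeg_sysfs_reset_mask_fini() runs before the core teardown in sw_fini, keeping teardown the reverse of bring-up. A sketch of that pairing with illustrative helpers:

#include <stdio.h>

static int  sysfs_reset_mask_init(void) { puts("sysfs mask registered"); return 0; }
static void sysfs_reset_mask_fini(void) { puts("sysfs mask removed"); }
static int  core_sw_init(void)          { puts("core sw init"); return 0; }
static int  core_sw_fini(void)          { puts("core sw fini"); return 0; }

static int sw_init(void)
{
        int r = core_sw_init();

        if (r)
                return r;
        /* registered last, so a failure here leaves nothing dangling */
        return sysfs_reset_mask_init();
}

static int sw_fini(void)
{
        /* removed first: exact reverse of bring-up order */
        sysfs_reset_mask_fini();
        return core_sw_fini();
}

int main(void)
{
        if (!sw_init())
                sw_fini();
        return 0;
}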
* * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_sw_fini(void *handle) +static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -151,12 +158,12 @@ static int jpeg_v4_0_sw_fini(void *handle) /** * jpeg_v4_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_hw_init(void *handle) +static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; @@ -187,13 +194,13 @@ static int jpeg_v4_0_hw_init(void *handle) /** * jpeg_v4_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_hw_fini(void *handle) +static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); if (!amdgpu_sriov_vf(adev)) { @@ -210,20 +217,19 @@ static int jpeg_v4_0_hw_fini(void *handle) /** * jpeg_v4_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_suspend(void *handle) +static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_hw_fini(adev); + r = jpeg_v4_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -231,20 +237,19 @@ static int jpeg_v4_0_suspend(void *handle) /** * jpeg_v4_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_resume(void *handle) +static int jpeg_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_hw_init(adev); + r = jpeg_v4_0_hw_init(ip_block); return r; } @@ -621,9 +626,9 @@ static bool jpeg_v4_0_is_idle(void *handle) return ret; } -static int jpeg_v4_0_wait_for_idle(void *handle) +static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -702,7 +707,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .name = "jpeg_v4_0", .early_init = jpeg_v4_0_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_sw_init, .sw_fini = jpeg_v4_0_sw_fini, .hw_init = jpeg_v4_0_hw_init, @@ -711,14 +715,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .resume = jpeg_v4_0_resume, .is_idle = jpeg_v4_0_is_idle, .wait_for_idle = jpeg_v4_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_set_clockgating_state, .set_powergating_state = jpeg_v4_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 86958cb2c2ab..67b51bcbacd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -68,13 +68,13 @@ static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) /** * jpeg_v4_0_3_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_3_early_init(void *handle) +static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS; @@ -88,13 +88,13 @@ static int jpeg_v4_0_3_early_init(void *handle) /** * jpeg_v4_0_3_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_3_sw_init(void *handle) +static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r, jpeg_inst; @@ -159,25 +159,33 @@ static int jpeg_v4_0_3_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * jpeg_v4_0_3_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_3_sw_fini(void *handle) +static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -299,12 +307,12 @@ static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev) /** * jpeg_v4_0_3_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_3_hw_init(void *handle) +static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r, jpeg_inst; @@ -358,13 +366,13 @@ static int jpeg_v4_0_3_hw_init(void *handle) /** * jpeg_v4_0_3_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_3_hw_fini(void *handle) +static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret = 0; cancel_delayed_work_sync(&adev->jpeg.idle_work); @@ -380,20 +388,19 @@ static int jpeg_v4_0_3_hw_fini(void *handle) /** * jpeg_v4_0_3_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_3_suspend(void *handle) +static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_3_hw_fini(adev); + r = jpeg_v4_0_3_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -401,20 +408,19 @@ static int jpeg_v4_0_3_suspend(void *handle) /** * jpeg_v4_0_3_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
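All of these hw_fini paths call cancel_delayed_work_sync() on the idle work before touching power state; otherwise the delayed idle handler could race with teardown and power-gate a block that is already being shut down. A flag-based toy model of the ordering (the real API is a synchronous workqueue cancel, not a flag):

#include <stdbool.h>
#include <stdio.h>

static bool idle_work_pending = true;

/* Models cancel_delayed_work_sync(): after this returns, no idle handler
 * can run concurrently with the teardown below. */
static void cancel_idle_work(void)
{
        idle_work_pending = false;
}

static void power_off_block(void)
{
        printf("power off (idle work pending: %d)\n", idle_work_pending);
}

int main(void)
{
        cancel_idle_work();
        power_off_block();
        return 0;
}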
* * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_3_resume(void *handle) +static int jpeg_v4_0_3_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_3_hw_init(adev); + r = jpeg_v4_0_3_hw_init(ip_block); return r; } @@ -674,11 +680,12 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80004000); + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80004000); + } } /** @@ -694,11 +701,12 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x62a04); - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00004000); + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00004000); + } } /** @@ -743,14 +751,6 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); amdgpu_ring_write(ring, 0); @@ -929,9 +929,9 @@ static bool jpeg_v4_0_3_is_idle(void *handle) return ret; } -static int jpeg_v4_0_3_wait_for_idle(void *handle) +static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret = 0; int i, j; @@ -1058,7 +1058,6 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .name = "jpeg_v4_0_3", .early_init = jpeg_v4_0_3_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_3_sw_init, .sw_fini = jpeg_v4_0_3_sw_fini, .hw_init = jpeg_v4_0_3_hw_init, @@ -1067,14 +1066,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .resume = jpeg_v4_0_3_resume, .is_idle = jpeg_v4_0_3_is_idle, .wait_for_idle = jpeg_v4_0_3_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_3_set_clockgating_state, .set_powergating_state = jpeg_v4_0_3_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { @@ -1088,7 +1081,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */ - 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ + 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ 8 + 16, .emit_ib_size = 22, /* 
jpeg_v4_0_3_dec_ring_emit_ib */ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 44eeed445ea9..b48e2412e6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -61,13 +61,13 @@ static int amdgpu_ih_clientid_jpeg[] = { /** * jpeg_v4_0_5_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_5_early_init(void *handle) +static int jpeg_v4_0_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(4, 0, 5): @@ -94,13 +94,13 @@ static int jpeg_v4_0_5_early_init(void *handle) /** * jpeg_v4_0_5_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_5_sw_init(void *handle) +static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r, i; @@ -153,25 +153,33 @@ static int jpeg_v4_0_5_sw_init(void *handle) adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH); } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * jpeg_v4_0_5_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_5_sw_fini(void *handle) +static int jpeg_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -180,12 +188,12 @@ static int jpeg_v4_0_5_sw_fini(void *handle) /** * jpeg_v4_0_5_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_5_hw_init(void *handle) +static int jpeg_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r = 0; @@ -210,13 +218,13 @@ static int jpeg_v4_0_5_hw_init(void *handle) /** * jpeg_v4_0_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
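The ring-size change in the jpeg_v4_0_3 hunk above is plain dword accounting: each PACKETJ register write costs two dwords (header plus payload), the fence path dropped two such writes, so the per-fence reservation shrinks from 22 to 18 dwords, counted twice for the "x2 vm fence" entry in the ring's frame-size budget. The arithmetic:

#include <stdio.h>

int main(void)
{
        int old_fence = 22;       /* previous per-fence dword reservation */
        int removed_writes = 2;   /* PACKETJ writes dropped from the fence path */
        int dwords_per_write = 2; /* header + payload */
        int new_fence = old_fence - removed_writes * dwords_per_write;

        printf("fence emit: %d -> %d dwords (x2 = %d)\n",
               old_fence, new_fence, 2 * new_fence);
        return 0;
}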
* * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_5_hw_fini(void *handle) +static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -237,20 +245,19 @@ static int jpeg_v4_0_5_hw_fini(void *handle) /** * jpeg_v4_0_5_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_5_suspend(void *handle) +static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_5_hw_fini(adev); + r = jpeg_v4_0_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -258,20 +265,19 @@ static int jpeg_v4_0_5_suspend(void *handle) /** * jpeg_v4_0_5_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_5_resume(void *handle) +static int jpeg_v4_0_5_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_5_hw_init(adev); + r = jpeg_v4_0_5_hw_init(ip_block); return r; } @@ -637,9 +643,9 @@ static bool jpeg_v4_0_5_is_idle(void *handle) return ret; } -static int jpeg_v4_0_5_wait_for_idle(void *handle) +static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -743,7 +749,6 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .name = "jpeg_v4_0_5", .early_init = jpeg_v4_0_5_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_5_sw_init, .sw_fini = jpeg_v4_0_5_sw_fini, .hw_init = jpeg_v4_0_5_hw_init, @@ -752,14 +757,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .resume = jpeg_v4_0_5_resume, .is_idle = jpeg_v4_0_5_is_idle, .wait_for_idle = jpeg_v4_0_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_5_set_clockgating_state, .set_powergating_state = jpeg_v4_0_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index d662aa841f97..686f9605239d 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -42,13 +42,13 @@ static int jpeg_v5_0_0_set_powergating_state(void *handle, /** * jpeg_v5_0_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Set ring and irq function pointers */ -static int jpeg_v5_0_0_early_init(void *handle) +static int jpeg_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -62,13 +62,13 @@ static int jpeg_v5_0_0_early_init(void *handle) /** * jpeg_v5_0_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v5_0_0_sw_init(void *handle) +static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -100,25 +100,32 @@ static int jpeg_v5_0_0_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } /** * jpeg_v5_0_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v5_0_0_sw_fini(void *handle) +static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -127,12 +134,12 @@ static int jpeg_v5_0_0_sw_fini(void *handle) /** * jpeg_v5_0_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v5_0_0_hw_init(void *handle) +static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; @@ -153,13 +160,13 @@ static int jpeg_v5_0_0_hw_init(void *handle) /** * jpeg_v5_0_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v5_0_0_hw_fini(void *handle) +static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -173,20 +180,19 @@ static int jpeg_v5_0_0_hw_fini(void *handle) /** * jpeg_v5_0_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * HW fini and suspend JPEG block */ -static int jpeg_v5_0_0_suspend(void *handle) +static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v5_0_0_hw_fini(adev); + r = jpeg_v5_0_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -194,20 +200,19 @@ static int jpeg_v5_0_0_suspend(void *handle) /** * jpeg_v5_0_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v5_0_0_resume(void *handle) +static int jpeg_v5_0_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v5_0_0_hw_init(adev); + r = jpeg_v5_0_0_hw_init(ip_block); return r; } @@ -546,9 +551,9 @@ static bool jpeg_v5_0_0_is_idle(void *handle) return ret; } -static int jpeg_v5_0_0_wait_for_idle(void *handle) +static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -622,7 +627,6 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .name = "jpeg_v5_0_0", .early_init = jpeg_v5_0_0_early_init, - .late_init = NULL, .sw_init = jpeg_v5_0_0_sw_init, .sw_fini = jpeg_v5_0_0_sw_fini, .hw_init = jpeg_v5_0_0_hw_init, @@ -631,14 +635,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .resume = jpeg_v5_0_0_resume, .is_idle = jpeg_v5_0_0_is_idle, .wait_for_idle = jpeg_v5_0_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state, .set_powergating_state = jpeg_v5_0_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 231a3d490ea8..9c905b9e9376 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -55,8 +55,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin"); -static int mes_v11_0_hw_init(void *handle); -static int mes_v11_0_hw_fini(void *handle); +static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block); +static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block); static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); @@ -366,7 +366,7 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ uint32_t queue_id, uint32_t vmid) { struct amdgpu_device *adev = mes->adev; - uint32_t value; + uint32_t value, reg; int i, r = 0; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); @@ -424,6 +424,31 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + } else if (queue_type == AMDGPU_RING_TYPE_SDMA) { + dev_info(adev->dev, "reset sdma queue 
(%d:%d:%d)\n", + me_id, pipe_id, queue_id); + switch (me_id) { + case 1: + reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ); + break; + case 0: + default: + reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ); + break; + } + + value = 1 << queue_id; + WREG32(reg, value); + /* wait for queue reset done */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(reg) & value)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) { + dev_err(adev->dev, "failed to wait on sdma queue reset done\n"); + r = -ETIMEDOUT; + } } amdgpu_gfx_rlc_exit_safe_mode(adev, 0); @@ -619,6 +644,18 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; + case MES_MISC_OP_CHANGE_CONFIG: + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) { + dev_err(mes->adev->dev, "MES FW version must be 0x63 or newer to support the limit single process feature.\n"); + return -EINVAL; + } + misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; + misc_pkt.change_config.opcode = + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; + misc_pkt.change_config.option.bits.limit_single_process = + input->change_config.option.limit_single_process; + break; + default: DRM_ERROR("unsupported misc op (%d) \n", input->op); return -EINVAL; @@ -683,6 +720,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes->event_log_gpu_addr; } + + if (enforce_isolation) + mes_set_hw_res_pkt.limit_single_process = 1; + return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); @@ -883,6 +923,16 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable) uint32_t pipe, data = 0; if (enable) { + if (amdgpu_mes_log_enable) { + WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO, + lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE)); + WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI, + upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE)); + dev_info(adev->dev, "Setup CP MES MSCRATCH address: 0x%x, 
0x%x\n", + RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI), + RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO)); + } + data = RREG32_SOC15(GC, 0, regCP_MES_CNTL); data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1); data = REG_SET_FIELD(data, CP_MES_CNTL, @@ -1336,16 +1386,16 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, return 0; } -static int mes_v11_0_sw_init(void *handle) +static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; adev->mes.funcs = &mes_v11_0_funcs; adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init; adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini; - adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; + adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE; r = amdgpu_mes_init(adev); if (r) @@ -1377,9 +1427,9 @@ static int mes_v11_0_sw_init(void *handle) return 0; } -static int mes_v11_0_sw_fini(void *handle) +static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1473,6 +1523,7 @@ static void mes_v11_0_kiq_clear(struct amdgpu_device *adev) static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { @@ -1496,6 +1547,12 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); + if (unlikely(!ip_block)) { + dev_err(adev->dev, "Failed to get MES handle\n"); + return -EINVAL; + } + r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) goto failure; @@ -1506,7 +1563,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) adev->mes.enable_legacy_queue_map = false; if (adev->mes.enable_legacy_queue_map) { - r = mes_v11_0_hw_init(adev); + r = mes_v11_0_hw_init(ip_block); if (r) goto failure; } @@ -1514,7 +1571,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) return r; failure: - mes_v11_0_hw_fini(adev); + mes_v11_0_hw_fini(ip_block); return r; } @@ -1535,10 +1592,10 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) return 0; } -static int mes_v11_0_hw_init(void *handle) +static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->mes.ring[0].sched.ready) goto out; @@ -1590,13 +1647,13 @@ out: return 0; failure: - mes_v11_0_hw_fini(adev); + mes_v11_0_hw_fini(ip_block); return r; } -static int mes_v11_0_hw_fini(void *handle) +static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_is_mes_info_enable(adev)) { amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr, &adev->mes.resource_1_addr); @@ -1604,33 +1661,31 @@ static int mes_v11_0_hw_fini(void *handle) return 0; } -static int mes_v11_0_suspend(void *handle) +static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_mes_suspend(adev); + r = amdgpu_mes_suspend(ip_block->adev); if (r) return r; - return mes_v11_0_hw_fini(adev); + 
return mes_v11_0_hw_fini(ip_block); } -static int mes_v11_0_resume(void *handle) +static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = mes_v11_0_hw_init(adev); + r = mes_v11_0_hw_init(ip_block); if (r) return r; - return amdgpu_mes_resume(adev); + return amdgpu_mes_resume(ip_block->adev); } -static int mes_v11_0_early_init(void *handle) +static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1644,9 +1699,9 @@ static int mes_v11_0_early_init(void *handle) return 0; } -static int mes_v11_0_late_init(void *handle) +static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && @@ -1666,8 +1721,6 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = { .hw_fini = mes_v11_0_hw_fini, .suspend = mes_v11_0_suspend, .resume = mes_v11_0_resume, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version mes_v11_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index b3175ff676f3..9ecc5d61e49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -39,8 +39,8 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin"); -static int mes_v12_0_hw_init(void *handle); -static int mes_v12_0_hw_fini(void *handle); +static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block); +static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block); static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); @@ -531,6 +531,14 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; + case MES_MISC_OP_CHANGE_CONFIG: + misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; + misc_pkt.change_config.opcode = + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; + misc_pkt.change_config.option.bits.limit_single_process = + input->change_config.option.limit_single_process; + break; + default: DRM_ERROR("unsupported misc op (%d) \n", input->op); return -EINVAL; @@ -624,6 +632,9 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE; } + if (enforce_isolation) + mes_set_hw_res_pkt.limit_single_process = 1; + return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); @@ -1326,9 +1337,9 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev, return 0; } -static int mes_v12_0_sw_init(void *handle) +static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; adev->mes.funcs = &mes_v12_0_funcs; @@ -1362,9 +1373,9 @@ static int 
mes_v12_0_sw_init(void *handle) return 0; } -static int mes_v12_0_sw_fini(void *handle) +static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1452,6 +1463,7 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring) static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (adev->enable_uni_mes) mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]); @@ -1479,6 +1491,12 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) mes_v12_0_enable(adev, true); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); + if (unlikely(!ip_block)) { + dev_err(adev->dev, "Failed to get MES handle\n"); + return -EINVAL; + } + r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) goto failure; @@ -1492,7 +1510,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) } if (adev->mes.enable_legacy_queue_map) { - r = mes_v12_0_hw_init(adev); + r = mes_v12_0_hw_init(ip_block); if (r) goto failure; } @@ -1500,7 +1518,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) return r; failure: - mes_v12_0_hw_fini(adev); + mes_v12_0_hw_fini(ip_block); return r; } @@ -1522,10 +1540,10 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev) return 0; } -static int mes_v12_0_hw_init(void *handle) +static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->mes.ring[0].sched.ready) goto out; @@ -1584,42 +1602,40 @@ out: return 0; failure: - mes_v12_0_hw_fini(adev); + mes_v12_0_hw_fini(ip_block); return r; } -static int mes_v12_0_hw_fini(void *handle) +static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int mes_v12_0_suspend(void *handle) +static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_mes_suspend(adev); + r = amdgpu_mes_suspend(ip_block->adev); if (r) return r; - return mes_v12_0_hw_fini(adev); + return mes_v12_0_hw_fini(ip_block); } -static int mes_v12_0_resume(void *handle) +static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = mes_v12_0_hw_init(adev); + r = mes_v12_0_hw_init(ip_block); if (r) return r; - return amdgpu_mes_resume(adev); + return amdgpu_mes_resume(ip_block->adev); } -static int mes_v12_0_early_init(void *handle) +static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1631,9 +1647,9 @@ static int mes_v12_0_early_init(void *handle) return 0; } -static int mes_v12_0_late_init(void *handle) +static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index e3ddd22aa172..e9a6f33ca710 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -229,6 +229,52 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) 0); } +static void mmhub_v1_0_init_saw(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + uint32_t tmp; + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + lower_32_bits(pt_base >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + upper_32_bits(pt_base >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + + /* Program SAW CONTEXT0 CNTL */ + tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL); + tmp |= 1 << CONTEXT0_CNTL_ENABLE_OFFSET; + tmp &= ~(3 << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET); + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL, tmp); + + /* Disable all Contexts except Context0 */ + tmp = 0xfffe; + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXTS_DISABLE, tmp); + + /* Program SAW CNTL4 */ + tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4); + tmp |= 1 << VMC_TAP_PDE_REQUEST_SNOOP_OFFSET; + tmp |= 1 << VMC_TAP_PTE_REQUEST_SNOOP_OFFSET; + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4, tmp); +} + static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; @@ -283,6 +329,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) i * hub->ctx_addr_distance, upper_32_bits(adev->vm_manager.max_pfn - 1)); } + + if (amdgpu_ip_version(adev, ISP_HWIP, 0)) + mmhub_v1_0_init_saw(adev); } static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index f47bd7ada4d7..4dcb72d1bdda 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -61,15 +61,18 @@ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, enum idh_event event) { + int r = 0; u32 reg; reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); - if (reg != event) + if (reg == IDH_FAIL) + r = -EINVAL; + else if (reg != event) return -ENOENT; xgpu_nv_mailbox_send_ack(adev); - return 0; + return r; } static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) @@ -178,6 +181,9 @@ send_request: if (data1 != 0) event = IDH_RAS_POISON_READY; break; + case IDH_REQ_RAS_ERROR_COUNT: + event = IDH_RAS_ERROR_COUNT_READY; + break; default: break; } @@ -456,6 +462,11 @@ static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev) return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF); } +static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev) +{ + return 
xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -466,4 +477,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .trans_msg = xgpu_nv_mailbox_trans_msg, .ras_poison_handler = xgpu_nv_ras_poison_handler, .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr, + .req_ras_err_count = xgpu_nv_req_ras_err_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 1d099ffb3a5a..9d61d76e1bf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -40,6 +40,7 @@ enum idh_request { IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, IDH_RAS_POISON = 202, + IDH_REQ_RAS_ERROR_COUNT = 203, }; enum idh_event { @@ -54,6 +55,8 @@ enum idh_event { IDH_RAS_POISON_READY, IDH_PF_SOFT_FLR_NOTIFICATION, IDH_RAS_ERROR_DETECTED, + IDH_RAS_ERROR_COUNT_READY = 11, + IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index b281462093f1..0820ed62e2e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -542,19 +542,19 @@ static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs; } -static int navi10_ih_early_init(void *handle) +static int navi10_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; navi10_ih_set_interrupt_funcs(adev); navi10_ih_set_self_irq_funcs(adev); return 0; } -static int navi10_ih_sw_init(void *handle) +static int navi10_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -593,43 +593,37 @@ static int navi10_ih_sw_init(void *handle) return r; } -static int navi10_ih_sw_fini(void *handle) +static int navi10_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int navi10_ih_hw_init(void *handle) +static int navi10_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return navi10_ih_irq_init(adev); } -static int navi10_ih_hw_fini(void *handle) +static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - navi10_ih_irq_disable(adev); + navi10_ih_irq_disable(ip_block->adev); return 0; } -static int navi10_ih_suspend(void *handle) +static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return navi10_ih_hw_fini(adev); + return navi10_ih_hw_fini(ip_block); } -static int navi10_ih_resume(void *handle) +static int navi10_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return navi10_ih_hw_init(adev); + return navi10_ih_hw_init(ip_block); } static bool navi10_ih_is_idle(void *handle) @@ -638,13 +632,13 @@ static bool navi10_ih_is_idle(void *handle) return true; } -static int navi10_ih_wait_for_idle(void *handle) +static int 
navi10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int navi10_ih_soft_reset(void *handle) +static int navi10_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -700,7 +694,6 @@ static void navi10_ih_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs navi10_ih_ip_funcs = { .name = "navi10_ih", .early_init = navi10_ih_early_init, - .late_init = NULL, .sw_init = navi10_ih_sw_init, .sw_fini = navi10_ih_sw_fini, .hw_init = navi10_ih_hw_init, @@ -713,8 +706,6 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { .set_clockgating_state = navi10_ih_set_clockgating_state, .set_powergating_state = navi10_ih_set_powergating_state, .get_clockgating_state = navi10_ih_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs navi10_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index a5b60c9a2418..c88284ff92d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -68,6 +68,7 @@ #define SDMA_SUBOP_POLL_REG_WRITE_MEM 1 #define SDMA_SUBOP_POLL_DBIT_WRITE_MEM 2 #define SDMA_SUBOP_POLL_MEM_VERIFY 3 +#define SDMA_SUBOP_VM_INVALIDATION 4 #define HEADER_AGENT_DISPATCH 4 #define HEADER_BARRIER 5 #define SDMA_OP_AQL_COPY 0 @@ -4041,6 +4042,69 @@ /* +** Definitions for SDMA_PKT_VM_INVALIDATION packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8 +#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift) + +/*define for gfx_eng_id field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift 16 +#define SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift) + +/*define for mm_eng_id field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift 24 +#define SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift) + +/*define for INVALIDATEREQ word*/ +/*define for invalidatereq field*/ +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1 +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0 +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) (((x) & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift) + +/*define for ADDRESSRANGELO word*/ +/*define for 
addressrangelo field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift) + +/*define for ADDRESSRANGEHI word*/ +/*define for invalidateack field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift) + +/*define for addressrangehi field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift) + +/*define for reserved field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift) + + +/* ** Definitions for SDMA_PKT_ATOMIC packet */ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 8d80df94bd8b..a26a9be58eac 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -414,8 +414,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device /* ras_controller_int is dedicated for nbif ras error, * not the global interrupt for sync flood */ - amdgpu_ras_set_fed(adev, true); - amdgpu_ras_reset_gpu(adev); + amdgpu_ras_global_ras_isr(adev); } amdgpu_ras_error_data_fini(&err_data); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index d1bd79bbae53..8a0a63ac88d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -401,6 +401,17 @@ static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) return px; } +static bool nbio_v7_9_is_nps_switch_requested(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS); + tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, + CHANGE_STATUE); + + /* 0x8 - NPS switch requested */ + return (tmp == 0x8); +} static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, u32 *supp_modes) { @@ -508,6 +519,7 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, + .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested, .init_registers = nbio_v7_9_init_registers, 
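The new SDMA_PKT_VM_INVALIDATION defines above follow the header's usual scheme: each field gets an _offset (the dword index inside the packet), a _mask, and a _shift, plus an upper-case helper that masks and shifts a value into place, so a packet word is built by OR-ing the helpers together. A hedged sketch of assembling the HEADER dword, using only the macros defined in this header (the op value and engine IDs are placeholders, not taken from a real ring encoder):

static unsigned int demo_vm_inval_header(unsigned int op,
					 unsigned int gfx_eng_id,
					 unsigned int mm_eng_id)
{
	/* dword 0 of the packet; dword 1 is INVALIDATEREQ, dwords 2-3 the range */
	return SDMA_PKT_VM_INVALIDATION_HEADER_OP(op) |
	       SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
	       SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(gfx_eng_id) |
	       SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(mm_eng_id);
}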
.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, .set_reg_remap = nbio_v7_9_set_reg_remap, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 73065a85e0d2..3bad565ded73 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -634,9 +634,9 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = { .query_video_codecs = &nv_query_video_codecs, }; -static int nv_common_early_init(void *handle) +static int nv_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -944,9 +944,9 @@ static int nv_common_early_init(void *handle) return 0; } -static int nv_common_late_init(void *handle) +static int nv_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); @@ -973,9 +973,9 @@ static int nv_common_late_init(void *handle) return 0; } -static int nv_common_sw_init(void *handle) +static int nv_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -983,14 +983,9 @@ static int nv_common_sw_init(void *handle) return 0; } -static int nv_common_sw_fini(void *handle) +static int nv_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int nv_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->nbio.funcs->apply_lc_spc_mode_wa) adev->nbio.funcs->apply_lc_spc_mode_wa(adev); @@ -1014,9 +1009,9 @@ static int nv_common_hw_init(void *handle) return 0; } -static int nv_common_hw_fini(void *handle) +static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because nv_enable_doorbell_aperture @@ -1029,18 +1024,14 @@ static int nv_common_hw_fini(void *handle) return 0; } -static int nv_common_suspend(void *handle) +static int nv_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return nv_common_hw_fini(adev); + return nv_common_hw_fini(ip_block); } -static int nv_common_resume(void *handle) +static int nv_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return nv_common_hw_init(adev); + return nv_common_hw_init(ip_block); } static bool nv_common_is_idle(void *handle) @@ -1048,16 +1039,6 @@ static bool nv_common_is_idle(void *handle) return true; } -static int nv_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int nv_common_soft_reset(void *handle) -{ - return 0; -} - static int nv_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1115,17 +1096,12 @@ static const struct amd_ip_funcs nv_common_ip_funcs = { .early_init = nv_common_early_init, .late_init = nv_common_late_init, .sw_init = nv_common_sw_init, - .sw_fini = nv_common_sw_fini, .hw_init = nv_common_hw_init, .hw_fini = nv_common_hw_fini, .suspend = nv_common_suspend, .resume = 
nv_common_resume, .is_idle = nv_common_is_idle, - .wait_for_idle = nv_common_wait_for_idle, - .soft_reset = nv_common_soft_reset, .set_clockgating_state = nv_common_set_clockgating_state, .set_powergating_state = nv_common_set_powergating_state, .get_clockgating_state = nv_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 37b5ddd6f13b..f4a91b126c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -103,6 +103,10 @@ enum psp_gfx_cmd_id GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */ GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */ GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */ + /*IDs of performance monitoring/profiling*/ + GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */ + /* Dynamic memory partitioninig (NPS mode change)*/ + GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ }; /* PSP boot config sub-commands */ @@ -351,6 +355,20 @@ struct psp_gfx_cmd_sriov_spatial_part { uint32_t override_this_aid; }; +/*Structure for sq performance monitoring/profiling enable/disable*/ +struct psp_gfx_cmd_config_sq_perfmon { + uint32_t gfx_xcp_mask; + uint8_t core_override; + uint8_t reg_override; + uint8_t perfmon_override; + uint8_t reserved[5]; +}; + +struct psp_gfx_cmd_fb_memory_part { + uint32_t mode; /* requested NPS mode */ + uint32_t resvd; +}; + /* All GFX ring buffer commands. */ union psp_gfx_commands { @@ -365,6 +383,8 @@ union psp_gfx_commands struct psp_gfx_cmd_load_toc cmd_load_toc; struct psp_gfx_cmd_boot_cfg boot_cfg; struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part; + struct psp_gfx_cmd_config_sq_perfmon config_sq_perfmon; + struct psp_gfx_cmd_fb_memory_part cmd_memory_part; }; struct psp_gfx_uresp_reserved diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 51e470e8d67d..c4b775aaee9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -823,6 +823,30 @@ static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp) return (pmfw_ver < 0x557300); } +static bool psp_v13_0_is_reload_needed(struct psp_context *psp) +{ + uint32_t ucode_ver; + + if (!psp_v13_0_is_sos_alive(psp)) + return false; + + /* Restrict reload support only to specific IP versions */ + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + /* TOS version read from microcode header */ + ucode_ver = psp->sos.fw_version; + /* Read TOS version from hardware */ + psp_v13_0_init_sos_version(psp); + return (ucode_ver != psp->sos.fw_version); + default: + return false; + } + + return false; +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -847,6 +871,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required, + .is_reload_needed = psp_v13_0_is_reload_needed, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 
725392522267..7948d74f8722 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -807,9 +807,9 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int sdma_v2_4_early_init(void *handle) +static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; @@ -826,11 +826,11 @@ static int sdma_v2_4_early_init(void *handle) return 0; } -static int sdma_v2_4_sw_init(void *handle) +static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, @@ -866,9 +866,9 @@ static int sdma_v2_4_sw_init(void *handle) return r; } -static int sdma_v2_4_sw_fini(void *handle) +static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -878,10 +878,10 @@ static int sdma_v2_4_sw_fini(void *handle) return 0; } -static int sdma_v2_4_hw_init(void *handle) +static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v2_4_init_golden_registers(adev); @@ -892,27 +892,21 @@ static int sdma_v2_4_hw_init(void *handle) return r; } -static int sdma_v2_4_hw_fini(void *handle) +static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - sdma_v2_4_enable(adev, false); + sdma_v2_4_enable(ip_block->adev, false); return 0; } -static int sdma_v2_4_suspend(void *handle) +static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v2_4_hw_fini(adev); + return sdma_v2_4_hw_fini(ip_block); } -static int sdma_v2_4_resume(void *handle) +static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v2_4_hw_init(adev); + return sdma_v2_4_hw_init(ip_block); } static bool sdma_v2_4_is_idle(void *handle) @@ -927,11 +921,11 @@ static bool sdma_v2_4_is_idle(void *handle) return true; } -static int sdma_v2_4_wait_for_idle(void *handle) +static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -944,10 +938,10 @@ static int sdma_v2_4_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v2_4_soft_reset(void *handle) +static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { @@ -1102,7 +1096,6 @@ static int sdma_v2_4_set_powergating_state(void *handle, static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .name = "sdma_v2_4", 
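The wait_for_idle conversions below keep amdgpu's standard busy-wait shape: poll a status register roughly once per microsecond until the busy bits clear or adev->usec_timeout expires, then report a hang. The same loop reduced to a self-contained sketch (the register read and delay are stubbed; the mask value is illustrative):

#include <errno.h>

#define DEMO_SDMA_BUSY_MASK 0x00000060u	/* stands in for SRBM_STATUS2 SDMA busy bits */

static unsigned int demo_read_status(void)
{
	return 0;	/* stub for RREG32(mmSRBM_STATUS2); reports idle immediately */
}

static void demo_udelay(void)
{
	/* stub for udelay(1) */
}

static int demo_wait_for_idle(unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (!(demo_read_status() & DEMO_SDMA_BUSY_MASK))
			return 0;	/* engines drained */
		demo_udelay();
	}
	return -ETIMEDOUT;	/* caller treats the timeout as a hang */
}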
.early_init = sdma_v2_4_early_init, - .late_init = NULL, .sw_init = sdma_v2_4_sw_init, .sw_fini = sdma_v2_4_sw_fini, .hw_init = sdma_v2_4_hw_init, @@ -1114,8 +1107,6 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .soft_reset = sdma_v2_4_soft_reset, .set_clockgating_state = sdma_v2_4_set_clockgating_state, .set_powergating_state = sdma_v2_4_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index e65194fe94af..9a3d729545a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1080,9 +1080,9 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int sdma_v3_0_early_init(void *handle) +static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; switch (adev->asic_type) { @@ -1106,11 +1106,11 @@ static int sdma_v3_0_early_init(void *handle) return 0; } -static int sdma_v3_0_sw_init(void *handle) +static int sdma_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, @@ -1152,9 +1152,9 @@ static int sdma_v3_0_sw_init(void *handle) return r; } -static int sdma_v3_0_sw_fini(void *handle) +static int sdma_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1164,10 +1164,10 @@ static int sdma_v3_0_sw_fini(void *handle) return 0; } -static int sdma_v3_0_hw_init(void *handle) +static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_init_golden_registers(adev); @@ -1178,9 +1178,9 @@ static int sdma_v3_0_hw_init(void *handle) return r; } -static int sdma_v3_0_hw_fini(void *handle) +static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); @@ -1188,18 +1188,14 @@ static int sdma_v3_0_hw_fini(void *handle) return 0; } -static int sdma_v3_0_suspend(void *handle) +static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_fini(adev); + return sdma_v3_0_hw_fini(ip_block); } -static int sdma_v3_0_resume(void *handle) +static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_init(adev); + return sdma_v3_0_hw_init(ip_block); } static bool sdma_v3_0_is_idle(void *handle) @@ -1214,11 +1210,11 @@ static bool sdma_v3_0_is_idle(void *handle) return true; } -static int sdma_v3_0_wait_for_idle(void *handle) +static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1231,9 +1227,9 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool sdma_v3_0_check_soft_reset(void *handle) +static bool sdma_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); @@ -1252,9 +1248,9 @@ static bool sdma_v3_0_check_soft_reset(void *handle) } } -static int sdma_v3_0_pre_soft_reset(void *handle) +static int sdma_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1271,9 +1267,9 @@ static int sdma_v3_0_pre_soft_reset(void *handle) return 0; } -static int sdma_v3_0_post_soft_reset(void *handle) +static int sdma_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1290,9 +1286,9 @@ static int sdma_v3_0_post_soft_reset(void *handle) return 0; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp; @@ -1538,7 +1534,6 @@ static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .name = "sdma_v3_0", .early_init = sdma_v3_0_early_init, - .late_init = NULL, .sw_init = sdma_v3_0_sw_init, .sw_fini = sdma_v3_0_sw_fini, .hw_init = sdma_v3_0_hw_init, @@ -1554,8 +1549,6 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, .get_clockgating_state = sdma_v3_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 23ef4eb36b40..c1f98f6cf20d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1751,9 +1751,9 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) } } -static int sdma_v4_0_early_init(void *handle) +static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v4_0_init_microcode(adev); @@ -1780,9 +1780,9 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry); -static int sdma_v4_0_late_init(void *handle) +static int sdma_v4_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v4_0_setup_ulv(adev); @@ -1792,11 +1792,11 @@ static int sdma_v4_0_late_init(void *handle) return 0; } -static int sdma_v4_0_sw_init(void *handle) +static int sdma_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); uint32_t *ptr; @@ -1929,9 +1929,9 @@ static int sdma_v4_0_sw_init(void *handle) return r; } -static int sdma_v4_0_sw_fini(void *handle) +static int sdma_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1951,9 +1951,9 @@ static int sdma_v4_0_sw_fini(void *handle) return 0; } -static int sdma_v4_0_hw_init(void *handle) +static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->flags & AMD_IS_APU) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); @@ -1964,9 +1964,9 @@ static int sdma_v4_0_hw_init(void *handle) return sdma_v4_0_start(adev); } -static int sdma_v4_0_hw_fini(void *handle) +static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; if (amdgpu_sriov_vf(adev)) @@ -1988,9 +1988,9 @@ static int sdma_v4_0_hw_fini(void *handle) return 0; } -static int sdma_v4_0_suspend(void *handle) +static int sdma_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SMU saves SDMA state for us */ if (adev->in_s0ix) { @@ -1998,12 +1998,12 @@ static int sdma_v4_0_suspend(void *handle) return 0; } - return sdma_v4_0_hw_fini(adev); + return sdma_v4_0_hw_fini(ip_block); } -static int sdma_v4_0_resume(void *handle) +static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SMU restores SDMA state for us */ if (adev->in_s0ix) { @@ -2012,7 +2012,7 @@ static int sdma_v4_0_resume(void *handle) return 0; } - return sdma_v4_0_hw_init(adev); + return sdma_v4_0_hw_init(ip_block); } static bool sdma_v4_0_is_idle(void *handle) @@ -2030,11 +2030,11 @@ static bool sdma_v4_0_is_idle(void *handle) return true; } -static int sdma_v4_0_wait_for_idle(void *handle) +static int sdma_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i, j; u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { for (j = 0; j < adev->sdma.num_instances; j++) { @@ -2049,7 +2049,7 @@ static int sdma_v4_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v4_0_soft_reset(void *handle) +static int sdma_v4_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -2350,9 +2350,9 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); uint32_t instance_offset; @@ -2371,9 +2371,9 @@ static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v4_0_dump_ip_state(void *handle) +static void 
sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index c77889040760..a38553f38fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1290,9 +1290,9 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) } } -static int sdma_v4_4_2_early_init(void *handle) +static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v4_4_2_init_microcode(adev); @@ -1318,9 +1318,9 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); #endif -static int sdma_v4_4_2_late_init(void *handle) +static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; #if 0 struct ras_ih_if ih_info = { .cb = sdma_v4_4_2_process_ras_data_cb, @@ -1332,11 +1332,11 @@ static int sdma_v4_4_2_late_init(void *handle) return 0; } -static int sdma_v4_4_2_sw_init(void *handle) +static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 aid_id; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); uint32_t *ptr; @@ -1430,6 +1430,10 @@ static int sdma_v4_4_2_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "fail to initialize sdma ras block\n"); return -EINVAL; @@ -1442,12 +1446,16 @@ static int sdma_v4_4_2_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v4_4_2_sw_fini(void *handle) +static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1456,6 +1464,7 @@ static int sdma_v4_4_2_sw_fini(void *handle) amdgpu_ring_fini(&adev->sdma.instance[i].page); } + amdgpu_sdma_sysfs_reset_mask_fini(adev); if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) || amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) amdgpu_sdma_destroy_inst_ctx(adev, true); @@ -1467,10 +1476,10 @@ static int sdma_v4_4_2_sw_fini(void *handle) return 0; } -static int sdma_v4_4_2_hw_init(void *handle) +static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t inst_mask; inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); @@ -1482,9 +1491,9 @@ static int sdma_v4_4_2_hw_init(void *handle) return r; } -static int sdma_v4_4_2_hw_fini(void *handle) +static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct 
amdgpu_device *adev = ip_block->adev; uint32_t inst_mask; int i; @@ -1508,21 +1517,19 @@ static int sdma_v4_4_2_hw_fini(void *handle) static int sdma_v4_4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state); -static int sdma_v4_4_2_suspend(void *handle) +static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_in_reset(adev)) sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - return sdma_v4_4_2_hw_fini(adev); + return sdma_v4_4_2_hw_fini(ip_block); } -static int sdma_v4_4_2_resume(void *handle) +static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v4_4_2_hw_init(adev); + return sdma_v4_4_2_hw_init(ip_block); } static bool sdma_v4_4_2_is_idle(void *handle) @@ -1540,11 +1547,11 @@ static bool sdma_v4_4_2_is_idle(void *handle) return true; } -static int sdma_v4_4_2_wait_for_idle(void *handle) +static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i, j; u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { for (j = 0; j < adev->sdma.num_instances; j++) { @@ -1559,7 +1566,7 @@ static int sdma_v4_4_2_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v4_4_2_soft_reset(void *handle) +static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -1857,9 +1864,9 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); uint32_t instance_offset; @@ -1878,9 +1885,9 @@ static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v4_4_2_dump_ip_state(void *handle) +static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 3e48ea38385d..fa9b40934957 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -705,14 +705,16 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v5_0_gfx_resume - setup and start the async dma engines + * sdma_v5_0_gfx_resume_instance - start or restart one SDMA engine instance * * @adev: amdgpu_device pointer + * @i: instance index + * @restore: restore the saved ring pointers when restarting * - * Set up the gfx DMA ring buffers and enable them (NAVI10). - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffer for this instance and enable it. On restart, the saved wptr and rptr are restored. + * Returns 0 for success, error for failure. 
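Splitting the resume path out per instance, as the new kernel-doc above describes, is what enables per-queue recovery: a full hw_init can walk every instance with restore=false to start from clean rings, while a queue reset can re-run only the hung instance with restore=true so the saved wptr/rptr survive. A sketch of the two call shapes under those assumptions, with the device struct reduced to the one field used (the in-tree caller names may differ):

#include <stdbool.h>

struct demo_device {
	struct {
		int num_instances;
	} sdma;
};

/* stub standing in for sdma_v5_0_gfx_resume_instance() */
static int demo_resume_instance(struct demo_device *adev, int i, bool restore)
{
	(void)adev; (void)i; (void)restore;
	return 0;
}

/* full resume: every engine starts from a clean, zeroed ring */
static int demo_resume_all(struct demo_device *adev)
{
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = demo_resume_instance(adev, i, false);
		if (r)
			return r;
	}
	return 0;
}

/* queue reset: restart only the affected engine, keeping saved pointers */
static int demo_restart_one(struct demo_device *adev, int i)
{
	return demo_resume_instance(adev, i, true);
}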
*/ -static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) +static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -722,142 +724,163 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) u32 temp; u32 wptr_poll_cntl; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; + ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); - - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, - mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, - SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), - wptr_poll_cntl); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, 
i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), - ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), - ring->gpu_addr >> 40); - + } + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + wptr_poll_cntl); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), + ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), + ring->gpu_addr >> 40); + + if (!restore) ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), - lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), - upper_32_bits(ring->wptr << 2)); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); + } - doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, - mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), - doorbell_offset); + if (ring->use_doorbell) { + doorbell = 
REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), + doorbell_offset); - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, 20); + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, 20); - if (amdgpu_sriov_vf(adev)) - sdma_v5_0_ring_set_wptr(ring); + if (amdgpu_sriov_vf(adev)) + sdma_v5_0_ring_set_wptr(ring); - /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - if (!amdgpu_sriov_vf(adev)) { - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + } - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 
0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v5_0_ctx_switch_enable(adev, true); - sdma_v5_0_enable(adev, true); - } + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_0_ctx_switch_enable(adev, true); + sdma_v5_0_enable(adev, true); + } + + return amdgpu_ring_test_helper(ring); +} + +/** + * sdma_v5_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them (NAVI10). + * Returns 0 for success, error for failure. + */ +static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; - r = amdgpu_ring_test_helper(ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v5_0_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -1366,9 +1389,9 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v5_0_early_init(void *handle) +static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v5_0_init_microcode(adev); @@ -1385,11 +1408,11 @@ static int sdma_v5_0_early_init(void *handle) } -static int sdma_v5_0_sw_init(void *handle) +static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); uint32_t *ptr; @@ -1429,6 +1452,19 @@ static int sdma_v5_0_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): + if (adev->sdma.instance[0].fw_version >= 35) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); if (ptr) @@ -1436,17 +1472,22 @@ static int sdma_v5_0_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = 
amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v5_0_sw_fini(void *handle) +static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, false); kfree(adev->sdma.ip_dump); @@ -1454,10 +1495,10 @@ static int sdma_v5_0_sw_fini(void *handle) return 0; } -static int sdma_v5_0_hw_init(void *handle) +static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v5_0_init_golden_registers(adev); @@ -1466,9 +1507,9 @@ static int sdma_v5_0_hw_init(void *handle) return r; } -static int sdma_v5_0_hw_fini(void *handle) +static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1479,18 +1520,14 @@ static int sdma_v5_0_hw_fini(void *handle) return 0; } -static int sdma_v5_0_suspend(void *handle) +static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_0_hw_fini(adev); + return sdma_v5_0_hw_fini(ip_block); } -static int sdma_v5_0_resume(void *handle) +static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_0_hw_init(adev); + return sdma_v5_0_hw_init(ip_block); } static bool sdma_v5_0_is_idle(void *handle) @@ -1508,11 +1545,11 @@ static bool sdma_v5_0_is_idle(void *handle) return true; } -static int sdma_v5_0_wait_for_idle(void *handle) +static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); @@ -1525,13 +1562,100 @@ static int sdma_v5_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v5_0_soft_reset(void *handle) +static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; } +static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, j, r; + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + /* stop queue */ + ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + + rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE 
freeze bit to 1 */ + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + for (j = 0; j < adev->usec_timeout; j++) { + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1) + break; + udelay(1); + } + + /* check sdma copy engine all idle if frozen not received*/ + if (j == adev->usec_timeout) { + stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG)); + if ((stat1_reg & 0x3FF) != 0x3FF) { + DRM_ERROR("cannot soft reset as sdma not idle\n"); + r = -ETIMEDOUT; + goto err0; + } + } + + f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + + cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); + + /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ + preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); + preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); + + soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + udelay(50); + + soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + /* unfreeze*/ + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + r = sdma_v5_0_gfx_resume_instance(adev, i, true); + +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; @@ -1778,9 +1902,9 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); uint32_t instance_offset; @@ -1799,9 +1923,9 @@ static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v5_0_dump_ip_state(void *handle) +static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); @@ -1823,7 +1947,6 @@ static void sdma_v5_0_dump_ip_state(void *handle) static const struct amd_ip_funcs sdma_v5_0_ip_funcs = { .name = "sdma_v5_0", .early_init = sdma_v5_0_early_init, - .late_init = NULL, .sw_init = sdma_v5_0_sw_init, .sw_fini = sdma_v5_0_sw_fini, .hw_init = sdma_v5_0_hw_init, @@ -1874,6 +1997,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait, 
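
In sdma_v5_0_reset_queue() above, the per-instance reset line is chosen by shifting the SDMA0 bit of GRBM_SOFT_RESET left by the instance index, which assumes the per-instance reset bits are contiguous. A standalone illustration of that bit selection (EXAMPLE_SDMA0_SHIFT is an invented stand-in for GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT):

#include <stdint.h>

#define EXAMPLE_SDMA0_SHIFT 20 /* invented value, for illustration only */

/* Mask that resets SDMA instance i in the sketch register layout. */
static inline uint32_t example_sdma_reset_mask(unsigned int i)
{
        return 1u << EXAMPLE_SDMA0_SHIFT << i;
}

The reset path ORs this mask in to assert reset, holds it for 50 us, clears it again, and then brings the queue back with sdma_v5_0_gfx_resume_instance(adev, i, true).
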
.init_cond_exec = sdma_v5_0_ring_init_cond_exec, .preempt_ib = sdma_v5_0_ring_preempt_ib, + .reset = sdma_v5_0_reset_queue, }; static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index bc9b240a3488..ba5160399ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -522,14 +522,17 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v5_2_gfx_resume - setup and start the async dma engines + * sdma_v5_2_gfx_resume_instance - start/restart a certain sdma engine * * @adev: amdgpu_device pointer + * @i: instance index + * @restore: whether to restore the saved wptr and rptr on restart * - * Set up the gfx DMA ring buffers and enable them. - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored. + * Returns 0 for success, error for failure. */ -static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) + +static int sdma_v5_2_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -539,139 +542,161 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) u32 temp; u32 wptr_poll_cntl; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; + ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, 
mmSDMA0_GFX_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + } - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, - mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, - SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), - wptr_poll_cntl); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); - + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, + mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + wptr_poll_cntl); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + + if (!restore) ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 
upper_32_bits(ring->wptr << 2)); + } - doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); - if (amdgpu_sriov_vf(adev)) - sdma_v5_2_ring_set_wptr(ring); + if (amdgpu_sriov_vf(adev)) + sdma_v5_2_ring_set_wptr(ring); - /* set minor_ptr_update to 0 after wptr programed */ + /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* SRIOV VF has no control of any of registers below */ - if (!amdgpu_sriov_vf(adev)) { - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); - - /* unhalt engine */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + /* SRIOV VF has no control of any of registers below */ 
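
Almost every register update in these resume paths goes through the read-modify-write idiom that REG_SET_FIELD() wraps: read the register, clear the field's bits, insert the new value under the mask. A self-contained sketch with an invented mask/shift pair (the real pairs come from the generated register headers):

#include <stdint.h>

#define EXAMPLE_FIELD_MASK  0x00000070u /* invented field occupying bits 6:4 */
#define EXAMPLE_FIELD_SHIFT 4

static inline uint32_t example_set_field(uint32_t reg, uint32_t val)
{
        reg &= ~EXAMPLE_FIELD_MASK;              /* clear the old field */
        reg |= (val << EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK;
        return reg;
}
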
+ if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + + /* unhalt engine */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v5_2_ctx_switch_enable(adev, true); - sdma_v5_2_enable(adev, true); - } + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_2_ctx_switch_enable(adev, true); + sdma_v5_2_enable(adev, true); + } + + return amdgpu_ring_test_helper(ring); +} - r = amdgpu_ring_test_helper(ring); +/** + * sdma_v5_2_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
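
Note what restore=true means in both v5.x instance paths above: RPTR and WPTR are written with the same saved ring->wptr, so the re-enabled queue reports nothing pending between its read and write pointers and continues from where the ring left off rather than replaying stale commands. A tiny sketch of that invariant (the struct and helper are invented):

#include <assert.h>
#include <stdint.h>

struct example_ring {
        uint64_t rptr;
        uint64_t wptr;
};

static void example_restore(struct example_ring *r, uint64_t saved_wptr)
{
        r->rptr = saved_wptr; /* everything up to here counts as consumed */
        r->wptr = saved_wptr;
        assert(r->wptr - r->rptr == 0); /* the ring restarts empty */
}
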
+ */ +static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v5_2_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -736,9 +761,9 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v5_2_soft_reset(void *handle) +static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset; u32 tmp; int i; @@ -778,6 +803,7 @@ static int sdma_v5_2_soft_reset(void *handle) static int sdma_v5_2_start(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (amdgpu_sriov_vf(adev)) { sdma_v5_2_ctx_switch_enable(adev, false); @@ -798,7 +824,11 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) msleep(1000); } - sdma_v5_2_soft_reset(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA); + if (!ip_block) + return -EINVAL; + + sdma_v5_2_soft_reset(ip_block); /* unhalt the MEs */ sdma_v5_2_enable(adev, true); /* enable sdma ring preemption */ @@ -1180,7 +1210,28 @@ static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; + uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); + + /* Update the PD address for this VMID. */ + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + + (hub->ctx_addr_distance * vmid), + lower_32_bits(pd_addr)); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + + (hub->ctx_addr_distance * vmid), + upper_32_bits(pd_addr)); + + /* Trigger invalidation. 
*/ + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) | + SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) | + SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f)); + amdgpu_ring_write(ring, req); + amdgpu_ring_write(ring, 0xFFFFFFFF); + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) | + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F)); } static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring, @@ -1216,9 +1267,9 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v5_2_early_init(void *handle) +static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1268,11 +1319,11 @@ static unsigned sdma_v5_2_seq_to_trap_id(int seq_num) return -EINVAL; } -static int sdma_v5_2_sw_init(void *handle) +static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); uint32_t *ptr; @@ -1306,6 +1357,24 @@ static int sdma_v5_2_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 3): + case IP_VERSION(5, 2, 4): + if (adev->sdma.instance[0].fw_version >= 76) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + case IP_VERSION(5, 2, 5): + if (adev->sdma.instance[0].fw_version >= 34) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); if (ptr) @@ -1313,17 +1382,22 @@ static int sdma_v5_2_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v5_2_sw_fini(void *handle) +static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, true); kfree(adev->sdma.ip_dump); @@ -1331,16 +1405,16 @@ static int sdma_v5_2_sw_fini(void *handle) return 0; } -static int sdma_v5_2_hw_init(void *handle) +static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v5_2_start(adev); } -static int sdma_v5_2_hw_fini(void *handle) +static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1351,18 +1425,14 @@ static int sdma_v5_2_hw_fini(void *handle) return 0; } -static int sdma_v5_2_suspend(void *handle) +static 
int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_2_hw_fini(adev); + return sdma_v5_2_hw_fini(ip_block); } -static int sdma_v5_2_resume(void *handle) +static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_2_hw_init(adev); + return sdma_v5_2_hw_init(ip_block); } static bool sdma_v5_2_is_idle(void *handle) @@ -1380,11 +1450,11 @@ static bool sdma_v5_2_is_idle(void *handle) return true; } -static int sdma_v5_2_wait_for_idle(void *handle) +static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1, sdma2, sdma3; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); @@ -1399,6 +1469,96 @@ static int sdma_v5_2_wait_for_idle(void *handle) return -ETIMEDOUT; } +static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, j, r; + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + /* stop queue */ + ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + + rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */ + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + for (j = 0; j < adev->usec_timeout; j++) { + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + + if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1) + break; + udelay(1); + } + + + if (j == adev->usec_timeout) { + stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG)); + if ((stat1_reg & 0x3FF) != 0x3FF) { + DRM_ERROR("cannot soft reset as sdma not idle\n"); + r = -ETIMEDOUT; + goto err0; + } + } + + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + + cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); + + /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ + preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); + preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); + + soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + 
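
The v5.2 reset path above uses the same bounded handshake as v5.0: request FREEZE, spin on the FROZEN acknowledgement with udelay(1) for up to adev->usec_timeout iterations, and if the ack never arrives, proceed only when the STATUS1 copy-engine idle bits (0x3FF) show the engine is quiescent anyway. A generic sketch of such a bounded ack poll (the read callback is invented):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll until read() reports all bits of ack_mask set, or give up. */
static int example_poll_ack(u32 (*read)(void *ctx), void *ctx,
                            u32 ack_mask, unsigned int timeout_us)
{
        unsigned int i;

        for (i = 0; i < timeout_us; i++) {
                if ((read(ctx) & ack_mask) == ack_mask)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}
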
soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; + + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + udelay(50); + + soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + /* unfreeze and unhalt */ + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + r = sdma_v5_2_gfx_resume_instance(adev, i, true); + +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; @@ -1736,9 +1896,9 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring) amdgpu_gfx_off_ctrl(adev, true); } -static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v5_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); uint32_t instance_offset; @@ -1757,9 +1917,9 @@ static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v5_2_dump_ip_state(void *handle) +static void sdma_v5_2_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); @@ -1781,7 +1941,6 @@ static void sdma_v5_2_dump_ip_state(void *handle) static const struct amd_ip_funcs sdma_v5_2_ip_funcs = { .name = "sdma_v5_2", .early_init = sdma_v5_2_early_init, - .late_init = NULL, .sw_init = sdma_v5_2_sw_init, .sw_fini = sdma_v5_2_sw_fini, .hw_init = sdma_v5_2_hw_init, @@ -1834,6 +1993,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v5_2_ring_init_cond_exec, .preempt_ib = sdma_v5_2_ring_preempt_ib, + .reset = sdma_v5_2_reset_queue, }; static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 208a1fa9d4e7..d46128b0ec92 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -469,14 +469,16 @@ static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v6_0_gfx_resume - setup and start the async dma engines + * sdma_v6_0_gfx_resume_instance - start/restart a certain sdma engine * * @adev: amdgpu_device pointer + * @i: instance index + * @restore: whether to restore the saved wptr and rptr on restart * - * Set up the gfx DMA ring buffers and enable them. - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored. + * Returns 0 for success, error for failure. 
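
Across the series, per-queue reset is only advertised when the SDMA firmware is new enough: each sw_init ORs AMDGPU_RESET_TYPE_PER_QUEUE into adev->sdma.supported_reset behind an IP-version and firmware-version check. Collapsed into one hypothetical helper, with the thresholds copied from the v5.0 and v5.2 hunks above and the v6.0 hunk below:

/* Hypothetical consolidation of the firmware gates in this series;
 * IP_VERSION() is the usual amdgpu version-encoding macro.
 */
static bool example_sdma_has_per_queue_reset(u32 ip_ver, u32 fw_ver)
{
        switch (ip_ver) {
        case IP_VERSION(5, 0, 0):
        case IP_VERSION(5, 0, 2):
        case IP_VERSION(5, 0, 5):
                return fw_ver >= 35;
        case IP_VERSION(5, 2, 0):
        case IP_VERSION(5, 2, 2):
        case IP_VERSION(5, 2, 3):
        case IP_VERSION(5, 2, 4):
                return fw_ver >= 76;
        case IP_VERSION(5, 2, 5):
                return fw_ver >= 34;
        case IP_VERSION(6, 0, 0):
        case IP_VERSION(6, 0, 2):
        case IP_VERSION(6, 0, 3):
                return fw_ver >= 21;
        default:
                return false;
        }
}
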
*/ -static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) +static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -485,132 +487,152 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) u32 doorbell_offset; u32 temp; u64 wptr_gpu_addr; - int i, r; - - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + ring = &adev->sdma.instance[i].ring; + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0); + } + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), + 
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); + + if (!restore) + ring->wptr = 0; - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); + } - ring->wptr = 0; + doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); - } + if (i == 0) + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range * 
adev->sdma.num_instances); - doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); + if (amdgpu_sriov_vf(adev)) + sdma_v6_0_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); + + /* Set up sdma hang watchdog */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); + /* 100ms per unit */ + temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, + max(adev->usec_timeout/100000, 1)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); - - if (i == 0) - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); - - if (amdgpu_sriov_vf(adev)) - sdma_v6_0_ring_set_wptr(ring); - - /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); - - /* Set up sdma hang watchdog */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); - /* 100ms per unit */ - temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, - max(adev->usec_timeout/100000, 1)); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, 
regSDMA0_UTCL1_PAGE), temp); - - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); + + if (amdgpu_sriov_vf(adev)) + sdma_v6_0_enable(adev, true); + + return amdgpu_ring_test_helper(ring); +} - if (amdgpu_sriov_vf(adev)) - sdma_v6_0_enable(adev, true); +/** + * sdma_v6_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
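
The UTCL1_PAGE programming above is the usual read-modify-write idiom: temp &= 0xFF0FFF clears bits 15:12, that is the two 2-bit cache policy fields (read policy at bits 13:12, write policy at bits 15:14), before the defaults and the LLC_NOALLOC bit are OR-ed back in. A minimal self-contained sketch of that idiom; the macro names here are illustrative stand-ins, not the driver's generated register headers:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the generated __SHIFT/_MASK register constants. */
#define RD_POLICY__SHIFT        12      /* 2-bit field at bits 13:12 */
#define RD_POLICY_MASK          (0x3u << RD_POLICY__SHIFT)
#define WR_POLICY__SHIFT        14      /* 2-bit field at bits 15:14 */
#define WR_POLICY_MASK          (0x3u << WR_POLICY__SHIFT)

/* Clear a field, then insert the new value, as REG_SET_FIELD does. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t temp = 0xFFFFFF;

        /* zeroing both policy fields reproduces the temp &= 0xFF0FFF step */
        temp = set_field(temp, RD_POLICY_MASK, RD_POLICY__SHIFT, 0);
        temp = set_field(temp, WR_POLICY_MASK, WR_POLICY__SHIFT, 0);
        printf("0x%06X\n", temp);       /* prints 0xFF0FFF */
        return 0;
}
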
+ */ +static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; - r = amdgpu_ring_test_helper(ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v6_0_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -733,9 +755,9 @@ static int sdma_v6_0_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v6_0_soft_reset(void *handle) +static int sdma_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; int i; @@ -769,9 +791,9 @@ static int sdma_v6_0_soft_reset(void *handle) return sdma_v6_0_start(adev); } -static bool sdma_v6_0_check_soft_reset(void *handle) +static bool sdma_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; long tmo = msecs_to_jiffies(1000); @@ -1272,9 +1294,9 @@ static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) } } -static int sdma_v6_0_early_init(void *handle) +static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1291,11 +1313,11 @@ static int sdma_v6_0_early_init(void *handle) return 0; } -static int sdma_v6_0_sw_init(void *handle) +static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); uint32_t *ptr; @@ -1328,6 +1350,19 @@ static int sdma_v6_0_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(6, 0, 0): + case IP_VERSION(6, 0, 2): + case IP_VERSION(6, 0, 3): + if (adev->sdma.instance[0].fw_version >= 21) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); return -EINVAL; @@ -1340,17 +1375,22 @@ static int sdma_v6_0_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v6_0_sw_fini(void *handle) +static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, true); kfree(adev->sdma.ip_dump); @@ -1358,16 +1398,16 @@ static int sdma_v6_0_sw_fini(void *handle) return 0; } -static int sdma_v6_0_hw_init(void *handle) +static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v6_0_start(adev); } -static int sdma_v6_0_hw_fini(void *handle) +static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if 
(amdgpu_sriov_vf(adev)) return 0; @@ -1378,18 +1418,14 @@ static int sdma_v6_0_hw_fini(void *handle) return 0; } -static int sdma_v6_0_suspend(void *handle) +static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v6_0_hw_fini(adev); + return sdma_v6_0_hw_fini(ip_block); } -static int sdma_v6_0_resume(void *handle) +static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v6_0_hw_init(adev); + return sdma_v6_0_hw_init(ip_block); } static bool sdma_v6_0_is_idle(void *handle) @@ -1407,11 +1443,11 @@ static bool sdma_v6_0_is_idle(void *handle) return true; } -static int sdma_v6_0_wait_for_idle(void *handle) +static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); @@ -1469,6 +1505,31 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); + if (r) + return r; + + return sdma_v6_0_gfx_resume_instance(adev, i, true); +} + static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -1556,9 +1617,9 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags) { } -static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v6_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); uint32_t instance_offset; @@ -1577,9 +1638,9 @@ static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v6_0_dump_ip_state(void *handle) +static void sdma_v6_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); @@ -1601,7 +1662,6 @@ static void sdma_v6_0_dump_ip_state(void *handle) const struct amd_ip_funcs sdma_v6_0_ip_funcs = { .name = "sdma_v6_0", .early_init = sdma_v6_0_early_init, - .late_init = NULL, .sw_init = sdma_v6_0_sw_init, .sw_fini = sdma_v6_0_sw_fini, .hw_init = sdma_v6_0_hw_init, @@ -1652,6 +1712,7 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v6_0_ring_init_cond_exec, .preempt_ib = sdma_v6_0_ring_preempt_ib, + .reset = sdma_v6_0_reset_queue, }; static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev) @@ -1726,7 +1787,7 @@ static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib, uint64_t dst_offset, uint32_t byte_count) { - ib->ptr[ib->length_dw++] = 
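
The new sdma_v6_0_reset_queue() above is the reason sdma_v6_0_gfx_resume() was split into a per-instance helper with a restore flag: the hung queue is located by comparing ring pointers, stopped through amdgpu_mes_reset_legacy_queue(), and then only that one instance is re-programmed with restore set so the saved ring pointers survive. A compressed sketch of that shape, with stub types standing in for the real amdgpu structs:

/* Stub types standing in for the real amdgpu structures. */
struct ring { int dummy; };
struct sdma_instance { struct ring ring; };

#define NUM_INSTANCES 2
static struct sdma_instance instances[NUM_INSTANCES];

static int stop_queue(struct ring *ring) { (void)ring; return 0; }      /* e.g. via MES */
static int gfx_resume_instance(int i, int restore) { (void)i; (void)restore; return 0; }

/* Locate the hung ring's instance, stop it, then re-program only that
 * instance with restore set so rptr/wptr are kept across the reset. */
static int reset_queue(struct ring *ring)
{
        int i, r;

        for (i = 0; i < NUM_INSTANCES; i++)
                if (ring == &instances[i].ring)
                        break;
        if (i == NUM_INSTANCES)
                return -22;     /* -EINVAL: not one of ours */

        r = stop_queue(ring);
        if (r)
                return r;

        return gfx_resume_instance(i, 1);
}

int main(void) { return reset_queue(&instances[0].ring); }
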
SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index 9288f37a3cc5..d2ce6b6a7ff6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -753,9 +753,9 @@ static int sdma_v7_0_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v7_0_soft_reset(void *handle) +static int sdma_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; int i; @@ -789,9 +789,9 @@ static int sdma_v7_0_soft_reset(void *handle) return sdma_v7_0_start(adev); } -static bool sdma_v7_0_check_soft_reset(void *handle) +static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; long tmo = msecs_to_jiffies(1000); @@ -1259,9 +1259,9 @@ static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v7_0_early_init(void *handle) +static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1279,11 +1279,11 @@ static int sdma_v7_0_early_init(void *handle) return 0; } -static int sdma_v7_0_sw_init(void *handle) +static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); uint32_t *ptr; @@ -1326,9 +1326,9 @@ static int sdma_v7_0_sw_init(void *handle) return r; } -static int sdma_v7_0_sw_fini(void *handle) +static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1344,16 +1344,16 @@ static int sdma_v7_0_sw_fini(void *handle) return 0; } -static int sdma_v7_0_hw_init(void *handle) +static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v7_0_start(adev); } -static int sdma_v7_0_hw_fini(void *handle) +static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1364,18 +1364,14 @@ static int sdma_v7_0_hw_fini(void *handle) return 0; } -static int sdma_v7_0_suspend(void *handle) +static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v7_0_hw_fini(adev); + return sdma_v7_0_hw_fini(ip_block); } -static int sdma_v7_0_resume(void *handle) +static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return 
sdma_v7_0_hw_init(adev); + return sdma_v7_0_hw_init(ip_block); } static bool sdma_v7_0_is_idle(void *handle) @@ -1393,11 +1389,11 @@ static bool sdma_v7_0_is_idle(void *handle) return true; } -static int sdma_v7_0_wait_for_idle(void *handle) +static int sdma_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); @@ -1544,9 +1540,9 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags) { } -static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v7_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); uint32_t instance_offset; @@ -1565,9 +1561,9 @@ static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v7_0_dump_ip_state(void *handle) +static void sdma_v7_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 85235470e872..00f63d3fbea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2022,9 +2022,9 @@ static uint32_t si_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } -static int si_common_early_init(void *handle) +static int si_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->smc_rreg = &si_smc_rreg; adev->smc_wreg = &si_smc_wreg; @@ -2148,17 +2148,6 @@ static int si_common_early_init(void *handle) return 0; } -static int si_common_sw_init(void *handle) -{ - return 0; -} - -static int si_common_sw_fini(void *handle) -{ - return 0; -} - - static void si_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { @@ -2633,9 +2622,9 @@ static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev) pcie_set_readrq(adev->pdev, 512); } -static int si_common_hw_init(void *handle) +static int si_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; si_fix_pci_max_read_req_size(adev); si_init_golden_registers(adev); @@ -2645,23 +2634,14 @@ static int si_common_hw_init(void *handle) return 0; } -static int si_common_hw_fini(void *handle) +static int si_common_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int si_common_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_common_hw_fini(adev); -} - -static int si_common_resume(void *handle) +static int si_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_common_hw_init(adev); + return si_common_hw_init(ip_block); } static bool si_common_is_idle(void *handle) @@ -2669,16 +2649,6 @@ static bool si_common_is_idle(void *handle) return true; } -static int si_common_wait_for_idle(void 
*handle) -{ - return 0; -} - -static int si_common_soft_reset(void *handle) -{ - return 0; -} - static int si_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -2694,20 +2664,12 @@ static int si_common_set_powergating_state(void *handle, static const struct amd_ip_funcs si_common_ip_funcs = { .name = "si_common", .early_init = si_common_early_init, - .late_init = NULL, - .sw_init = si_common_sw_init, - .sw_fini = si_common_sw_fini, .hw_init = si_common_hw_init, .hw_fini = si_common_hw_fini, - .suspend = si_common_suspend, .resume = si_common_resume, .is_idle = si_common_is_idle, - .wait_for_idle = si_common_wait_for_idle, - .soft_reset = si_common_soft_reset, .set_clockgating_state = si_common_set_clockgating_state, .set_powergating_state = si_common_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version si_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 11db5b755832..47647a6083e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -457,9 +457,9 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int si_dma_early_init(void *handle) +static int si_dma_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->sdma.num_instances = 2; @@ -471,11 +471,11 @@ static int si_dma_early_init(void *handle) return 0; } -static int si_dma_sw_init(void *handle) +static int si_dma_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* DMA0 trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, @@ -506,9 +506,9 @@ static int si_dma_sw_init(void *handle) return r; } -static int si_dma_sw_fini(void *handle) +static int si_dma_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -517,39 +517,34 @@ static int si_dma_sw_fini(void *handle) return 0; } -static int si_dma_hw_init(void *handle) +static int si_dma_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return si_dma_start(adev); } -static int si_dma_hw_fini(void *handle) +static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_dma_stop(adev); + si_dma_stop(ip_block->adev); return 0; } -static int si_dma_suspend(void *handle) +static int si_dma_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_dma_hw_fini(adev); + return si_dma_hw_fini(ip_block); } -static int si_dma_resume(void *handle) +static int si_dma_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_dma_hw_init(adev); + return si_dma_hw_init(ip_block); } static bool si_dma_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(SRBM_STATUS2); if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK)) @@ -558,20 +553,20 @@ static bool si_dma_is_idle(void *handle) return true; } -static int 
si_dma_wait_for_idle(void *handle) +static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (si_dma_is_idle(handle)) + if (si_dma_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int si_dma_soft_reset(void *handle) +static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block) { DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n"); return 0; @@ -696,7 +691,6 @@ static int si_dma_set_powergating_state(void *handle, static const struct amd_ip_funcs si_dma_ip_funcs = { .name = "si_dma", .early_init = si_dma_early_init, - .late_init = NULL, .sw_init = si_dma_sw_init, .sw_fini = si_dma_sw_fini, .hw_init = si_dma_hw_init, @@ -708,8 +702,6 @@ static const struct amd_ip_funcs si_dma_ip_funcs = { .soft_reset = si_dma_soft_reset, .set_clockgating_state = si_dma_set_clockgating_state, .set_powergating_state = si_dma_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs si_dma_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 5237395e4fab..2ec1ebe4db11 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -156,19 +156,19 @@ static void si_ih_set_rptr(struct amdgpu_device *adev, WREG32(IH_RB_RPTR, ih->rptr); } -static int si_ih_early_init(void *handle) +static int si_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; si_ih_set_interrupt_funcs(adev); return 0; } -static int si_ih_sw_init(void *handle) +static int si_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -177,43 +177,37 @@ static int si_ih_sw_init(void *handle) return amdgpu_irq_init(adev); } -static int si_ih_sw_fini(void *handle) +static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int si_ih_hw_init(void *handle) +static int si_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return si_ih_irq_init(adev); } -static int si_ih_hw_fini(void *handle) +static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_ih_irq_disable(adev); + si_ih_irq_disable(ip_block->adev); return 0; } -static int si_ih_suspend(void *handle) +static int si_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_ih_hw_fini(adev); + return si_ih_hw_fini(ip_block); } -static int si_ih_resume(void *handle) +static int si_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_ih_hw_init(adev); + return si_ih_hw_init(ip_block); } static bool si_ih_is_idle(void *handle) @@ -227,22 +221,22 @@ static bool si_ih_is_idle(void *handle) return true; } -static int si_ih_wait_for_idle(void *handle) +static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { 
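
These wait_for_idle() handlers all share one bounded busy-wait: poll the status register once per microsecond until the busy bits clear or adev->usec_timeout iterations have elapsed, then give up with -ETIMEDOUT. A self-contained sketch of that loop, with the register read and the kernel's udelay() stubbed out:

#include <stdbool.h>

/* Stubs: in the driver, is_idle() reads the SRBM_STATUS busy bits and
 * udelay() is the kernel's microsecond spin; faked here so the sketch
 * compiles and runs on its own. */
static int polls_until_idle = 5;
static bool is_idle(void) { return --polls_until_idle <= 0; }
static void udelay(unsigned long usecs) { (void)usecs; }

static int wait_for_idle(unsigned int usec_timeout)
{
        unsigned int i;

        for (i = 0; i < usec_timeout; i++) {
                if (is_idle())
                        return 0;       /* engine went quiet */
                udelay(1);
        }
        return -110;    /* -ETIMEDOUT, as the handlers above return */
}

int main(void) { return wait_for_idle(1000000) ? 1 : 0; }
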
unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (si_ih_is_idle(handle)) + if (si_ih_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int si_ih_soft_reset(void *handle) +static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(SRBM_STATUS); @@ -284,7 +278,6 @@ static int si_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs si_ih_ip_funcs = { .name = "si_ih", .early_init = si_ih_early_init, - .late_init = NULL, .sw_init = si_ih_sw_init, .sw_fini = si_ih_sw_fini, .hw_init = si_ih_hw_init, @@ -296,8 +289,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = { .soft_reset = si_ih_soft_reset, .set_clockgating_state = si_ih_set_clockgating_state, .set_powergating_state = si_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs si_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 481217c32d85..9b01e074af47 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -81,15 +81,9 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = false; } return 0; @@ -175,15 +169,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } } @@ -193,15 +181,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -213,7 +195,7 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c index 0af648931df5..e70ebad3f9fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c @@ -80,15 +80,9 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_MES)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - 
dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = false; } return 0; @@ -186,15 +180,9 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -208,7 +196,7 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 307185c0e1b8..ede072758dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -578,20 +578,16 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) { - u32 sol_reg; - - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - /* Will reset for the following suspend abort cases. - * 1) Only reset limit on APU side, dGPU hasn't checked yet. - * 2) S3 suspend abort and TOS already launched. + * 1) Only reset on APU side, dGPU hasn't been checked yet. + * 2) S3 suspend aborted during a normal S3 suspend or + * while performing a pm core test. */ if (adev->flags & AMD_IS_APU && adev->in_s3 && - !adev->suspend_complete && - sol_reg) + !pm_resume_via_firmware()) return true; - - return false; + else + return false; } static int soc15_asic_reset(struct amdgpu_device *adev) @@ -601,11 +597,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev) * successfully. So now, temporarily enable it for the * S3 suspend abort case. */ - if (((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) && - !soc15_need_reset_on_resume(adev)) + + if ((adev->apu_flags & AMD_APU_IS_PICASSO || + !(adev->apu_flags & AMD_APU_IS_RAVEN)) && + soc15_need_reset_on_resume(adev)) + goto asic_reset; + + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; +asic_reset: switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_PCI: dev_info(adev->dev, "PCI reset\n"); @@ -829,6 +831,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev) if (adev->asic_type == CHIP_RENOIR) return true; + if (amdgpu_gmc_need_reset_on_init(adev)) + return true; + if (amdgpu_psp_tos_reload_needed(adev)) + return true; /* Just return false for soc15 GPUs. Reset does not seem to * be necessary. 
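
The rewritten soc15_need_reset_on_resume() above replaces the C2PMSG_81 (SOL) register probe with pm_resume_via_firmware(), which reports whether the platform actually resumed through S3 firmware; an aborted suspend never hands control to firmware, so an APU still flagged in_s3 without a firmware resume is the tell. A sketch of the predicate under that reading, with stub state in place of the real device:

#include <stdbool.h>

/* Stub device state; resumed_via_firmware plays the role of
 * pm_resume_via_firmware(). */
struct dev_state {
        bool is_apu;
        bool in_s3;
        bool resumed_via_firmware;
};

/* An aborted S3 suspend never reached firmware, so an APU that is still
 * marked in_s3 but did not resume via firmware needs a reset. */
static bool need_reset_on_resume(const struct dev_state *s)
{
        return s->is_apu && s->in_s3 && !s->resumed_via_firmware;
}

int main(void)
{
        struct dev_state aborted = { true, true, false };
        return need_reset_on_resume(&aborted) ? 0 : 1;
}
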
*/ @@ -929,9 +935,9 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .get_reg_state = &aqua_vanjaram_get_reg_state, }; -static int soc15_common_early_init(void *handle) +static int soc15_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -1198,9 +1204,9 @@ static int soc15_common_early_init(void *handle) return 0; } -static int soc15_common_late_init(void *handle) +static int soc15_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); @@ -1213,9 +1219,9 @@ static int soc15_common_late_init(void *handle) return 0; } -static int soc15_common_sw_init(void *handle) +static int soc15_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); @@ -1227,9 +1233,9 @@ static int soc15_common_sw_init(void *handle) return 0; } -static int soc15_common_sw_fini(void *handle) +static int soc15_common_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->df.funcs && adev->df.funcs->sw_fini) @@ -1251,9 +1257,9 @@ static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) } } -static int soc15_common_hw_init(void *handle) +static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc15_program_aspm(adev); @@ -1280,9 +1286,9 @@ static int soc15_common_hw_init(void *handle) return 0; } -static int soc15_common_hw_fini(void *handle) +static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc15_enable_doorbell_aperture @@ -1295,7 +1301,12 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); + /* + * For minimal init, late_init is not called, hence RAS irqs are not + * enabled. 
+ */ if ((!amdgpu_sriov_vf(adev)) && + (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && adev->nbio.ras_if && amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { if (adev->nbio.ras && @@ -1309,22 +1320,20 @@ static int soc15_common_hw_fini(void *handle) return 0; } -static int soc15_common_suspend(void *handle) +static int soc15_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc15_common_hw_fini(adev); + return soc15_common_hw_fini(ip_block); } -static int soc15_common_resume(void *handle) +static int soc15_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (soc15_need_reset_on_resume(adev)) { dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); soc15_asic_reset(adev); } - return soc15_common_hw_init(adev); + return soc15_common_hw_init(ip_block); } static bool soc15_common_is_idle(void *handle) @@ -1332,16 +1341,6 @@ static bool soc15_common_is_idle(void *handle) return true; } -static int soc15_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc15_common_soft_reset(void *handle) -{ - return 0; -} - static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) { uint32_t def, data; @@ -1492,11 +1491,7 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = { .suspend = soc15_common_suspend, .resume = soc15_common_resume, .is_idle = soc15_common_is_idle, - .wait_for_idle = soc15_common_wait_for_idle, - .soft_reset = soc15_common_soft_reset, .set_clockgating_state = soc15_common_set_clockgating_state, .set_powergating_state = soc15_common_set_powergating_state, .get_clockgating_state= soc15_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index bba35880badb..d6999835918f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -556,9 +556,9 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = { .update_umd_stable_pstate = &soc21_update_umd_stable_pstate, }; -static int soc21_common_early_init(void *handle) +static int soc21_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -794,9 +794,9 @@ static int soc21_common_early_init(void *handle) return 0; } -static int soc21_common_late_init(void *handle) +static int soc21_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); @@ -832,9 +832,9 @@ static int soc21_common_late_init(void *handle) return 0; } -static int soc21_common_sw_init(void *handle) +static int soc21_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -842,14 +842,9 @@ static int soc21_common_sw_init(void *handle) return 0; } -static int soc21_common_sw_fini(void *handle) -{ - return 0; -} - -static int soc21_common_hw_init(void *handle) +static int soc21_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc21_program_aspm(adev); @@ -867,9 +862,9 @@ static int soc21_common_hw_init(void *handle) return 0; } -static int soc21_common_hw_fini(void *handle) +static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc21_enable_doorbell_aperture @@ -890,11 +885,9 @@ static int soc21_common_hw_fini(void *handle) return 0; } -static int soc21_common_suspend(void *handle) +static int soc21_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc21_common_hw_fini(adev); + return soc21_common_hw_fini(ip_block); } static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) @@ -904,9 +897,10 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) /* Will reset for the following suspend abort cases. * 1) Only reset dGPU side. * 2) S3 suspend got aborted and TOS is active. + * As for dGPU suspend abort cases the SOL value + * will be kept as zero at this resume point. */ - if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && - !adev->suspend_complete) { + if (!(adev->flags & AMD_IS_APU) && adev->in_s3) { sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); msleep(100); sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); @@ -917,16 +911,16 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) return false; } -static int soc21_common_resume(void *handle) +static int soc21_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (soc21_need_reset_on_resume(adev)) { dev_info(adev->dev, "S3 suspend aborted, resetting..."); soc21_asic_reset(adev); } - return soc21_common_hw_init(adev); + return soc21_common_hw_init(ip_block); } static bool soc21_common_is_idle(void *handle) @@ -934,16 +928,6 @@ static bool soc21_common_is_idle(void *handle) return true; } -static int soc21_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc21_common_soft_reset(void *handle) -{ - return 0; -} - static int soc21_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1002,17 +986,12 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = { .early_init = soc21_common_early_init, .late_init = soc21_common_late_init, .sw_init = soc21_common_sw_init, - .sw_fini = soc21_common_sw_fini, .hw_init = soc21_common_hw_init, .hw_fini = soc21_common_hw_fini, .suspend = soc21_common_suspend, .resume = soc21_common_resume, .is_idle = soc21_common_is_idle, - .wait_for_idle = soc21_common_wait_for_idle, - .soft_reset = soc21_common_soft_reset, .set_clockgating_state = soc21_common_set_clockgating_state, .set_powergating_state = soc21_common_set_powergating_state, .get_clockgating_state = soc21_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index 29a848f2466b..be96de92b2f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -363,9 +363,9 @@ static const struct amdgpu_asic_funcs soc24_asic_funcs = { .update_umd_stable_pstate = &soc24_update_umd_stable_pstate, }; -static int soc24_common_early_init(void *handle) +static int 
soc24_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -440,9 +440,9 @@ static int soc24_common_early_init(void *handle) return 0; } -static int soc24_common_late_init(void *handle) +static int soc24_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_get_irq(adev); @@ -455,9 +455,9 @@ static int soc24_common_late_init(void *handle) return 0; } -static int soc24_common_sw_init(void *handle) +static int soc24_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -465,14 +465,9 @@ static int soc24_common_sw_init(void *handle) return 0; } -static int soc24_common_sw_fini(void *handle) +static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int soc24_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc24_program_aspm(adev); @@ -494,9 +489,9 @@ static int soc24_common_hw_init(void *handle) return 0; } -static int soc24_common_hw_fini(void *handle) +static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc21_enable_doorbell_aperture @@ -512,18 +507,14 @@ static int soc24_common_hw_fini(void *handle) return 0; } -static int soc24_common_suspend(void *handle) +static int soc24_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc24_common_hw_fini(adev); + return soc24_common_hw_fini(ip_block); } -static int soc24_common_resume(void *handle) +static int soc24_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc24_common_hw_init(adev); + return soc24_common_hw_init(ip_block); } static bool soc24_common_is_idle(void *handle) @@ -531,16 +522,6 @@ static bool soc24_common_is_idle(void *handle) return true; } -static int soc24_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc24_common_soft_reset(void *handle) -{ - return 0; -} - static int soc24_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -595,14 +576,11 @@ static const struct amd_ip_funcs soc24_common_ip_funcs = { .early_init = soc24_common_early_init, .late_init = soc24_common_late_init, .sw_init = soc24_common_sw_init, - .sw_fini = soc24_common_sw_fini, .hw_init = soc24_common_hw_init, .hw_fini = soc24_common_hw_fini, .suspend = soc24_common_suspend, .resume = soc24_common_resume, .is_idle = soc24_common_is_idle, - .wait_for_idle = soc24_common_wait_for_idle, - .soft_reset = soc24_common_soft_reset, .set_clockgating_state = soc24_common_set_clockgating_state, .set_powergating_state = soc24_common_set_powergating_state, .get_clockgating_state = soc24_common_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 
3ac56a9645eb..21b71a427b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -113,6 +113,14 @@ enum ta_ras_address_type { TA_RAS_PA_TO_MCA, }; +enum ta_ras_nps_mode { + TA_RAS_UNKNOWN_MODE = 0, + TA_RAS_NPS1_MODE = 1, + TA_RAS_NPS2_MODE = 2, + TA_RAS_NPS4_MODE = 4, + TA_RAS_NPS8_MODE = 8, +}; + /* Input/output structures for RAS commands */ /**********************************************************/ @@ -139,6 +147,7 @@ struct ta_ras_init_flags { uint8_t dgpu_mode; uint16_t xcc_mask; uint8_t channel_dis_num; + uint8_t nps_mode; }; struct ta_ras_mca_addr { diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 24d49d813607..5a04a6770138 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -283,9 +283,9 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev, } } -static int tonga_ih_early_init(void *handle) +static int tonga_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -297,10 +297,10 @@ static int tonga_ih_early_init(void *handle) return 0; } -static int tonga_ih_sw_init(void *handle) +static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true); if (r) @@ -314,9 +314,9 @@ static int tonga_ih_sw_init(void *handle) return r; } -static int tonga_ih_sw_fini(void *handle) +static int tonga_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -324,10 +324,10 @@ static int tonga_ih_sw_fini(void *handle) return 0; } -static int tonga_ih_hw_init(void *handle) +static int tonga_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = tonga_ih_irq_init(adev); if (r) @@ -336,27 +336,21 @@ static int tonga_ih_hw_init(void *handle) return 0; } -static int tonga_ih_hw_fini(void *handle) +static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - tonga_ih_irq_disable(adev); + tonga_ih_irq_disable(ip_block->adev); return 0; } -static int tonga_ih_suspend(void *handle) +static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return tonga_ih_hw_fini(adev); + return tonga_ih_hw_fini(ip_block); } -static int tonga_ih_resume(void *handle) +static int tonga_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return tonga_ih_hw_init(adev); + return tonga_ih_hw_init(ip_block); } static bool tonga_ih_is_idle(void *handle) @@ -370,11 +364,11 @@ static bool tonga_ih_is_idle(void *handle) return true; } -static int tonga_ih_wait_for_idle(void *handle) +static int tonga_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -386,9 +380,9 @@ static int 
tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool tonga_ih_check_soft_reset(void *handle) +static bool tonga_ih_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -405,29 +399,27 @@ static bool tonga_ih_check_soft_reset(void *handle) } } -static int tonga_ih_pre_soft_reset(void *handle) +static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->irq.srbm_soft_reset) + if (!ip_block->adev->irq.srbm_soft_reset) return 0; - return tonga_ih_hw_fini(adev); + return tonga_ih_hw_fini(ip_block); } -static int tonga_ih_post_soft_reset(void *handle) +static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->irq.srbm_soft_reset) return 0; - return tonga_ih_hw_init(adev); + return tonga_ih_hw_init(ip_block); } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->irq.srbm_soft_reset) @@ -471,7 +463,6 @@ static int tonga_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs tonga_ih_ip_funcs = { .name = "tonga_ih", .early_init = tonga_ih_early_init, - .late_init = NULL, .sw_init = tonga_ih_sw_init, .sw_fini = tonga_ih_sw_fini, .hw_init = tonga_ih_hw_init, @@ -486,8 +477,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = { .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs tonga_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 805d6662c88b..bdbca25d80c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -531,9 +531,9 @@ static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev) } -static int uvd_v3_1_early_init(void *handle) +static int uvd_v3_1_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; uvd_v3_1_set_ring_funcs(adev); @@ -542,10 +542,10 @@ static int uvd_v3_1_early_init(void *handle) return 0; } -static int uvd_v3_1_sw_init(void *handle) +static int uvd_v3_1_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; void *ptr; uint32_t ucode_len; @@ -580,10 +580,10 @@ static int uvd_v3_1_sw_init(void *handle) return r; } -static int uvd_v3_1_sw_fini(void *handle) +static int uvd_v3_1_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -621,13 +621,13 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev, /** * uvd_v3_1_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the 
amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v3_1_hw_init(void *handle) +static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -688,13 +688,13 @@ done: /** * uvd_v3_1_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v3_1_hw_fini(void *handle) +static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -704,17 +704,17 @@ static int uvd_v3_1_hw_fini(void *handle) return 0; } -static int uvd_v3_1_prepare_suspend(void *handle) +static int uvd_v3_1_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v3_1_suspend(void *handle) +static int uvd_v3_1_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -740,23 +740,22 @@ static int uvd_v3_1_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v3_1_hw_fini(adev); + r = uvd_v3_1_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v3_1_resume(void *handle) +static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v3_1_hw_init(adev); + return uvd_v3_1_hw_init(ip_block); } static bool uvd_v3_1_is_idle(void *handle) @@ -766,10 +765,10 @@ static bool uvd_v3_1_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v3_1_wait_for_idle(void *handle) +static int uvd_v3_1_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -778,9 +777,9 @@ static int uvd_v3_1_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v3_1_soft_reset(void *handle) +static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v3_1_stop(adev); @@ -806,7 +805,6 @@ static int uvd_v3_1_set_powergating_state(void *handle, static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .name = "uvd_v3_1", .early_init = uvd_v3_1_early_init, - .late_init = NULL, .sw_init = uvd_v3_1_sw_init, .sw_fini = uvd_v3_1_sw_fini, .hw_init = uvd_v3_1_hw_init, @@ -819,8 +817,6 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .soft_reset = uvd_v3_1_soft_reset, .set_clockgating_state = uvd_v3_1_set_clockgating_state, .set_powergating_state = uvd_v3_1_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version 
uvd_v3_1_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 3f19c606f4de..a836dc9cfcad 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -90,9 +90,9 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } -static int uvd_v4_2_early_init(void *handle) +static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; uvd_v4_2_set_ring_funcs(adev); @@ -101,10 +101,10 @@ static int uvd_v4_2_early_init(void *handle) return 0; } -static int uvd_v4_2_sw_init(void *handle) +static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* UVD TRAP */ @@ -130,10 +130,10 @@ static int uvd_v4_2_sw_init(void *handle) return r; } -static int uvd_v4_2_sw_fini(void *handle) +static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -147,13 +147,13 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, /** * uvd_v4_2_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v4_2_hw_init(void *handle) +static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -202,13 +202,13 @@ done: /** * uvd_v4_2_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v4_2_hw_fini(void *handle) +static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -218,17 +218,17 @@ static int uvd_v4_2_hw_fini(void *handle) return 0; } -static int uvd_v4_2_prepare_suspend(void *handle) +static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v4_2_suspend(void *handle) +static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -254,23 +254,22 @@ static int uvd_v4_2_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v4_2_hw_fini(adev); + r = uvd_v4_2_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v4_2_resume(void *handle) +static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v4_2_hw_init(adev); + return uvd_v4_2_hw_init(ip_block); } /** @@ -666,10 +665,10 @@ static bool uvd_v4_2_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v4_2_wait_for_idle(void *handle) +static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -678,9 +677,9 @@ static int uvd_v4_2_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v4_2_soft_reset(void *handle) +static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v4_2_stop(adev); @@ -756,7 +755,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .name = "uvd_v4_2", .early_init = uvd_v4_2_early_init, - .late_init = NULL, .sw_init = uvd_v4_2_sw_init, .sw_fini = uvd_v4_2_sw_fini, .hw_init = uvd_v4_2_hw_init, @@ -769,8 +767,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .soft_reset = uvd_v4_2_soft_reset, .set_clockgating_state = uvd_v4_2_set_clockgating_state, .set_powergating_state = uvd_v4_2_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index efd903c21d48..ab55fae3569e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -88,9 +88,9 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } -static int uvd_v5_0_early_init(void *handle) +static int uvd_v5_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; 
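
By this point the pattern repeated across every file in this stretch should be clear: each amd_ip_funcs hook gives up its opaque void *handle, which was always cast straight to amdgpu_device, and instead receives the struct amdgpu_ip_block it belongs to, reaching the device as ip_block->adev. Reduced to its shape, with stub types in place of the real amdgpu structs:

/* Stub types reduced to the shape of the change. */
struct amdgpu_device { int usec_timeout; };
struct amdgpu_ip_block { struct amdgpu_device *adev; };

/* Old shape: opaque handle, cast at every call site. */
static int hook_old(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        return adev->usec_timeout ? 0 : -1;
}

/* New shape: the hook gets its own ip_block, so the device is reached
 * type-safely and per-block state stays in scope for callers such as
 * the new per-queue reset paths. */
static int hook_new(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        return adev->usec_timeout ? 0 : -1;
}

int main(void)
{
        struct amdgpu_device adev = { 100000 };
        struct amdgpu_ip_block block = { &adev };
        return hook_old(&adev) | hook_new(&block);
}
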
uvd_v5_0_set_ring_funcs(adev); @@ -99,10 +99,10 @@ static int uvd_v5_0_early_init(void *handle) return 0; } -static int uvd_v5_0_sw_init(void *handle) +static int uvd_v5_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* UVD TRAP */ @@ -128,10 +128,10 @@ static int uvd_v5_0_sw_init(void *handle) return r; } -static int uvd_v5_0_sw_fini(void *handle) +static int uvd_v5_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -143,13 +143,13 @@ static int uvd_v5_0_sw_fini(void *handle) /** * uvd_v5_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v5_0_hw_init(void *handle) +static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -200,13 +200,13 @@ done: /** * uvd_v5_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v5_0_hw_fini(void *handle) +static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -216,17 +216,17 @@ static int uvd_v5_0_hw_fini(void *handle) return 0; } -static int uvd_v5_0_prepare_suspend(void *handle) +static int uvd_v5_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v5_0_suspend(void *handle) +static int uvd_v5_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -252,23 +252,22 @@ static int uvd_v5_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v5_0_hw_fini(adev); + r = uvd_v5_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v5_0_resume(void *handle) +static int uvd_v5_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v5_0_hw_init(adev); + return uvd_v5_0_hw_init(ip_block); } /** @@ -588,10 +587,10 @@ static bool uvd_v5_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v5_0_wait_for_idle(void *handle) +static int uvd_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -600,9 +599,9 @@ static int uvd_v5_0_wait_for_idle(void *handle) 
return -ETIMEDOUT; } -static int uvd_v5_0_soft_reset(void *handle) +static int uvd_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v5_0_stop(adev); @@ -796,10 +795,15 @@ static int uvd_v5_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD); + if (!ip_block) + return -EINVAL; if (enable) { /* wait for STATUS to clear */ - if (uvd_v5_0_wait_for_idle(handle)) + if (uvd_v5_0_wait_for_idle(ip_block)) return -EBUSY; uvd_v5_0_enable_clock_gating(adev, true); @@ -863,7 +867,6 @@ out: static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .name = "uvd_v5_0", .early_init = uvd_v5_0_early_init, - .late_init = NULL, .sw_init = uvd_v5_0_sw_init, .sw_fini = uvd_v5_0_sw_fini, .hw_init = uvd_v5_0_hw_init, @@ -877,8 +880,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, .get_clockgating_state = uvd_v5_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 495de5068455..39f8c3d3a135 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -354,9 +354,9 @@ error: return r; } -static int uvd_v6_0_early_init(void *handle) +static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; if (!(adev->flags & AMD_IS_APU) && @@ -375,11 +375,11 @@ static int uvd_v6_0_early_init(void *handle) return 0; } -static int uvd_v6_0_sw_init(void *handle) +static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* UVD TRAP */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); @@ -435,10 +435,10 @@ static int uvd_v6_0_sw_init(void *handle) return r; } -static int uvd_v6_0_sw_fini(void *handle) +static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -455,13 +455,13 @@ static int uvd_v6_0_sw_fini(void *handle) /** * uvd_v6_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v6_0_hw_init(void *handle) +static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -524,13 +524,13 @@ done: /** * uvd_v6_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v6_0_hw_fini(void *handle) +static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -540,17 +540,17 @@ static int uvd_v6_0_hw_fini(void *handle) return 0; } -static int uvd_v6_0_prepare_suspend(void *handle) +static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v6_0_suspend(void *handle) +static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -576,23 +576,22 @@ static int uvd_v6_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v6_0_hw_fini(adev); + r = uvd_v6_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v6_0_resume(void *handle) +static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v6_0_hw_init(adev); + return uvd_v6_0_hw_init(ip_block); } /** @@ -1151,22 +1150,22 @@ static bool uvd_v6_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v6_0_wait_for_idle(void *handle) +static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (uvd_v6_0_is_idle(handle)) + if (uvd_v6_0_is_idle(adev)) return 0; } return -ETIMEDOUT; } #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static bool uvd_v6_0_check_soft_reset(void *handle) +static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1184,9 +1183,9 @@ static bool uvd_v6_0_check_soft_reset(void *handle) } } -static int uvd_v6_0_pre_soft_reset(void *handle) +static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->uvd.inst->srbm_soft_reset) return 0; @@ -1195,9 +1194,9 @@ static int uvd_v6_0_pre_soft_reset(void *handle) return 0; } -static int uvd_v6_0_soft_reset(void *handle) +static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->uvd.inst->srbm_soft_reset) @@ -1226,9 +1225,9 @@ static int uvd_v6_0_soft_reset(void *handle) return 0; } -static int uvd_v6_0_post_soft_reset(void *handle) +static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->uvd.inst->srbm_soft_reset) return 0; @@ -1455,11 +1454,16 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ip_block *ip_block; bool enable = (state == AMD_CG_STATE_GATE); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD); + if (!ip_block) + return -EINVAL; + if (enable) { /* wait for STATUS to clear */ - if (uvd_v6_0_wait_for_idle(handle)) + if (uvd_v6_0_wait_for_idle(ip_block)) return -EBUSY; uvd_v6_0_enable_clock_gating(adev, true); /* enable HW gates because UVD is idle */ @@ -1528,7 +1532,6 @@ out: static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .name = "uvd_v6_0", .early_init = uvd_v6_0_early_init, - .late_init = NULL, .sw_init = uvd_v6_0_sw_init, .sw_fini = uvd_v6_0_sw_fini, .hw_init = uvd_v6_0_hw_init, @@ -1545,8 +1548,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, .get_clockgating_state = uvd_v6_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 6068b784dc69..079131aeb2f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -361,9 +361,9 @@ error: return r; } -static int uvd_v7_0_early_init(void *handle) +static int uvd_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->asic_type == CHIP_VEGA20) { u32 harvest; @@ -395,12 +395,12 @@ static int uvd_v7_0_early_init(void *handle) return 0; } -static int uvd_v7_0_sw_init(void *handle) +static int uvd_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->uvd.num_uvd_inst; j++) { if (adev->uvd.harvest_config & (1 << j)) @@ -487,10 +487,10 @@ static int uvd_v7_0_sw_init(void *handle) return r; } -static int uvd_v7_0_sw_fini(void *handle) +static int uvd_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i, j, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_virt_free_mm_table(adev); @@ -510,13 +510,13 @@ static int uvd_v7_0_sw_fini(void *handle) /** * uvd_v7_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v7_0_hw_init(void *handle) +static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; uint32_t tmp; int i, j, r; @@ -588,13 +588,13 @@ done: /** * uvd_v7_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v7_0_hw_fini(void *handle) +static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -608,17 +608,17 @@ static int uvd_v7_0_hw_fini(void *handle) return 0; } -static int uvd_v7_0_prepare_suspend(void *handle) +static int uvd_v7_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v7_0_suspend(void *handle) +static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -644,23 +644,22 @@ static int uvd_v7_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v7_0_hw_fini(adev); + r = uvd_v7_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v7_0_resume(void *handle) +static int uvd_v7_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v7_0_hw_init(adev); + return uvd_v7_0_hw_init(ip_block); } /** @@ -1463,104 +1462,6 @@ static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -#if 0 -static bool uvd_v7_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); -} - -static int uvd_v7_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) { - if (uvd_v7_0_is_idle(handle)) - return 0; - } - return -ETIMEDOUT; -} - -#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static bool uvd_v7_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset = 0; - u32 tmp = RREG32(mmSRBM_STATUS); - - if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || - REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || - (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & - AMDGPU_UVD_STATUS_BUSY_MASK)) - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, - SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); - - if (srbm_soft_reset) { - adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; - return true; - } else { - adev->uvd.inst[ring->me].srbm_soft_reset = 0; - return false; - } -} - -static int uvd_v7_0_pre_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - - uvd_v7_0_stop(adev); - return 0; -} - -static int uvd_v7_0_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; - - if (srbm_soft_reset) { - u32 tmp; - - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = 
RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - - return 0; -} - -static int uvd_v7_0_post_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - - mdelay(5); - - return uvd_v7_0_start(adev); -} -#endif - static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -1610,171 +1511,6 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, return 0; } -#if 0 -static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) -{ - uint32_t data, data1, data2, suvd_flags; - - data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL); - data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); - data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL); - - data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | - UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); - - suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - - data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | - (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | - (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); - - data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | - UVD_CGC_CTRL__UDEC_CM_MODE_MASK | - UVD_CGC_CTRL__UDEC_IT_MODE_MASK | - UVD_CGC_CTRL__UDEC_DB_MODE_MASK | - UVD_CGC_CTRL__UDEC_MP_MODE_MASK | - UVD_CGC_CTRL__SYS_MODE_MASK | - UVD_CGC_CTRL__UDEC_MODE_MASK | - UVD_CGC_CTRL__MPEG2_MODE_MASK | - UVD_CGC_CTRL__REGS_MODE_MASK | - UVD_CGC_CTRL__RBC_MODE_MASK | - UVD_CGC_CTRL__LMI_MC_MODE_MASK | - UVD_CGC_CTRL__LMI_UMC_MODE_MASK | - UVD_CGC_CTRL__IDCT_MODE_MASK | - UVD_CGC_CTRL__MPRD_MODE_MASK | - UVD_CGC_CTRL__MPC_MODE_MASK | - UVD_CGC_CTRL__LBSI_MODE_MASK | - UVD_CGC_CTRL__LRBBM_MODE_MASK | - UVD_CGC_CTRL__WCB_MODE_MASK | - UVD_CGC_CTRL__VCPU_MODE_MASK | - UVD_CGC_CTRL__JPEG_MODE_MASK | - UVD_CGC_CTRL__JPEG2_MODE_MASK | - UVD_CGC_CTRL__SCPU_MODE_MASK); - data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | - UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | - UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | - UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | - UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); - data1 |= suvd_flags; - - WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data); - WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2); -} - -static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) -{ - uint32_t data, data1, cgc_flags, suvd_flags; - - data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE); - data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); - - cgc_flags = UVD_CGC_GATE__SYS_MASK | - UVD_CGC_GATE__UDEC_MASK | - UVD_CGC_GATE__MPEG2_MASK | - UVD_CGC_GATE__RBC_MASK | - UVD_CGC_GATE__LMI_MC_MASK | - UVD_CGC_GATE__IDCT_MASK | - UVD_CGC_GATE__MPRD_MASK | - UVD_CGC_GATE__MPC_MASK | - UVD_CGC_GATE__LBSI_MASK | - UVD_CGC_GATE__LRBBM_MASK | - UVD_CGC_GATE__UDEC_RE_MASK | - UVD_CGC_GATE__UDEC_CM_MASK | - UVD_CGC_GATE__UDEC_IT_MASK | - UVD_CGC_GATE__UDEC_DB_MASK | - UVD_CGC_GATE__UDEC_MP_MASK | - UVD_CGC_GATE__WCB_MASK | - UVD_CGC_GATE__VCPU_MASK | - UVD_CGC_GATE__SCPU_MASK | - UVD_CGC_GATE__JPEG_MASK | - UVD_CGC_GATE__JPEG2_MASK; - - suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - - data |= cgc_flags; - data1 |= suvd_flags; - - 
WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); -} - -static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | - GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); - else - tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | - GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - - -static int uvd_v7_0_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); - - uvd_v7_0_set_bypass_mode(adev, enable); - - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - - if (enable) { - /* disable HW gating and enable Sw gating */ - uvd_v7_0_set_sw_clock_gating(adev); - } else { - /* wait for STATUS to clear */ - if (uvd_v7_0_wait_for_idle(handle)) - return -EBUSY; - - /* enable HW gates because UVD is idle */ - /* uvd_v7_0_set_hw_clock_gating(adev); */ - } - - return 0; -} - -static int uvd_v7_0_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - /* This doesn't actually powergate the UVD block. - * That's done in the dpm code via the SMC. This - * just re-inits the block as necessary. The actual - * gating still happens in the dpm code. We should - * revisit this when there is a cleaner line between - * the smc and the hw blocks - */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - - WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); - - if (state == AMD_PG_STATE_GATE) { - uvd_v7_0_stop(adev); - return 0; - } else { - return uvd_v7_0_start(adev); - } -} -#endif - static int uvd_v7_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1785,7 +1521,6 @@ static int uvd_v7_0_set_clockgating_state(void *handle, const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .name = "uvd_v7_0", .early_init = uvd_v7_0_early_init, - .late_init = NULL, .sw_init = uvd_v7_0_sw_init, .sw_fini = uvd_v7_0_sw_fini, .hw_init = uvd_v7_0_hw_init, @@ -1793,12 +1528,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .prepare_suspend = uvd_v7_0_prepare_suspend, .suspend = uvd_v7_0_suspend, .resume = uvd_v7_0_resume, - .is_idle = NULL /* uvd_v7_0_is_idle */, - .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */, - .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */, - .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */, - .soft_reset = NULL /* uvd_v7_0_soft_reset */, - .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */, .set_clockgating_state = uvd_v7_0_set_clockgating_state, .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 66fada199bda..c1ed91b39415 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -208,13 +208,13 @@ static bool vce_v2_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); } -static int vce_v2_0_wait_for_idle(void *handle) +static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (vce_v2_0_is_idle(handle)) + if 
(vce_v2_0_is_idle(adev)) return 0; } return -ETIMEDOUT; @@ -274,15 +274,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev) static int vce_v2_0_stop(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; int i; int status; + if (vce_v2_0_lmi_clean(adev)) { DRM_INFO("vce is not idle \n"); return 0; } - if (vce_v2_0_wait_for_idle(adev)) { + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE); + if (!ip_block) + return -EINVAL; + + if (vce_v2_0_wait_for_idle(ip_block)) { DRM_INFO("VCE is busy, Can't set clock gating"); return 0; } @@ -398,9 +404,9 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, } } -static int vce_v2_0_early_init(void *handle) +static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vce.num_rings = 2; @@ -410,11 +416,11 @@ static int vce_v2_0_early_init(void *handle) return 0; } -static int vce_v2_0_sw_init(void *handle) +static int vce_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* VCE */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq); @@ -444,10 +450,10 @@ static int vce_v2_0_sw_init(void *handle) return r; } -static int vce_v2_0_sw_fini(void *handle) +static int vce_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vce_suspend(adev); if (r) @@ -456,10 +462,10 @@ static int vce_v2_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v2_0_hw_init(void *handle) +static int vce_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_asic_set_vce_clocks(adev, 10000, 10000); vce_v2_0_enable_mgcg(adev, true, false); @@ -475,19 +481,17 @@ static int vce_v2_0_hw_init(void *handle) return 0; } -static int vce_v2_0_hw_fini(void *handle) +static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cancel_delayed_work_sync(&adev->vce.idle_work); + cancel_delayed_work_sync(&ip_block->adev->vce.idle_work); return 0; } -static int vce_v2_0_suspend(void *handle) +static int vce_v2_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* @@ -513,28 +517,27 @@ static int vce_v2_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v2_0_hw_fini(adev); + r = vce_v2_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v2_0_resume(void *handle) +static int vce_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vce_resume(adev); + r = amdgpu_vce_resume(ip_block->adev); if (r) return r; - return vce_v2_0_hw_init(adev); + return vce_v2_0_hw_init(ip_block); } -static int vce_v2_0_soft_reset(void *handle) +static int vce_v2_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); @@ -614,7 +617,6 @@ static int
vce_v2_0_set_powergating_state(void *handle, static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .name = "vce_v2_0", .early_init = vce_v2_0_early_init, - .late_init = NULL, .sw_init = vce_v2_0_sw_init, .sw_fini = vce_v2_0_sw_fini, .hw_init = vce_v2_0_hw_init, @@ -626,8 +628,6 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .soft_reset = vce_v2_0_soft_reset, .set_clockgating_state = vce_v2_0_set_clockgating_state, .set_powergating_state = vce_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 4bfba2931b08..6bb318a06f19 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -64,7 +64,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); -static int vce_v3_0_wait_for_idle(void *handle); +static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block); static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state); /** @@ -396,9 +396,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) } } -static int vce_v3_0_early_init(void *handle) +static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); @@ -415,9 +415,9 @@ static int vce_v3_0_early_init(void *handle) return 0; } -static int vce_v3_0_sw_init(void *handle) +static int vce_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r, i; @@ -453,10 +453,10 @@ static int vce_v3_0_sw_init(void *handle) return r; } -static int vce_v3_0_sw_fini(void *handle) +static int vce_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vce_suspend(adev); if (r) @@ -465,10 +465,10 @@ static int vce_v3_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v3_0_hw_init(void *handle) +static int vce_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vce_v3_0_override_vce_clock_gating(adev, true); @@ -485,14 +485,14 @@ static int vce_v3_0_hw_init(void *handle) return 0; } -static int vce_v3_0_hw_fini(void *handle) +static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vce.idle_work); - r = vce_v3_0_wait_for_idle(handle); + r = vce_v3_0_wait_for_idle(ip_block); if (r) return r; @@ -500,10 +500,10 @@ static int vce_v3_0_hw_fini(void *handle) return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); } -static int vce_v3_0_suspend(void *handle) +static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -528,23 +528,22 @@ 
static int vce_v3_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v3_0_hw_fini(adev); + r = vce_v3_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v3_0_resume(void *handle) +static int vce_v3_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vce_resume(adev); + r = amdgpu_vce_resume(ip_block->adev); if (r) return r; - return vce_v3_0_hw_init(adev); + return vce_v3_0_hw_init(ip_block); } static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) @@ -609,13 +608,13 @@ static bool vce_v3_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS2) & mask); } -static int vce_v3_0_wait_for_idle(void *handle) +static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) - if (vce_v3_0_is_idle(handle)) + if (vce_v3_0_is_idle(adev)) return 0; return -ETIMEDOUT; @@ -627,9 +626,9 @@ static int vce_v3_0_wait_for_idle(void *handle) #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) -static bool vce_v3_0_check_soft_reset(void *handle) +static bool vce_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; /* According to VCE team , we should use VCE_STATUS instead @@ -668,9 +667,9 @@ static bool vce_v3_0_check_soft_reset(void *handle) } } -static int vce_v3_0_soft_reset(void *handle) +static int vce_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->vce.srbm_soft_reset) @@ -699,29 +698,29 @@ static int vce_v3_0_soft_reset(void *handle) return 0; } -static int vce_v3_0_pre_soft_reset(void *handle) +static int vce_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); - return vce_v3_0_suspend(adev); + return vce_v3_0_suspend(ip_block); } -static int vce_v3_0_post_soft_reset(void *handle) +static int vce_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); - return vce_v3_0_resume(adev); + return vce_v3_0_resume(ip_block); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -897,7 +896,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .name = "vce_v3_0", .early_init = vce_v3_0_early_init, - .late_init = NULL, .sw_init = vce_v3_0_sw_init, .sw_fini = vce_v3_0_sw_fini, .hw_init = vce_v3_0_hw_init, @@ -913,8 +911,6 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, .get_clockgating_state = vce_v3_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 
index 0748bf44c880..79ee555768a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -407,9 +407,9 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) return 0; } -static int vce_v4_0_early_init(void *handle) +static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */ adev->vce.num_rings = 1; @@ -422,9 +422,9 @@ static int vce_v4_0_early_init(void *handle) return 0; } -static int vce_v4_0_sw_init(void *handle) +static int vce_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; unsigned size; @@ -493,10 +493,10 @@ static int vce_v4_0_sw_init(void *handle) return r; } -static int vce_v4_0_sw_fini(void *handle) +static int vce_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* free MM table */ amdgpu_virt_free_mm_table(adev); @@ -513,10 +513,10 @@ static int vce_v4_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v4_0_hw_init(void *handle) +static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) r = vce_v4_0_sriov_start(adev); @@ -536,14 +536,14 @@ static int vce_v4_0_hw_init(void *handle) return 0; } -static int vce_v4_0_hw_fini(void *handle) +static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vce.idle_work); if (!amdgpu_sriov_vf(adev)) { - /* vce_v4_0_wait_for_idle(handle); */ + /* vce_v4_0_wait_for_idle(ip_block); */ vce_v4_0_stop(adev); } else { /* full access mode, so don't touch any VCE register */ @@ -553,9 +553,9 @@ static int vce_v4_0_hw_fini(void *handle) return 0; } -static int vce_v4_0_suspend(void *handle) +static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -594,16 +594,16 @@ static int vce_v4_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v4_0_hw_fini(adev); + r = vce_v4_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v4_0_resume(void *handle) +static int vce_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -624,7 +624,7 @@ static int vce_v4_0_resume(void *handle) return r; } - return vce_v4_0_hw_init(adev); + return vce_v4_0_hw_init(ip_block); } static void vce_v4_0_mc_resume(struct amdgpu_device *adev) @@ -691,273 +691,6 @@ static int vce_v4_0_set_clockgating_state(void *handle, return 0; } -#if 0 -static bool vce_v4_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; - - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_STATUS2__VCE1_BUSY_MASK; - - return !(RREG32(mmSRBM_STATUS2) & mask); -} - -static int vce_v4_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) - if (vce_v4_0_is_idle(handle)) - return 0; - - return -ETIMEDOUT; -} - -#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ -#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ -#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ -#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ - VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) - -static bool vce_v4_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset = 0; - - /* According to VCE team , we should use VCE_STATUS instead - * SRBM_STATUS.VCE_BUSY bit for busy status checking. - * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE - * instance's registers are accessed - * (0 for 1st instance, 10 for 2nd instance). - * - *VCE_STATUS - *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | - *|----+----+-----------+----+----+----+----------+---------+----| - *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| - * - * VCE team suggest use bit 3--bit 6 for busy status check - */ - mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); - if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - mutex_unlock(&adev->grbm_idx_mutex); - - if (srbm_soft_reset) { - adev->vce.srbm_soft_reset = srbm_soft_reset; - return true; - } else { - adev->vce.srbm_soft_reset = 0; - return false; - } -} - -static int vce_v4_0_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset; - - if (!adev->vce.srbm_soft_reset) - return 0; - srbm_soft_reset = adev->vce.srbm_soft_reset; - - if (srbm_soft_reset) { - u32 tmp; - - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - - return 0; -} - -static int vce_v4_0_pre_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->vce.srbm_soft_reset) - return 0; - - mdelay(5); - - return vce_v4_0_suspend(adev); -} - - -static int vce_v4_0_post_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->vce.srbm_soft_reset) - return 0; - - mdelay(5); - - return vce_v4_0_resume(adev); -} - -static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) -{ - u32 tmp, data; - - tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_RB_ARB_CTRL)); - if (override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data); -} - -static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, - bool gated) -{ - u32 data; - - /* Set Override to disable Clock Gating */ - vce_v4_0_override_vce_clock_gating(adev, true); - - /* This function enables MGCG which is controlled by firmware. - With the clocks in the gated state the core is still - accessible but the firmware will throttle the clocks on the - fly as necessary. - */ - if (gated) { - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B)); - data |= 0x1ff; - data &= ~0xef0000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING)); - data |= 0x3ff000; - data &= ~0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2)); - data |= 0x2; - data &= ~0x00010000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING)); - data |= 0x37f; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL)); - data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data); - } else { - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B)); - data &= ~0x80010; - data |= 0xe70008; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING)); - data |= 0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2)); - data |= 0x10000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING)); - data &= ~0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL)); - data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data); - } - vce_v4_0_override_vce_clock_gating(adev, false); -} - -static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - else - tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - -static int vce_v4_0_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); - int i; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_TONGA) || - (adev->asic_type == CHIP_FIJI)) - vce_v4_0_set_bypass_mode(adev, enable); - - if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) - return 0; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < 2; i++) { - /* Program VCE Instance 0 or 1 if not harvested */ - if 
(adev->vce.harvest_config & (1 << i)) - continue; - - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); - - if (enable) { - /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ - uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A); - data &= ~(0xf | 0xff0); - data |= ((0x0 << 0) | (0x04 << 4)); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, data); - - /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */ - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING); - data &= ~(0xf | 0xff0); - data |= ((0x0 << 0) | (0x04 << 4)); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING, data); - } - - vce_v4_0_set_vce_sw_clock_gating(adev, enable); - } - - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} -#endif - static int vce_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state) { @@ -1076,19 +809,12 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev, const struct amd_ip_funcs vce_v4_0_ip_funcs = { .name = "vce_v4_0", .early_init = vce_v4_0_early_init, - .late_init = NULL, .sw_init = vce_v4_0_sw_init, .sw_fini = vce_v4_0_sw_fini, .hw_init = vce_v4_0_hw_init, .hw_fini = vce_v4_0_hw_fini, .suspend = vce_v4_0_suspend, .resume = vce_v4_0_resume, - .is_idle = NULL /* vce_v4_0_is_idle */, - .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */, - .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */, - .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */, - .soft_reset = NULL /* vce_v4_0_soft_reset */, - .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */, .set_clockgating_state = vce_v4_0_set_clockgating_state, .set_powergating_state = vce_v4_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index ecdfbfefd66a..10e99c926fb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -95,14 +95,14 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); /** * vcn_v1_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v1_0_early_init(void *handle) +static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vcn.num_enc_rings = 2; @@ -110,7 +110,7 @@ static int vcn_v1_0_early_init(void *handle) vcn_v1_0_set_enc_ring_funcs(adev); vcn_v1_0_set_irq_funcs(adev); - jpeg_v1_0_early_init(handle); + jpeg_v1_0_early_init(ip_block); return amdgpu_vcn_early_init(adev); } @@ -118,17 +118,17 @@ static int vcn_v1_0_early_init(void *handle) /** * vcn_v1_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Load firmware and sw initialization */ -static int vcn_v1_0_sw_init(void *handle) +static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -197,7 +197,7 @@ static int vcn_v1_0_sw_init(void *handle) amdgpu_vcn_fwlog_init(adev->vcn.inst); } - r = jpeg_v1_0_sw_init(handle); + r = jpeg_v1_0_sw_init(ip_block); /* Allocate memory for VCN IP Dump buffer */ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); @@ -213,20 +213,20 @@ static int vcn_v1_0_sw_init(void *handle) /** * vcn_v1_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v1_0_sw_fini(void *handle) +static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vcn_suspend(adev); if (r) return r; - jpeg_v1_0_sw_fini(handle); + jpeg_v1_0_sw_fini(ip_block); r = amdgpu_vcn_sw_fini(adev); @@ -238,13 +238,13 @@ static int vcn_v1_0_sw_fini(void *handle) /** * vcn_v1_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v1_0_hw_init(void *handle) +static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; @@ -268,13 +268,13 @@ static int vcn_v1_0_hw_init(void *handle) /** * vcn_v1_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v1_0_hw_fini(void *handle) +static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -290,14 +290,14 @@ static int vcn_v1_0_hw_fini(void *handle) /** * vcn_v1_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v1_0_suspend(void *handle) +static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool idle_work_unexecuted; idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -306,7 +306,7 @@ static int vcn_v1_0_suspend(void *handle) amdgpu_dpm_enable_uvd(adev, false); } - r = vcn_v1_0_hw_fini(adev); + r = vcn_v1_0_hw_fini(ip_block); if (r) return r; @@ -318,20 +318,19 @@ static int vcn_v1_0_suspend(void *handle) /** * vcn_v1_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v1_0_resume(void *handle) +static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v1_0_hw_init(adev); + r = vcn_v1_0_hw_init(ip_block); return r; } @@ -1384,9 +1383,9 @@ static bool vcn_v1_0_is_idle(void *handle) return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } -static int vcn_v1_0_wait_for_idle(void *handle) +static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, @@ -1925,9 +1924,9 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring) mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround); } -static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); uint32_t inst_off, is_powered; @@ -1957,9 +1956,9 @@ static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v1_0_dump_ip_state(void *handle) +static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1988,7 +1987,6 @@ static void vcn_v1_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", .early_init = vcn_v1_0_early_init, - .late_init = NULL, .sw_init = vcn_v1_0_sw_init, .sw_fini = vcn_v1_0_sw_fini, .hw_init = vcn_v1_0_hw_init, @@ -1997,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .resume = vcn_v1_0_resume, .is_idle = vcn_v1_0_is_idle, .wait_for_idle = vcn_v1_0_wait_for_idle, - .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */, - .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */, - .soft_reset = NULL /* vcn_v1_0_soft_reset */, - .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, .set_clockgating_state = vcn_v1_0_set_clockgating_state, .set_powergating_state = vcn_v1_0_set_powergating_state, .dump_ip_state = vcn_v1_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index bfd067e2d2f1..e0322cbca3ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -100,14 +100,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v2_0_early_init(void *handle) +static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) adev->vcn.num_enc_rings = 1; @@ -124,17 +124,17 @@ static int vcn_v2_0_early_init(void *handle) /** * vcn_v2_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v2_0_sw_init(void *handle) +static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ @@ -237,14 +237,14 @@ static int vcn_v2_0_sw_init(void *handle) /** * vcn_v2_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v2_0_sw_fini(void *handle) +static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -268,13 +268,13 @@ static int vcn_v2_0_sw_fini(void *handle) /** * vcn_v2_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v2_0_hw_init(void *handle) +static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; @@ -305,13 +305,13 @@ static int vcn_v2_0_hw_init(void *handle) /** * vcn_v2_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v2_0_hw_fini(void *handle) +static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -326,20 +326,19 @@ static int vcn_v2_0_hw_fini(void *handle) /** * vcn_v2_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v2_0_suspend(void *handle) +static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v2_0_hw_fini(adev); + r = vcn_v2_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -347,20 +346,19 @@ static int vcn_v2_0_suspend(void *handle) /** * vcn_v2_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v2_0_resume(void *handle) +static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v2_0_hw_init(adev); + r = vcn_v2_0_hw_init(ip_block); return r; } @@ -1326,9 +1324,9 @@ static bool vcn_v2_0_is_idle(void *handle) return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } -static int vcn_v2_0_wait_for_idle(void *handle) +static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, @@ -2034,9 +2032,9 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); } -static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); uint32_t inst_off, is_powered; @@ -2066,9 +2064,9 @@ static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v2_0_dump_ip_state(void *handle) +static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2097,7 +2095,6 @@ static void vcn_v2_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, - .late_init = NULL, .sw_init = vcn_v2_0_sw_init, .sw_fini = vcn_v2_0_sw_fini, .hw_init = vcn_v2_0_hw_init, @@ -2106,10 +2103,6 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .resume = vcn_v2_0_resume, .is_idle = vcn_v2_0_is_idle, .wait_for_idle = vcn_v2_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_0_set_clockgating_state, .set_powergating_state = vcn_v2_0_set_powergating_state, .dump_ip_state = vcn_v2_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 04e9e806e318..6aa08281d094 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -110,14 +110,14 @@ static int amdgpu_ih_clientid_vcns[] = { /** * vcn_v2_5_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v2_5_early_init(void *handle) +static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = 2; @@ -151,17 +151,17 @@ static int vcn_v2_5_early_init(void *handle) /** * vcn_v2_5_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
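The removed `.late_init = NULL` and `.check_soft_reset`/`.pre_soft_reset`/`.soft_reset`/`.post_soft_reset` NULL entries in the ops tables are not behavior changes: members omitted from a C designated initializer are zero-initialized, and the IP dispatcher already skips NULL hooks. A toy illustration; all names here are invented for the sketch:

struct amdgpu_ip_block;         /* opaque here; real type in amdgpu.h */

struct toy_ip_funcs {
        const char *name;
        int (*early_init)(struct amdgpu_ip_block *ip_block);
        int (*late_init)(struct amdgpu_ip_block *ip_block);  /* optional */
};

static int toy_early_init(struct amdgpu_ip_block *ip_block)
{
        return 0;
}

/* .late_init is simply omitted: unnamed members of a designated
 * initializer are zero-filled, so this table is identical to one that
 * spells out ".late_init = NULL". */
static const struct toy_ip_funcs toy_funcs = {
        .name = "toy",
        .early_init = toy_early_init,
};

/* A dispatcher treats a NULL hook as "nothing to do": */
static int toy_run_late_init(struct amdgpu_ip_block *ip_block)
{
        return toy_funcs.late_init ? toy_funcs.late_init(ip_block) : 0;
}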
* * Load firmware and sw initialization */ -static int vcn_v2_5_sw_init(void *handle) +static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { if (adev->vcn.harvest_config & (1 << j)) @@ -295,14 +295,14 @@ static int vcn_v2_5_sw_init(void *handle) /** * vcn_v2_5_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v2_5_sw_fini(void *handle) +static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -333,13 +333,13 @@ static int vcn_v2_5_sw_fini(void *handle) /** * vcn_v2_5_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v2_5_hw_init(void *handle) +static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r = 0; @@ -381,13 +381,13 @@ static int vcn_v2_5_hw_init(void *handle) /** * vcn_v2_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v2_5_hw_fini(void *handle) +static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -411,20 +411,19 @@ static int vcn_v2_5_hw_fini(void *handle) /** * vcn_v2_5_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v2_5_suspend(void *handle) +static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v2_5_hw_fini(adev); + r = vcn_v2_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -432,20 +431,19 @@ static int vcn_v2_5_suspend(void *handle) /** * vcn_v2_5_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v2_5_resume(void *handle) +static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v2_5_hw_init(adev); + r = vcn_v2_5_hw_init(ip_block); return r; } @@ -1786,9 +1784,9 @@ static bool vcn_v2_5_is_idle(void *handle) return ret; } -static int vcn_v2_5_wait_for_idle(void *handle) +static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1926,9 +1924,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); uint32_t inst_off, is_powered; @@ -1958,9 +1956,9 @@ static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v2_5_dump_ip_state(void *handle) +static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1989,7 +1987,6 @@ static void vcn_v2_5_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .name = "vcn_v2_5", .early_init = vcn_v2_5_early_init, - .late_init = NULL, .sw_init = vcn_v2_5_sw_init, .sw_fini = vcn_v2_5_sw_fini, .hw_init = vcn_v2_5_hw_init, @@ -1998,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .resume = vcn_v2_5_resume, .is_idle = vcn_v2_5_is_idle, .wait_for_idle = vcn_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, .dump_ip_state = vcn_v2_5_dump_ip_state, @@ -2011,7 +2004,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .name = "vcn_v2_6", .early_init = vcn_v2_5_early_init, - .late_init = NULL, .sw_init = vcn_v2_5_sw_init, .sw_fini = vcn_v2_5_sw_fini, .hw_init = vcn_v2_5_hw_init, @@ -2020,10 +2012,6 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .resume = vcn_v2_5_resume, .is_idle = vcn_v2_5_is_idle, .wait_for_idle = vcn_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, .dump_ip_state = vcn_v2_5_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 65dd68b32280..6732ad7f16f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -116,14 +116,14 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v3_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
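One subtlety the typed signature removes: the old suspend/resume paths called their own hw_fini()/hw_init() with adev where the parameter was void *handle, which compiled only because the callee immediately cast the pointer back. With amdgpu_ip_block the forwarding is explicit and type-checked. A sketch under invented toy names:

struct amdgpu_device;

struct amdgpu_ip_block {
        struct amdgpu_device *adev;
};

/* Hypothetical stand-ins for vcn_vX_0_hw_fini() and amdgpu_vcn_suspend(). */
static int toy_hw_fini(struct amdgpu_ip_block *ip_block)
{
        return 0;
}

static int toy_vcn_suspend(struct amdgpu_device *adev)
{
        return 0;
}

static int toy_suspend(struct amdgpu_ip_block *ip_block)
{
        int r;

        /* toy_hw_fini(ip_block->adev) is now a compile error; the old
         * void *handle signature accepted either pointer silently. */
        r = toy_hw_fini(ip_block);
        if (r)
                return r;

        /* Device-wide helpers still take adev explicitly. */
        return toy_vcn_suspend(ip_block->adev);
}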
* * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v3_0_early_init(void *handle) +static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; @@ -153,18 +153,18 @@ static int vcn_v3_0_early_init(void *handle) /** * vcn_v3_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v3_0_sw_init(void *handle) +static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; int vcn_doorbell_index = 0; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vcn_sw_init(adev); if (r) @@ -299,13 +299,13 @@ static int vcn_v3_0_sw_init(void *handle) /** * vcn_v3_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v3_0_sw_fini(void *handle) +static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -338,13 +338,13 @@ static int vcn_v3_0_sw_fini(void *handle) /** * vcn_v3_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v3_0_hw_init(void *handle) +static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r; @@ -413,13 +413,13 @@ static int vcn_v3_0_hw_init(void *handle) /** * vcn_v3_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v3_0_hw_fini(void *handle) +static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -443,20 +443,19 @@ static int vcn_v3_0_hw_fini(void *handle) /** * vcn_v3_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v3_0_suspend(void *handle) +static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v3_0_hw_fini(adev); + r = vcn_v3_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -464,20 +463,19 @@ static int vcn_v3_0_suspend(void *handle) /** * vcn_v3_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v3_0_resume(void *handle) +static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v3_0_hw_init(adev); + r = vcn_v3_0_hw_init(ip_block); return r; } @@ -2116,9 +2114,9 @@ static bool vcn_v3_0_is_idle(void *handle) return ret; } -static int vcn_v3_0_wait_for_idle(void *handle) +static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2251,9 +2249,9 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); uint32_t inst_off; @@ -2284,9 +2282,9 @@ static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v3_0_dump_ip_state(void *handle) +static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2315,7 +2313,6 @@ static void vcn_v3_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .name = "vcn_v3_0", .early_init = vcn_v3_0_early_init, - .late_init = NULL, .sw_init = vcn_v3_0_sw_init, .sw_fini = vcn_v3_0_sw_fini, .hw_init = vcn_v3_0_hw_init, @@ -2324,10 +2321,6 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .resume = vcn_v3_0_resume, .is_idle = vcn_v3_0_is_idle, .wait_for_idle = vcn_v3_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v3_0_set_clockgating_state, .set_powergating_state = vcn_v3_0_set_powergating_state, .dump_ip_state = vcn_v3_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 26c6f10a8c8f..5512259cac79 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -106,14 +106,14 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev); /** * vcn_v4_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v4_0_early_init(void *handle) +static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; if (amdgpu_sriov_vf(adev)) { @@ -164,14 +164,14 @@ static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) /** * vcn_v4_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
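The print_ip_state()/dump_ip_state() pairs converted above all share one loop shape: walk every VCN instance, skip harvested ones, and touch the register snapshot only while the instance is powered. A self-contained sketch of that shape; the struct and field names are stand-ins, not the real amdgpu layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_INST    4
#define TOY_REG_COUNT   8

/* Toy mirror of the per-instance register snapshot the driver keeps. */
struct toy_vcn {
        int num_inst;
        uint32_t harvest_mask;          /* bit i set: instance i fused off */
        bool powered[TOY_MAX_INST];
        uint32_t dump[TOY_MAX_INST][TOY_REG_COUNT];
};

static void toy_print_ip_state(const struct toy_vcn *vcn)
{
        for (int i = 0; i < vcn->num_inst; i++) {
                if (vcn->harvest_mask & (1u << i))
                        continue;               /* skip harvested instance */
                printf("VCN instance %d: %s\n", i,
                       vcn->powered[i] ? "active" : "gated");
                if (!vcn->powered[i])
                        continue;       /* snapshot not valid while gated */
                for (int j = 0; j < TOY_REG_COUNT; j++)
                        printf("  reg[%d] = 0x%08x\n", j, vcn->dump[i][j]);
        }
}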
* * Load firmware and sw initialization */ -static int vcn_v4_0_sw_init(void *handle) +static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); uint32_t *ptr; @@ -253,13 +253,13 @@ static int vcn_v4_0_sw_init(void *handle) /** * vcn_v4_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_sw_fini(void *handle) +static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -294,13 +294,13 @@ static int vcn_v4_0_sw_fini(void *handle) /** * vcn_v4_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_hw_init(void *handle) +static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -341,13 +341,13 @@ static int vcn_v4_0_hw_init(void *handle) /** * vcn_v4_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_hw_fini(void *handle) +static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -372,20 +372,19 @@ static int vcn_v4_0_hw_fini(void *handle) /** * vcn_v4_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v4_0_suspend(void *handle) +static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v4_0_hw_fini(adev); + r = vcn_v4_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -393,20 +392,19 @@ static int vcn_v4_0_suspend(void *handle) /** * vcn_v4_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block */ -static int vcn_v4_0_resume(void *handle) +static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_hw_init(adev); + r = vcn_v4_0_hw_init(ip_block); return r; } @@ -1975,13 +1973,13 @@ static bool vcn_v4_0_is_idle(void *handle) /** * vcn_v4_0_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
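wait_for_idle keeps its SOC15_WAIT_ON_RREG()-based polling of mmUVD_STATUS; only the way adev is obtained changes. Reconstructed as a hedged, standalone sketch, where the status value and retry bound are illustrative and the real macro may also insert delays between reads:

#include <errno.h>
#include <stdint.h>

#define TOY_STATUS_IDLE 0x2     /* illustrative; real value from UVD headers */

struct amdgpu_device;

/* Stub for RREG32_SOC15(VCN, inst, mmUVD_STATUS); always reports idle. */
static uint32_t toy_read_status(struct amdgpu_device *adev, int inst)
{
        (void)adev;
        (void)inst;
        return TOY_STATUS_IDLE;
}

/* Bounded poll until every instance reports idle. */
static int toy_wait_for_idle(struct amdgpu_device *adev, int num_inst)
{
        int i, tries;

        for (i = 0; i < num_inst; i++) {
                for (tries = 0; tries < 100000; tries++) {
                        if ((toy_read_status(adev, i) & TOY_STATUS_IDLE) ==
                            TOY_STATUS_IDLE)
                                break;
                }
                if (tries == 100000)
                        return -ETIMEDOUT;
        }
        return 0;
}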
* * Wait for VCN block idle */ -static int vcn_v4_0_wait_for_idle(void *handle) +static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2158,9 +2156,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); uint32_t inst_off, is_powered; @@ -2190,9 +2188,9 @@ static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_dump_ip_state(void *handle) +static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2222,7 +2220,6 @@ static void vcn_v4_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .name = "vcn_v4_0", .early_init = vcn_v4_0_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_sw_init, .sw_fini = vcn_v4_0_sw_fini, .hw_init = vcn_v4_0_hw_init, @@ -2231,10 +2228,6 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .resume = vcn_v4_0_resume, .is_idle = vcn_v4_0_is_idle, .wait_for_idle = vcn_v4_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_set_clockgating_state, .set_powergating_state = vcn_v4_0_set_powergating_state, .dump_ip_state = vcn_v4_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 0fda70336300..cf808a153fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -95,16 +95,23 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev, int inst_idx, bool indirect); + +static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) +{ + return (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))); +} + /** * vcn_v4_0_3_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int vcn_v4_0_3_early_init(void *handle) +static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -119,13 +126,13 @@ static int vcn_v4_0_3_early_init(void *handle) /** * vcn_v4_0_3_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Load firmware and sw initialization */ -static int vcn_v4_0_3_sw_init(void *handle) +static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); @@ -212,13 +219,13 @@ static int vcn_v4_0_3_sw_init(void *handle) /** * vcn_v4_0_3_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_3_sw_fini(void *handle) +static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(&adev->ddev, &idx)) { @@ -249,13 +256,13 @@ static int vcn_v4_0_3_sw_fini(void *handle) /** * vcn_v4_0_3_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_3_hw_init(void *handle) +static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; @@ -308,13 +315,13 @@ static int vcn_v4_0_3_hw_init(void *handle) /** * vcn_v4_0_3_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_3_hw_fini(void *handle) +static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -327,20 +334,19 @@ static int vcn_v4_0_3_hw_fini(void *handle) /** * vcn_v4_0_3_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v4_0_3_suspend(void *handle) +static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = vcn_v4_0_3_hw_fini(adev); + r = vcn_v4_0_3_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -348,20 +354,19 @@ static int vcn_v4_0_3_suspend(void *handle) /** * vcn_v4_0_3_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
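The new vcn_v4_0_3_normalizn_reqd() helper above widens a rule that used to apply only to SR-IOV VFs: ring-emitted register writes must use normalized offsets also when GC is IP version 9.4.4, hence the reworded "Use normalized offsets when required" comments in the emit paths further below. A sketch of the predicate-plus-mask pattern; the mask value is an assumption for illustration, and the real NORMALIZE_VCN_REG_OFFSET() is defined in the amdgpu sources:

#include <stdbool.h>
#include <stdint.h>

/* Toy device state; the real checks are amdgpu_sriov_vf() and
 * amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4). */
struct toy_dev {
        bool is_vf;
        bool is_gc_9_4_4;
};

static bool toy_normalizn_reqd(const struct toy_dev *dev)
{
        return dev->is_vf || dev->is_gc_9_4_4;
}

/* Assumed semantics: drop the aperture bits so only the block-local
 * offset survives. The mask here is illustrative only. */
#define TOY_NORMALIZE_REG_OFFSET(offset)        ((offset) & 0x1ffff)

/* Compute the offset to emit into the ring packet. */
static uint32_t toy_ring_reg(const struct toy_dev *dev, uint32_t reg)
{
        if (toy_normalizn_reqd(dev))
                reg = TOY_NORMALIZE_REG_OFFSET(reg);
        return reg;
}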
* * Resume firmware and hw init VCN block */ -static int vcn_v4_0_3_resume(void *handle) +static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_3_hw_init(adev); + r = vcn_v4_0_3_hw_init(ip_block); return r; } @@ -1430,8 +1435,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring) static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) { - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets when required */ + if (vcn_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_VCN_REG_OFFSET(reg); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); @@ -1442,8 +1447,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets when required */ + if (vcn_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_VCN_REG_OFFSET(reg); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); @@ -1567,13 +1572,13 @@ static bool vcn_v4_0_3_is_idle(void *handle) /** * vcn_v4_0_3_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_3_wait_for_idle(void *handle) +static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1733,9 +1738,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; } -static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); uint32_t inst_off, is_powered; @@ -1765,9 +1770,9 @@ static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_3_dump_ip_state(void *handle) +static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off, inst_id; @@ -1798,7 +1803,6 @@ static void vcn_v4_0_3_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .name = "vcn_v4_0_3", .early_init = vcn_v4_0_3_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_3_sw_init, .sw_fini = vcn_v4_0_3_sw_fini, .hw_init = vcn_v4_0_3_hw_init, @@ -1807,10 +1811,6 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .resume = vcn_v4_0_3_resume, .is_idle = vcn_v4_0_3_is_idle, .wait_for_idle = vcn_v4_0_3_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_3_set_clockgating_state, .set_powergating_state = vcn_v4_0_3_set_powergating_state, .dump_ip_state = vcn_v4_0_3_dump_ip_state, diff 
--git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 9d4f5352a62c..71961fb3f7ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -104,14 +104,14 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v4_0_5_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v4_0_5_early_init(void *handle) +static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -124,14 +124,14 @@ static int vcn_v4_0_5_early_init(void *handle) /** * vcn_v4_0_5_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v4_0_5_sw_init(void *handle) +static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); uint32_t *ptr; @@ -220,13 +220,13 @@ static int vcn_v4_0_5_sw_init(void *handle) /** * vcn_v4_0_5_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_5_sw_fini(void *handle) +static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -261,13 +261,13 @@ static int vcn_v4_0_5_sw_fini(void *handle) /** * vcn_v4_0_5_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_5_hw_init(void *handle) +static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -291,13 +291,13 @@ static int vcn_v4_0_5_hw_init(void *handle) /** * vcn_v4_0_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_5_hw_fini(void *handle) +static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -320,20 +320,19 @@ static int vcn_v4_0_5_hw_fini(void *handle) /** * vcn_v4_0_5_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * HW fini and suspend VCN block */ -static int vcn_v4_0_5_suspend(void *handle) +static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v4_0_5_hw_fini(adev); + r = vcn_v4_0_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -341,20 +340,19 @@ static int vcn_v4_0_5_suspend(void *handle) /** * vcn_v4_0_5_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block */ -static int vcn_v4_0_5_resume(void *handle) +static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_5_hw_init(adev); + r = vcn_v4_0_5_hw_init(ip_block); return r; } @@ -1469,13 +1467,13 @@ static bool vcn_v4_0_5_is_idle(void *handle) /** * vcn_v4_0_5_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_5_wait_for_idle(void *handle) +static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1616,9 +1614,9 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); uint32_t inst_off, is_powered; @@ -1648,9 +1646,9 @@ static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_5_dump_ip_state(void *handle) +static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1680,7 +1678,6 @@ static void vcn_v4_0_5_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .name = "vcn_v4_0_5", .early_init = vcn_v4_0_5_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_5_sw_init, .sw_fini = vcn_v4_0_5_sw_fini, .hw_init = vcn_v4_0_5_hw_init, @@ -1689,10 +1686,6 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .resume = vcn_v4_0_5_resume, .is_idle = vcn_v4_0_5_is_idle, .wait_for_idle = vcn_v4_0_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, .set_powergating_state = vcn_v4_0_5_set_powergating_state, .dump_ip_state = vcn_v4_0_5_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index c305386358b4..fe2cc1a80c13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -87,14 +87,14 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v5_0_0_early_init - set function pointers and load microcode * - * 
@handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v5_0_0_early_init(void *handle) +static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -108,14 +108,14 @@ static int vcn_v5_0_0_early_init(void *handle) /** * vcn_v5_0_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v5_0_0_sw_init(void *handle) +static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); uint32_t *ptr; @@ -187,13 +187,13 @@ static int vcn_v5_0_0_sw_init(void *handle) /** * vcn_v5_0_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v5_0_0_sw_fini(void *handle) +static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -225,13 +225,13 @@ static int vcn_v5_0_0_sw_fini(void *handle) /** * vcn_v5_0_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v5_0_0_hw_init(void *handle) +static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -255,13 +255,13 @@ static int vcn_v5_0_0_hw_init(void *handle) /** * vcn_v5_0_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v5_0_0_hw_fini(void *handle) +static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -284,20 +284,19 @@ static int vcn_v5_0_0_hw_fini(void *handle) /** * vcn_v5_0_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v5_0_0_suspend(void *handle) +static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v5_0_0_hw_fini(adev); + r = vcn_v5_0_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -305,20 +304,19 @@ static int vcn_v5_0_0_suspend(void *handle) /** * vcn_v5_0_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v5_0_0_resume(void *handle) +static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v5_0_0_hw_init(adev); + r = vcn_v5_0_0_hw_init(ip_block); return r; } @@ -1196,13 +1194,13 @@ static bool vcn_v5_0_0_is_idle(void *handle) /** * vcn_v5_0_0_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v5_0_0_wait_for_idle(void *handle) +static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1343,9 +1341,9 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); uint32_t inst_off, is_powered; @@ -1375,9 +1373,9 @@ static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v5_0_dump_ip_state(void *handle) +static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1406,7 +1404,6 @@ static void vcn_v5_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .name = "vcn_v5_0_0", .early_init = vcn_v5_0_0_early_init, - .late_init = NULL, .sw_init = vcn_v5_0_0_sw_init, .sw_fini = vcn_v5_0_0_sw_fini, .hw_init = vcn_v5_0_0_hw_init, @@ -1415,10 +1412,6 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .resume = vcn_v5_0_0_resume, .is_idle = vcn_v5_0_0_is_idle, .wait_for_idle = vcn_v5_0_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, .set_powergating_state = vcn_v5_0_0_set_powergating_state, .dump_ip_state = vcn_v5_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index bf68e18e3824..0fedadd0a6a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -472,18 +472,18 @@ static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs; } -static int vega10_ih_early_init(void *handle) +static int vega10_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vega10_ih_set_interrupt_funcs(adev); vega10_ih_set_self_irq_funcs(adev); return 0; } -static int vega10_ih_sw_init(void *handle) +static int vega10_ih_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -525,43 +525,35 @@ static int 
vega10_ih_sw_init(void *handle) return r; } -static int vega10_ih_sw_fini(void *handle) +static int vega10_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int vega10_ih_hw_init(void *handle) +static int vega10_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_irq_init(adev); + return vega10_ih_irq_init(ip_block->adev); } -static int vega10_ih_hw_fini(void *handle) +static int vega10_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - vega10_ih_irq_disable(adev); + vega10_ih_irq_disable(ip_block->adev); return 0; } -static int vega10_ih_suspend(void *handle) +static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_hw_fini(adev); + return vega10_ih_hw_fini(ip_block); } -static int vega10_ih_resume(void *handle) +static int vega10_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_hw_init(adev); + return vega10_ih_hw_init(ip_block); } static bool vega10_ih_is_idle(void *handle) @@ -570,13 +562,13 @@ static bool vega10_ih_is_idle(void *handle) return true; } -static int vega10_ih_wait_for_idle(void *handle) +static int vega10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int vega10_ih_soft_reset(void *handle) +static int vega10_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -633,7 +625,6 @@ static int vega10_ih_set_powergating_state(void *handle, const struct amd_ip_funcs vega10_ih_ip_funcs = { .name = "vega10_ih", .early_init = vega10_ih_early_init, - .late_init = NULL, .sw_init = vega10_ih_sw_init, .sw_fini = vega10_ih_sw_fini, .hw_init = vega10_ih_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index ac439f0565e3..1c9aff742e43 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); + if (enable) { + /* Unset the CLEAR_OVERFLOW bit to make sure the next step + * is switching the bit from 0 to 1 + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Clear RB_OVERFLOW bit */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + } + /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); @@ -526,18 +553,18 @@ static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs; } -static int vega20_ih_early_init(void *handle) +static int vega20_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vega20_ih_set_interrupt_funcs(adev); vega20_ih_set_self_irq_funcs(adev); return 0; } -static int vega20_ih_sw_init(void *handle) +static int vega20_ih_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr = true; int r; @@ -586,19 +613,19 @@ static int vega20_ih_sw_init(void *handle) return r; } -static int vega20_ih_sw_fini(void *handle) +static int vega20_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int vega20_ih_hw_init(void *handle) +static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = vega20_ih_irq_init(adev); if (r) @@ -607,27 +634,21 @@ static int vega20_ih_hw_init(void *handle) return 0; } -static int vega20_ih_hw_fini(void *handle) +static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - vega20_ih_irq_disable(adev); + vega20_ih_irq_disable(ip_block->adev); return 0; } -static int vega20_ih_suspend(void *handle) +static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega20_ih_hw_fini(adev); + return vega20_ih_hw_fini(ip_block); } -static int vega20_ih_resume(void *handle) +static int vega20_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega20_ih_hw_init(adev); + return vega20_ih_hw_init(ip_block); } static bool vega20_ih_is_idle(void *handle) @@ -636,13 +657,13 @@ static bool vega20_ih_is_idle(void *handle) return true; } -static int vega20_ih_wait_for_idle(void *handle) +static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int vega20_ih_soft_reset(void *handle) +static int vega20_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -696,7 +717,6 @@ static int vega20_ih_set_powergating_state(void *handle, const struct amd_ip_funcs vega20_ih_ip_funcs = { .name = "vega20_ih", .early_init = vega20_ih_early_init, - .late_init = NULL, .sw_init = vega20_ih_sw_init, .sw_fini = vega20_ih_sw_fini, .hw_init = vega20_ih_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 792b2eb6bbac..a83505815d39 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1455,9 +1455,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = #define CZ_REV_BRISTOL(rev) \ ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6)) -static int vi_common_early_init(void *handle) +static int vi_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->flags & AMD_IS_APU) { adev->smc_rreg = &cz_smc_rreg; @@ -1679,9 +1679,9 @@ static int 
vi_common_early_init(void *handle) return 0; } -static int vi_common_late_init(void *handle) +static int vi_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_vi_mailbox_get_irq(adev); @@ -1689,9 +1689,9 @@ static int vi_common_late_init(void *handle) return 0; } -static int vi_common_sw_init(void *handle) +static int vi_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_vi_mailbox_add_irq_id(adev); @@ -1699,14 +1699,9 @@ static int vi_common_sw_init(void *handle) return 0; } -static int vi_common_sw_fini(void *handle) +static int vi_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int vi_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* move the golden regs per IP block */ vi_init_golden_registers(adev); @@ -1718,9 +1713,9 @@ static int vi_common_hw_init(void *handle) return 0; } -static int vi_common_hw_fini(void *handle) +static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable the doorbell aperture */ vi_enable_doorbell_aperture(adev, false); @@ -1731,18 +1726,14 @@ static int vi_common_hw_fini(void *handle) return 0; } -static int vi_common_suspend(void *handle) +static int vi_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vi_common_hw_fini(adev); + return vi_common_hw_fini(ip_block); } -static int vi_common_resume(void *handle) +static int vi_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vi_common_hw_init(adev); + return vi_common_hw_init(ip_block); } static bool vi_common_is_idle(void *handle) @@ -1750,16 +1741,6 @@ static bool vi_common_is_idle(void *handle) return true; } -static int vi_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int vi_common_soft_reset(void *handle) -{ - return 0; -} - static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable) { @@ -2047,19 +2028,14 @@ static const struct amd_ip_funcs vi_common_ip_funcs = { .early_init = vi_common_early_init, .late_init = vi_common_late_init, .sw_init = vi_common_sw_init, - .sw_fini = vi_common_sw_fini, .hw_init = vi_common_hw_init, .hw_fini = vi_common_hw_fini, .suspend = vi_common_suspend, .resume = vi_common_resume, .is_idle = vi_common_is_idle, - .wait_for_idle = vi_common_wait_for_idle, - .soft_reset = vi_common_soft_reset, .set_clockgating_state = vi_common_set_clockgating_state, .set_powergating_state = vi_common_set_powergating_state, .get_clockgating_state = vi_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version vi_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 3e6b4736a7fe..065d87841459 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -365,7 +365,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, p->pasid, dev->id); - err = pqm_create_queue(&p->pqm, dev, 
filep, &q_properties, &queue_id, + err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id, NULL, NULL, NULL, &doorbell_offset_in_process); if (err != 0) goto err_create_queue; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 48caecf7e72e..723f1220e1cc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -28,6 +28,7 @@ #include "kfd_topology.h" #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_xgmi.h" /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. * GPU processor ID are expressed with Bit[31]=1. @@ -2329,6 +2330,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, continue; if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; + if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev)) + continue; sub_type_hdr = (typeof(sub_type_hdr))( (char *)sub_type_hdr + sizeof(struct crat_subtype_iolink)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index fad1c8f2bc83..956198da7859 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -534,7 +534,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) kfd->cwsr_isa = cwsr_trap_gfx11_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex); } else { - BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE); + BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) + > KFD_CWSR_TMA_OFFSET); kfd->cwsr_isa = cwsr_trap_gfx12_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex); } @@ -1392,6 +1393,13 @@ void kfd_dec_compute_active(struct kfd_node *node) WARN_ONCE(count < 0, "Compute profile ref. count error"); } +static bool kfd_compute_active(struct kfd_node *node) +{ + if (atomic_read(&node->kfd->compute_profile)) + return true; + return false; +} + void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { /* @@ -1485,6 +1493,24 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) return node->dqm->ops.halt(node->dqm); } +bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) +{ + struct kfd_node *node; + + if (!kfd->init_complete) + return false; + + if (node_id >= kfd->num_nodes) { + dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", + node_id, kfd->num_nodes - 1); + return false; + } + + node = kfd->nodes[node_id]; + + return kfd_compute_active(node); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 648f40091aa3..c79fe9069e22 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -202,6 +202,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, int r, queue_type; uint64_t wptr_addr_off; + if (!dqm->sched_running || dqm->sched_halt) + return 0; if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; @@ -270,6 +272,8 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, int r; struct mes_remove_queue_input queue_input; + if (!dqm->sched_running || dqm->sched_halt) + return 0; if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; @@ -292,7 +296,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, return r; } -static int remove_all_queues_mes(struct device_queue_manager *dqm) +static int 
remove_all_kfd_queues_mes(struct device_queue_manager *dqm) { struct device_process_node *cur; struct device *dev = dqm->dev->adev->dev; @@ -319,6 +323,33 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm) return retval; } +static int add_all_kfd_queues_mes(struct device_queue_manager *dqm) +{ + struct device_process_node *cur; + struct device *dev = dqm->dev->adev->dev; + struct qcm_process_device *qpd; + struct queue *q; + int retval = 0; + + list_for_each_entry(cur, &dqm->queues, list) { + qpd = cur->qpd; + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_active) + continue; + retval = add_queue_mes(dqm, q, qpd); + if (retval) { + dev_err(dev, "%s: Failed to add queue %d for dev %d", + __func__, + q->properties.queue_id, + dqm->dev->id); + return retval; + } + } + } + + return retval; +} + static int suspend_all_queues_mes(struct device_queue_manager *dqm) { struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; @@ -1742,7 +1773,7 @@ static int halt_cpsch(struct device_queue_manager *dqm) KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else - ret = remove_all_queues_mes(dqm); + ret = remove_all_kfd_queues_mes(dqm); } dqm->sched_halt = true; dqm_unlock(dqm); @@ -1768,6 +1799,9 @@ static int unhalt_cpsch(struct device_queue_manager *dqm) ret = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + else + ret = add_all_kfd_queues_mes(dqm); + dqm_unlock(dqm); return ret; @@ -1867,7 +1901,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else - remove_all_queues_mes(dqm); + remove_all_kfd_queues_mes(dqm); dqm->sched_running = false; @@ -2048,7 +2082,7 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, { unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; struct device *dev = dqm->dev->adev->dev; - uint64_t *fence_addr = dqm->fence_addr; + uint64_t *fence_addr = dqm->fence_addr; while (*fence_addr != fence_value) { /* Fatal err detected, this response won't come */ @@ -2254,6 +2288,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, goto out; *dqm->fence_addr = KFD_FENCE_INIT; + mb(); pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr, KFD_FENCE_COMPLETED); /* should be timed out */ @@ -3173,7 +3208,7 @@ struct copy_context_work_handler_workarea { struct kfd_process *p; }; -static void copy_context_work_handler (struct work_struct *work) +static void copy_context_work_handler(struct work_struct *work) { struct copy_context_work_handler_workarea *workarea; struct mqd_manager *mqd_mgr; @@ -3200,6 +3235,9 @@ static void copy_context_work_handler (struct work_struct *work) struct qcm_process_device *qpd = &pdd->qpd; list_for_each_entry(q, &qpd->queues_list, list) { + if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE) + continue; + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; /* We ignore the return value from get_wave_state diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 8ee3d07ffbdf..eacfeb32f35d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -445,14 +445,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", mpages, cpages, migrate.npages); - kfd_smi_event_migration_end(node, 
p->lead_thread->pid, - start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, node->id, trigger); - svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); out_free: kvfree(buf); + kfd_smi_event_migration_end(node, p->lead_thread->pid, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + 0, node->id, trigger, r); out: if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); @@ -751,14 +750,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(node, p->lead_thread->pid, - start >> PAGE_SHIFT, end >> PAGE_SHIFT, - node->id, 0, trigger); - svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); out_free: kvfree(buf); + kfd_smi_event_migration_end(node, p->lead_thread->pid, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + node->id, 0, trigger, r); out: if (!r && cpages) { mpages = cpages - upages; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 26e48fdc8728..9e5ca0b93b2a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1347,7 +1347,6 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p); void pqm_uninit(struct process_queue_manager *pqm); int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_node *dev, - struct file *f, struct queue_properties *properties, unsigned int *qid, const struct kfd_criu_queue_priv_data *q_data, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d4aa843aacfd..87cd52cf4ee9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -271,11 +271,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; int i; - struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES]; + struct kfd_cu_occupancy *cu_occupancy; u32 queue_format; - memset(cu_occupancy, 0x0, sizeof(cu_occupancy)); - pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); dev = pdd->dev; if (dev->kfd2kgd->get_cu_occupancy == NULL) @@ -293,6 +291,10 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) wave_cnt = 0; max_waves_per_cu = 0; + cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL); + if (!cu_occupancy) + return -ENOMEM; + /* * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition. 
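Replacing the on-stack cu_occupancy[AMDGPU_MAX_QUEUES] array with kcalloc() matters because kernel stacks are only a few pages deep and a sysfs read handler can be entered through a long call chain. A minimal sketch of the pattern the hunk adopts; kcalloc() returns zeroed memory, which is why the explicit memset() could be dropped:

    struct kfd_cu_occupancy *cu_occupancy;

    cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
    if (!cu_occupancy)
            return -ENOMEM;

    /* ... fill cu_occupancy[] from get_cu_occupancy(), sum the wave counts ... */

    kfree(cu_occupancy);            /* released before the result is formatted */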
* For AQL queues, because of cooperative dispatch we multiply the wave count @@ -318,6 +320,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; + kfree(cu_occupancy); return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt); } @@ -338,8 +341,8 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, attr_sdma); struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler; - INIT_WORK(&sdma_activity_work_handler.sdma_activity_work, - kfd_sdma_activity_worker); + INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work, + kfd_sdma_activity_worker); sdma_activity_work_handler.pdd = pdd; sdma_activity_work_handler.sdma_activity_counter = 0; @@ -347,6 +350,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, schedule_work(&sdma_activity_work_handler.sdma_activity_work); flush_work(&sdma_activity_work_handler.sdma_activity_work); + destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work); return snprintf(buffer, PAGE_SIZE, "%llu\n", (sdma_activity_work_handler.sdma_activity_counter)/ @@ -850,8 +854,10 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) goto out; } - /* A prior open of /dev/kfd could have already created the process. */ - process = find_process(thread, false); + /* A prior open of /dev/kfd could have already created the process. + * find_process will increase process kref in this case + */ + process = find_process(thread, true); if (process) { pr_debug("Process already found\n"); } else { @@ -899,8 +905,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) init_waitqueue_head(&process->wait_irq_drain); } out: - if (!IS_ERR(process)) - kref_get(&process->ref); mutex_unlock(&kfd_processes_mutex); mmput(thread->mm); @@ -1186,10 +1190,8 @@ static void kfd_process_ref_release(struct kref *ref) static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm) { - int idx = srcu_read_lock(&kfd_processes_srcu); - struct kfd_process *p = find_process_by_mm(mm); - - srcu_read_unlock(&kfd_processes_srcu, idx); + /* This increments p->ref counter if kfd process p exists */ + struct kfd_process *p = kfd_lookup_process_by_mm(mm); return p ? &p->mmu_notifier : ERR_PTR(-ESRCH); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 01b960b15274..c76db22a1000 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -235,7 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm) static int init_user_queue(struct process_queue_manager *pqm, struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, - struct file *f, unsigned int qid) + unsigned int qid) { int retval; @@ -300,7 +300,6 @@ cleanup: int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_node *dev, - struct file *f, struct queue_properties *properties, unsigned int *qid, const struct kfd_criu_queue_priv_data *q_data, @@ -374,7 +373,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, * allocate_sdma_queue() in create_queue() has the * corresponding check logic. 
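The INIT_WORK_ONSTACK()/destroy_work_on_stack() pair above is the canonical pattern for a work item whose lifetime is a single stack frame: with CONFIG_DEBUG_OBJECTS_WORK enabled, plain INIT_WORK() on a stack object trips the object-debugging code, and the destroy call retires the debug state before the frame is reused. A self-contained sketch (helper names are illustrative):

    #include <linux/workqueue.h>

    static void my_worker(struct work_struct *work)
    {
            /* runs in workqueue context */
    }

    static void run_work_synchronously(void)
    {
            struct work_struct w;

            INIT_WORK_ONSTACK(&w, my_worker);
            schedule_work(&w);
            flush_work(&w);         /* must complete before w goes out of scope */
            destroy_work_on_stack(&w);
    }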
*/ - retval = init_user_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -395,7 +394,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = init_user_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -1029,8 +1028,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, print_queue_properties(&qp); - ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack, - NULL); + ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL); if (ret) { pr_err("Failed to create new queue err:%d\n", ret); goto exit; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index de8b9abf7afc..9b8169761ec5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -44,7 +44,7 @@ struct kfd_smi_client { bool suser; }; -#define MAX_KFIFO_SIZE 1024 +#define KFD_MAX_KFIFO_SIZE 8192 static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *); static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *); @@ -86,7 +86,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user, struct kfd_smi_client *client = filep->private_data; unsigned char *buf; - size = min_t(size_t, size, MAX_KFIFO_SIZE); + size = min_t(size_t, size, KFD_MAX_KFIFO_SIZE); buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -292,12 +292,13 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, - uint32_t from, uint32_t to, uint32_t trigger) + uint32_t from, uint32_t to, uint32_t trigger, + int error_code) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END, KFD_EVENT_FMT_MIGRATE_END( ktime_get_boottime_ns(), pid, start, end - start, - from, to, trigger)); + from, to, trigger, error_code)); } void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, @@ -354,7 +355,7 @@ int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) return -ENOMEM; INIT_LIST_HEAD(&client->list); - ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL); + ret = kfifo_alloc(&client->fifo, KFD_MAX_KFIFO_SIZE, GFP_KERNEL); if (ret) { kfree(client); return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 85010b8307f8..503bff13d815 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -44,7 +44,8 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, uint32_t trigger); void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, - uint32_t from, uint32_t to, uint32_t trigger); + uint32_t from, uint32_t to, uint32_t trigger, + int error_code); void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger); void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 1893c27746a5..3e2911895c74 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3111,8 +3111,6 @@ retry_write_locked: start = max_t(unsigned long, ALIGN_DOWN(addr, 
size), prange->start); last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); if (prange->actual_loc != 0 || best_loc != 0) { - migration = true; - if (best_loc) { r = svm_migrate_to_vram(prange, best_loc, start, last, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); @@ -3135,7 +3133,9 @@ retry_write_locked: if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", r, svms, start, last); - goto out_unlock_range; + goto out_migrate_fail; + } else { + migration = true; } } @@ -3145,6 +3145,7 @@ retry_write_locked: pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", r, svms, start, last); +out_migrate_fail: kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, migration); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 3871591c9aec..9476e30d6baa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1998,6 +1998,8 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; + + dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8d97f17ffe66..f0a6816709ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -320,18 +320,18 @@ static bool dm_is_idle(void *handle) return true; } -static int dm_wait_for_idle(void *handle) +static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; } -static bool dm_check_soft_reset(void *handle) +static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block) { return false; } -static int dm_soft_reset(void *handle) +static int dm_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; @@ -968,7 +968,7 @@ static int dm_set_powergating_state(void *handle, } /* Prototypes of private functions */ -static int dm_early_init(void *handle); +static int dm_early_init(struct amdgpu_ip_block *ip_block); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) @@ -1307,6 +1307,29 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) DRM_INFO("DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); + /* Keeping sanity checks off if + * DCN31 >= 4.0.59.0 + * DCN314 >= 8.0.16.0 + * Otherwise, turn on sanity checks + */ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59)) + adev->dm.dc->debug.sanity_checks = true; + break; + case IP_VERSION(3, 1, 4): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16)) + adev->dm.dc->debug.sanity_checks = true; + break; + default: + break; + } + return 0; } @@ -1696,6 +1719,26 @@ dm_allocate_gpu_mem( return da->cpu_ptr; } +void +dm_free_gpu_mem( + struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *pvMem) +{ + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, 
&adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } + +} + static enum dmub_status dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, enum dmub_gpint_command command_code, @@ -1762,16 +1805,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * /* Send the chunk */ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) - /* No need to free bb here since it shall be done in dm_sw_fini() */ - return NULL; + goto free_bb; } /* Now ask DMUB to copy the bb */ ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); if (ret != DMUB_STATUS_OK) - return NULL; + goto free_bb; return bb; + +free_bb: + dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); + return NULL; + } static enum dmub_ips_disable_type dm_get_default_ips_mode( @@ -1886,7 +1933,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) else init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); } else { - init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) + init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); + else + init_data.flags.gpu_vm_support = + (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; @@ -2115,9 +2166,9 @@ error: return -EINVAL; } -static int amdgpu_dm_early_fini(void *handle) +static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_audio_fini(adev); @@ -2509,9 +2560,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } -static int dm_sw_init(void *handle) +static int dm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->dm.cgs_device = amdgpu_cgs_create_device(adev); @@ -2531,9 +2582,9 @@ static int dm_sw_init(void *handle) return load_dmcu_fw(adev); } -static int dm_sw_fini(void *handle) +static int dm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dal_allocation *da; list_for_each_entry(da, &adev->dm.da_list, list) { @@ -2541,11 +2592,11 @@ static int dm_sw_fini(void *handle) amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); list_del(&da->list); kfree(da); + adev->dm.bb_from_dmub = NULL; break; } } - adev->dm.bb_from_dmub = NULL; kfree(adev->dm.dmub_fb_info); adev->dm.dmub_fb_info = NULL; @@ -2598,9 +2649,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) return ret; } -static int dm_late_init(void *handle) +static int dm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dmcu_iram_parameters params; unsigned int linear_lut[16]; @@ -2790,7 +2841,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the &struct amdgpu_display_manager device. 
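Exporting dm_free_gpu_mem() gives the bounding-box fetch a way to unwind: before this change, a failed GPINT transfer left the allocation parked on adev->dm.da_list until dm_sw_fini(). The alloc/free pairing, sketched with a hypothetical helper standing in for the GPINT send sequence:

    long long addr;
    struct dml2_soc_bb *bb;

    bb = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, sizeof(*bb), &addr);
    if (!bb)
            return NULL;

    if (send_bb_to_dmub(adev, bb) != DMUB_STATUS_OK) {  /* hypothetical helper */
            dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, bb);
            return NULL;
    }
    return bb;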
This involves calling * the initializers of each DM component, then populating the struct with them. @@ -2808,9 +2859,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * - Vblank support * - Debug FS entries, if enabled */ -static int dm_hw_init(void *handle) +static int dm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* Create DAL display manager */ @@ -2824,15 +2875,15 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that * were loaded. Also flush IRQ workqueues and disable them. */ -static int dm_hw_fini(void *handle) +static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_hpd_fini(adev); @@ -2936,9 +2987,9 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } -static int dm_suspend(void *handle) +static int dm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; @@ -3125,9 +3176,9 @@ cleanup: kfree(bundle); } -static int dm_resume(void *handle) +static int dm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; @@ -3142,8 +3193,7 @@ static int dm_resume(void *handle) struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; - int i, r, j, ret; - bool need_hotplug = false; + int i, r, j; struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { @@ -3332,23 +3382,16 @@ static int dm_resume(void *handle) aconnector->mst_root) continue; - ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); - - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } drm_connector_list_iter_end(&iter); - if (need_hotplug) - drm_kms_helper_hotplug_event(ddev); - amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); + drm_kms_helper_hotplug_event(ddev); + return 0; } @@ -3379,8 +3422,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version dm_ip_block = { @@ -3495,7 +3536,7 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, - aconnector->edid); + aconnector->drm_edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) { @@ -3554,18 +3595,19 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; 
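The display-manager hunks above complete the conversion already applied to the VI common block earlier in this patch: every amd_ip_funcs hook now takes the struct amdgpu_ip_block it belongs to, the void *handle casts disappear, and callers outside the IP dispatcher (such as s3_debug_store()) must resolve the block first. The resulting shape, with xyz_ as a placeholder prefix:

    static int xyz_suspend(struct amdgpu_ip_block *ip_block)
    {
            struct amdgpu_device *adev = ip_block->adev;    /* no cast needed */

            /* ... per-block suspend work ... */
            return 0;
    }

    /* Callers outside the dispatcher look the block up by type: */
    struct amdgpu_ip_block *ip_block =
            amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);

    if (!ip_block)
            return -EINVAL;
    return xyz_suspend(ip_block);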
dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { - aconnector->edid = NULL; + aconnector->drm_edid = NULL; if (aconnector->dc_link->aux_mode) { - drm_dp_cec_unset_edid( - &aconnector->dm_dp_aux.aux); + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); } } else { - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; + const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; + + aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); + drm_edid_connector_update(connector, aconnector->drm_edid); if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, + connector->display_info.source_physical_address); } if (!aconnector->timing_requested) { @@ -3576,17 +3618,16 @@ void amdgpu_dm_update_connector_after_detect( "failed to create aconnector->requested_timing\n"); } - drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); update_connector_ext_caps(aconnector); } else { drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); - drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + drm_edid_free(aconnector->drm_edid); + aconnector->drm_edid = NULL; kfree(aconnector->timing_requested); aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ @@ -4622,7 +4663,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { - rc = dc_link_set_backlight_level(link, brightness, 0); + struct set_backlight_level_params backlight_level_params = { 0 }; + + backlight_level_params.backlight_pwm_u16_16 = brightness; + backlight_level_params.transition_time_in_ms = 0; + + rc = dc_link_set_backlight_level(link, &backlight_level_params); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } @@ -5177,15 +5223,20 @@ static ssize_t s3_debug_store(struct device *device, int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); + if (!ip_block) + return -EINVAL; ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { - dm_resume(adev); + dm_resume(ip_block); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else - dm_suspend(adev); + dm_suspend(ip_block); } return ret == 0 ? 
count : 0; @@ -5257,9 +5308,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) return r; } -static int dm_early_init(void *handle) +static int dm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); @@ -7122,32 +7173,24 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link && dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - aconnector->edid = edid; - + aconnector->drm_edid = drm_edid; /* Update emulated (virtual) sink's EDID */ if (dc_em_sink && dc_link) { + // FIXME: Get rid of drm_edid_raw() + const struct edid *edid = drm_edid_raw(drm_edid); + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); - memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); + memmove(dc_em_sink->dc_edid.raw_edid, edid, + (edid->extensions + 1) * EDID_LENGTH); dm_helpers_parse_edid_caps( dc_link, &dc_em_sink->dc_edid, @@ -7177,36 +7220,26 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; + const struct edid *edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - if (drm_detect_hdmi_monitor(edid)) + if (connector->display_info.is_hdmi) init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, @@ -7313,10 +7346,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + uint8_t bpc_limit = 6; if (!dm_state) return NULL; + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + bpc_limit = 8; + do { stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, @@ -7337,11 +7375,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", + DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, - dc_result, + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth), dc_status_to_str(dc_result)); dc_stream_release(stream); @@ -7349,10 +7388,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc -= 2; /* lower bpc to retry validation */ } - } while (stream == NULL && requested_bpc >= 6); + } while (stream == NULL && requested_bpc >= bpc_limit); - if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); + if ((dc_result == DC_FAIL_ENC_VALIDATE || + dc_result == DC_EXCEED_DONGLE_CAP) && + !aconnector->force_yuv420_output) { + DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", + __func__, __LINE__); aconnector->force_yuv420_output = true; stream = create_validate_stream_for_sink(aconnector, drm_mode, @@ -7893,16 +7935,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) } static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (edid) { + if (drm_edid) { /* empty probed_modes */ INIT_LIST_HEAD(&connector->probed_modes); amdgpu_dm_connector->num_modes = - drm_add_edid_modes(connector, edid); + drm_edid_connector_add_modes(connector); /* sorting the probed modes before calling function * amdgpu_dm_get_native_mode() since EDID can have @@ -7916,10 +7958,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_get_native_mode(connector); /* Freesync capabilities are reset by calling - * drm_add_edid_modes() and need to be + * drm_edid_connector_add_modes() and need to be * restored here. 
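Raising the retry floor to 8 bpc for HDMI sinks and DP-to-HDMI dongles encodes the fact that HDMI defines no 6 bpc pixel format, so validating below 8 bpc can never succeed on those signals; the fallback loop simply stops two steps earlier. Its control flow, condensed around a hypothetical validation wrapper:

    uint8_t bpc_limit = hdmi_or_hdmi_dongle ? 8 : 6;     /* condition paraphrased */
    int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
    struct dc_stream_state *stream = NULL;

    do {
            stream = try_validate_at_bpc(requested_bpc); /* hypothetical wrapper */
            if (!stream)
                    requested_bpc -= 2;  /* e.g. 12 -> 10 -> 8 (-> 6 on DP) */
    } while (!stream && requested_bpc >= bpc_limit);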
*/ - amdgpu_dm_update_freesync_caps(connector, edid); + amdgpu_dm_update_freesync_caps(connector, drm_edid); } else { amdgpu_dm_connector->num_modes = 0; } @@ -8015,12 +8057,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!(amdgpu_freesync_vid_mode && drm_edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8033,24 +8075,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct drm_encoder *encoder; - struct edid *edid = amdgpu_dm_connector->edid; + const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; struct dc_link_settings *verified_link_cap = &amdgpu_dm_connector->dc_link->verified_link_cap; const struct dc *dc = amdgpu_dm_connector->dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); - if (!drm_edid_is_valid(edid)) { + if (!drm_edid) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); } else { - amdgpu_dm_connector_ddc_get_modes(connector, edid); + amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) amdgpu_dm_connector_add_common_modes(encoder, connector); - amdgpu_dm_connector_add_freesync_modes(connector, edid); + amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } amdgpu_dm_fbc_init(connector); @@ -9580,7 +9622,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) + if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); @@ -10124,6 +10166,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); + + trace_amdgpu_dm_atomic_commit_tail_finish(state); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -12024,7 +12068,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, } static void parse_edid_displayid_vrr(struct drm_connector *connector, - struct edid *edid) + const struct edid *edid) { u8 *edid_ext = NULL; int i; @@ -12067,7 +12111,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector, } static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12102,7 +12146,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, + struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12136,7 +12181,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * 
amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. - * @edid: EDID from monitor + * @drm_edid: DRM EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by @@ -12144,19 +12189,16 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * FreeSync parameters. */ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int i = 0; - struct detailed_timing *timing; - struct detailed_non_pixel *data; - struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + const struct edid *edid; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; @@ -12169,13 +12211,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; - if (!edid || !sink) { + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; - connector->display_info.monitor_range.min_vfreq = 0; - connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; @@ -12186,6 +12228,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + /* Some eDP panels only have the refresh rate range info in DisplayID */ if ((connector->display_info.monitor_range.min_vfreq == 0 || connector->display_info.monitor_range.max_vfreq == 0)) @@ -12193,67 +12237,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { - bool edid_check_required = false; - - if (amdgpu_dm_connector->dc_link && - amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { - if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { - amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) - freesync_capable = true; - } else { - edid_check_required = edid->version > 1 || - (edid->version == 1 && - edid->revision > 1); - } - } - - if (edid_check_required) { - for (i = 0; i < 4; i++) { - - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. 
- * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; - - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - - if (edid->revision >= 4) { - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) - connector->display_info.monitor_range.min_vfreq += 255; - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) - connector->display_info.monitor_range.max_vfreq += 255; - } - - amdgpu_dm_connector->min_vfreq = - connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = - connector->display_info.monitor_range.max_vfreq; - - break; - } - - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { - - freesync_capable = true; - } - } + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (vsdb_info.replay_mode) { @@ -12262,12 +12249,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; } - } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 90dfffec33cf..6464a8378387 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -673,7 +673,7 @@ struct amdgpu_dm_connector { /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ - struct edid *edid; + const struct drm_edid *drm_edid; /* shared with amdgpu */ struct amdgpu_hpd hpd; @@ -951,7 +951,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid); + const struct drm_edid *drm_edid); void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); @@ -1004,6 +1004,9 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev, enum dc_gpu_mem_alloc_type type, size_t size, long long *addr); +void dm_free_gpu_mem(struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *addr); bool amdgpu_dm_is_headless(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 288be19db7c1..64a041c2af05 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -35,8 +35,8 @@ #include "amdgpu_dm_trace.h" #include "amdgpu_dm_debugfs.h" -#define HPD_DETECTION_PERIOD_uS 5000000 -#define HPD_DETECTION_TIME_uS 1000 +#define HPD_DETECTION_PERIOD_uS 2000000 +#define HPD_DETECTION_TIME_uS 100000 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) { @@ -154,6 
+154,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( amdgpu_dm_psr_enable(vblank_work->stream); if (dm->idle_workqueue && + (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) && dm->dc->idle_optimizations_allowed && dm->idle_workqueue->enable && !dm->idle_workqueue->running) @@ -251,10 +252,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - if (dm->active_vblank_irq_count > 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n"); + if (dm->active_vblank_irq_count > 0) dc_allow_idle_optimizations(dm->dc, false); - } /* * Control PSR based on vblank requirements from OS @@ -272,10 +271,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) vblank_work->acrtc->dm_irq_params.allow_sr_entry); } - if (dm->active_vblank_irq_count == 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n"); + if (dm->active_vblank_irq_count == 0) dc_allow_idle_optimizations(dm->dc, true); - } mutex_unlock(&dm->dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index db56b0aa5454..6a97bb2d9160 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1529,7 +1529,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1543,8 +1542,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1558,10 +1555,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_clock_en); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1719,7 +1715,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1733,8 +1728,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1748,10 +1741,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1907,7 +1899,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1921,8 +1912,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char 
__user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1936,10 +1925,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2091,7 +2079,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2105,8 +2092,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2120,10 +2105,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_bits_per_pixel); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2270,7 +2254,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2284,8 +2267,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2299,10 +2280,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2328,7 +2308,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2342,8 +2321,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2357,10 +2334,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2401,7 +2377,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2415,8 
+2390,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2430,10 +2403,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_chunk_size); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2474,7 +2446,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2488,8 +2459,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2503,10 +2472,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_bpg_offset); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index eea317dcbe8c..b0fea0856866 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -23,6 +23,8 @@ * */ +#include <acpi/video.h> + #include <linux/string.h> #include <linux/acpi.h> #include <linux/i2c.h> @@ -642,6 +644,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, // write rc data memmove(rc_data, data, length); ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + if (ret < 0) + goto err; } // write rc offset @@ -650,20 +654,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + if (ret < 0) + goto err; // write rc length rc_length[0] = (unsigned char) length & 0xFF; rc_length[1] = (unsigned char) (length >> 8) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + if (ret < 0) + goto err; // write rc cmd rc_cmd = cmd | 0x80; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); - - if (ret < 0) { - DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); - return false; - } + if (ret < 0) + goto err; // poll until active is 0 for (i = 0; i < 10; i++) { @@ -686,6 +691,10 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dbg_dp(aux->drm_dev, "success = %d\n", success); return success; + +err: + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); + return false; } static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) @@ -892,6 +901,60 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) return dp_sink_present; } +static int +dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct drm_connector *connector = data; 
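In execute_synaptics_rc_command() the new goto err funnel means every drm_dp_dpcd_write() is checked, where previously only the final command write was; an early AUX failure used to fall through and poll a remote command that had never been fully programmed. The consolidated shape:

    ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
    if (ret < 0)
            goto err;

    ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
    if (ret < 0)
            goto err;

    /* ... poll for completion and return success ... */

    err:
            DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
            return false;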
+ struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); + unsigned char start = block * EDID_LENGTH; + void *edid; + int r; + + if (!acpidev) + return -ENODEV; + + /* fetch the entire edid from BIOS */ + r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid); + if (r < 0) { + drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); + return r; + } + if (len > r || start > r || start + len > r) { + r = -EINVAL; + goto cleanup; + } + + memcpy(buf, edid + start, len); + r = 0; + +cleanup: + kfree(edid); + + return r; +} + +static const struct drm_edid * +dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + + if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID) + return NULL; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + break; + default: + return NULL; + } + + if (connector->force == DRM_FORCE_OFF) + return NULL; + + return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); +} + enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, @@ -902,7 +965,8 @@ enum dc_edid_status dm_helpers_read_local_edid( struct i2c_adapter *ddc; int retry = 3; enum dc_edid_status edid_status; - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; @@ -913,26 +977,31 @@ enum dc_edid_status dm_helpers_read_local_edid( * do check sum and retry to make sure read correct edid. */ do { - - edid = drm_get_edid(&aconnector->base, ddc); + drm_edid = dm_helpers_read_acpi_edid(aconnector); + if (drm_edid) + drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); + else + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); /* DP Compliance Test 4.2.2.6 */ if (link->aux_mode && connector->edid_corrupt) drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); - if (!edid && connector->edid_corrupt) { + if (!drm_edid && connector->edid_corrupt) { connector->edid_corrupt = false; return EDID_BAD_CHECKSUM; } - if (!edid) + if (!drm_edid) return EDID_NO_RESPONSE; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); /* We don't need the original edid anymore */ - kfree(edid); + drm_edid_free(drm_edid); edid_status = dm_helpers_parse_edid_caps( link, @@ -1055,17 +1124,8 @@ void dm_helpers_free_gpu_mem( void *pvMem) { struct amdgpu_device *adev = ctx->driver_context; - struct dal_allocation *da; - - /* walk the da list in DM */ - list_for_each_entry(da, &adev->dm.da_list, list) { - if (pvMem == da->cpu_ptr) { - amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); - list_del(&da->list); - kfree(da); - break; - } - } + + dm_free_gpu_mem(adev, type, pvMem); } bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) @@ -1314,4 +1374,4 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream { // TODO return false; -}
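The local-EDID path now prefers a firmware-provided blob and only falls back to DDC; drm_edid_read_custom() invokes the ACPI probe callback per 128-byte EDID block, and the helper re-fetches the whole BIOS image each time and copies out the requested window. Reduced to its core, the precedence is:

    const struct drm_edid *drm_edid;

    drm_edid = dm_helpers_read_acpi_edid(aconnector);       /* BIOS/ACPI first */
    if (drm_edid)
            drm_info(connector->dev, "Using ACPI provided EDID for %s\n",
                     connector->name);
    else
            drm_edid = drm_edid_read_ddc(connector, ddc);   /* then DDC probe */

    drm_edid_connector_update(connector, drm_edid);         /* parse and cache */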
\ No newline at end of file +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a08e8a0b696c..6e4359490613 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -129,7 +129,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) dc_sink_release(aconnector->dc_sink); } - kfree(aconnector->edid); + drm_edid_free(aconnector->drm_edid); drm_connector_cleanup(connector); drm_dp_mst_put_port_malloc(aconnector->mst_output_port); @@ -182,7 +182,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) dc_sink_release(dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; } @@ -302,16 +302,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector) return drm_add_edid_modes(connector, NULL); - if (!aconnector->edid) { - struct edid *edid; + if (!aconnector->drm_edid) { + const struct drm_edid *drm_edid; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); + drm_edid = drm_dp_mst_edid_read(connector, + &aconnector->mst_root->mst_mgr, + aconnector->mst_output_port); - if (!edid) { + if (!drm_edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, false); - drm_connector_update_edid_property( + drm_edid_connector_update( &aconnector->base, NULL); @@ -345,7 +347,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, true); } @@ -360,10 +362,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + const struct edid *edid; + + edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw() dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH, &init_params); if (!dc_sink) { @@ -405,7 +410,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); + connector, aconnector->drm_edid); #if defined(CONFIG_DRM_AMD_DC_FP) if (!validate_dsc_caps_on_connector(aconnector)) @@ -419,10 +424,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } } - drm_connector_update_edid_property( - &aconnector->base, aconnector->edid); + drm_edid_connector_update(&aconnector->base, aconnector->drm_edid); - ret = drm_add_edid_modes(connector, aconnector->edid); + ret = drm_edid_connector_add_modes(connector); return ret; } @@ -500,7 +504,7 @@ dm_dp_mst_detect(struct drm_connector *connector, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; @@ -1120,6 +1124,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int i, k, ret; bool debugfs_overwrite = false; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); @@ -1127,7 
+1132,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return PTR_ERR(mst_state); /* Set up params */ - DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count); + DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -1143,6 +1148,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (!aconnector->mst_output_port) continue; + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n", + __func__, __LINE__, stream); + continue; + } + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; @@ -1175,6 +1188,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count); + if (count == 0) { ASSERT(0); return 0; @@ -1302,7 +1317,7 @@ static bool is_dsc_need_re_compute( continue; aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; - if (!aconnector || !aconnector->dsc_aux) + if (!aconnector) continue; stream_on_link[new_stream_on_link_num] = aconnector; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index adc710fe4a45..8d2cf95ae739 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c @@ -78,10 +78,3 @@ void bios_set_scratch_critical_state( uint32_t critial_state = state ? 1 : 0; REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state); } - -uint32_t bios_get_vga_enabled_displays( - struct dc_bios *bios) -{ - return REG_READ(BIOS_SCRATCH_3) & 0XFFFF; -} - diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index e1b4a40a353d..ab162f2fe577 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h @@ -34,7 +34,6 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset, bool bios_is_accelerated_mode(struct dc_bios *bios); void bios_set_scratch_acc_mode_change(struct dc_bios *bios, uint32_t state); void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); -uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index e93df3d6222e..bc123f1884da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -50,12 +50,13 @@ #include "link.h" #include "logger_types.h" + + +#include "yellow_carp_offset.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "yellow_carp_offset.h" - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 29eff386505a..91d872d6d392 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -53,9 +53,6 @@ #include "logger_types.h" -#undef DC_LOGGER -#define DC_LOGGER \ - clk_mgr->base.base.ctx->logger #define MAX_INSTANCE 7 @@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } }; +#undef DC_LOGGER +#define DC_LOGGER \ + clk_mgr->base.base.ctx->logger #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index b46a3afe48ca..b77333817f18 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -55,6 +55,7 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger + #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -132,6 +133,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state * for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dccg *dccg = clk_mgr_internal->dccg; struct pipe_ctx *pipe = safe_to_lower ? &context->res_ctx.pipe_ctx[i] : &dc->current_state->res_ctx.pipe_ctx[i]; @@ -148,8 +151,21 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state * new_pipe->stream_res.stream_enc && new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled && new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc); - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || - !pipe->stream->link_enc) && !stream_changed_otg_dig_on) { + + bool has_active_hpo = false; + + if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) { + has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && + dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe); + + } + + + if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) && + (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) { + + /* This w/a should not trigger when we have a dig active */ if (disable) { if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) @@ -257,11 +273,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 }; int i; - for (i = 0; i < context->stream_count; ++i) { const struct dc_stream_state *stream = context->streams[i]; const struct dc_link *link = stream->link; - uint8_t lowest_dpia_index = 0, hr_index = 0; + uint8_t lowest_dpia_index = 0; + unsigned int hr_index = 0; if (!link) continue; @@ -271,6 +287,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_ continue; hr_index = (link->link_index - lowest_dpia_index) / 2; + if (hr_index >= MAX_HOST_ROUTERS_NUM) + continue; host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing( &stream->timing, dc_link_get_highest_encoding_format(link)); } @@ -975,11 +993,8 @@ static void 
dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - bool ips_supported = true; - - ips_supported = dcn35_smu_get_ips_supported(clk_mgr) ? true : false; - return ips_supported; + return dcn35_smu_get_ips_supported(clk_mgr) ? true : false; } static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index a88f1b6ea64c..7872c6cabb14 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -621,8 +621,8 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream, * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object * @stream: The stream to configure CRC on. - * @enable: Enable CRC if true, disable otherwise. * @crc_window: CRC window (x/y start/end) information + * @enable: Enable CRC if true, disable otherwise. * @continuous: Capture CRC on every frame if true. Otherwise, only capture * once. * @@ -1157,6 +1157,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) + get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else { if (dc->ctx->dce_version < DCN_VERSION_2_0) color_space_to_black_color( @@ -1233,16 +1235,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) */ if (is_phantom) { if (tg->funcs->enable_crtc) { - int main_pipe_width = 0, main_pipe_height = 0; - struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); - - if (old_paired_stream) { - main_pipe_width = old_paired_stream->dst.width; - main_pipe_height = old_paired_stream->dst.height; - } - - if (dc->hwss.blank_phantom) - dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); + if (dc->hwseq->funcs.blank_pixel_data) + dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); tg->funcs->enable_crtc(tg); } } @@ -1437,6 +1431,7 @@ void dc_hardware_init(struct dc *dc) detect_edp_presence(dc); if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) dc->hwss.init_hw(dc); + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); } void dc_init_callbacks(struct dc *dc, @@ -1876,6 +1871,41 @@ void dc_z10_save_init(struct dc *dc) dc->hwss.z10_save_init(dc); } +/* Set a pipe unlock order based on the change in DET allocation and store it in dc scratch memory + * Prevents over-allocation of DET during the unlock process + * e.g.
2 pipe config with different streams with a max of 20 DET segments + * Before: After: + * - Pipe0: 10 DET segments - Pipe0: 12 DET segments + * - Pipe1: 10 DET segments - Pipe1: 8 DET segments + * If Pipe0 gets updated first, 22 DET segments will be allocated + */ +static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) +{ + unsigned int i = 0; + struct pipe_ctx *pipe = NULL; + struct timing_generator *tg = NULL; + + if (!dc->config.set_pipe_unlock_order) + return; + + memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + continue; + } + + if (resource_calculate_det_for_stream(context, pipe) < + resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { + dc->scratch.pipes_to_unlock_first[i] = true; + } + } +} + /** * dc_commit_state_no_check - Apply context to the hardware * @@ -1974,6 +2004,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; } + determine_pipe_unlock_order(dc, context); /* Program all planes within new context*/ if (dc->res_pool->funcs->prepare_mcache_programming) dc->res_pool->funcs->prepare_mcache_programming(dc, context); @@ -2156,6 +2187,14 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; res = dc_validate_with_context(dc, set, params->stream_count, context, false); + + /* + * Only update link encoder to stream assignment after bandwidth validation passed. 
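A minimal standalone sketch of the unlock-order rule implemented by determine_pipe_unlock_order() above: pipes whose DET allocation shrinks are flagged to unlock first, so the total never exceeds the budget while pipes are released one by one. The pipe count and segment numbers below are hypothetical, taken from the worked example in the comment.

#include <stdbool.h>
#include <stdio.h>

#define N_PIPES 2

/* Flag pipes whose DET allocation shrinks so they are unlocked first. */
static void pick_unlock_first(const int old_det[], const int new_det[],
			      bool unlock_first[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		unlock_first[i] = new_det[i] < old_det[i];
}

int main(void)
{
	int old_det[N_PIPES] = { 10, 10 };	/* DET segments before the update */
	int new_det[N_PIPES] = { 12, 8 };	/* DET segments after the update */
	bool unlock_first[N_PIPES];
	int i;

	pick_unlock_first(old_det, new_det, unlock_first, N_PIPES);

	/* Pipe1 shrinks (10 -> 8) and unlocks first: peak usage is
	 * 10 + 8 = 18 segments. Unlocking Pipe0 first would peak at
	 * 12 + 10 = 22, over the 20-segment budget.
	 */
	for (i = 0; i < N_PIPES; i++)
		printf("pipe%d unlock_first=%d\n", i, (int)unlock_first[i]);

	return 0;
}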
+ */ + if (res == DC_OK && dc->res_pool->funcs->link_encs_assign) + dc->res_pool->funcs->link_encs_assign( + dc, context, context->streams, context->stream_count); + if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; @@ -2477,41 +2516,35 @@ static enum surface_update_type get_scaling_info_update_type( if (!u->scaling_info) return UPDATE_TYPE_FAST; - if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width + if (u->scaling_info->src_rect.width != u->surface->src_rect.width + || u->scaling_info->src_rect.height != u->surface->src_rect.height + || u->scaling_info->dst_rect.width != u->surface->dst_rect.width || u->scaling_info->dst_rect.height != u->surface->dst_rect.height + || u->scaling_info->clip_rect.width != u->surface->clip_rect.width + || u->scaling_info->clip_rect.height != u->surface->clip_rect.height || u->scaling_info->scaling_quality.integer_scaling != - u->surface->scaling_quality.integer_scaling - ) { + u->surface->scaling_quality.integer_scaling) { update_flags->bits.scaling_change = 1; + if (u->scaling_info->src_rect.width > u->surface->src_rect.width + || u->scaling_info->src_rect.height > u->surface->src_rect.height) + /* Making src rect bigger requires a bandwidth change */ + update_flags->bits.clock_change = 1; + if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) && (u->scaling_info->dst_rect.width < u->surface->src_rect.width || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; - } - - if (u->scaling_info->src_rect.width != u->surface->src_rect.width - || u->scaling_info->src_rect.height != u->surface->src_rect.height) { - update_flags->bits.scaling_change = 1; - if (u->scaling_info->src_rect.width > u->surface->src_rect.width - || u->scaling_info->src_rect.height > u->surface->src_rect.height) - /* Making src rect bigger requires a bandwidth change */ - update_flags->bits.clock_change = 1; + if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && + (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || + u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) + /* Changing clip size of a large surface may result in MPC slice count change */ + update_flags->bits.bandwidth_change = 1; } - if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && - (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || - u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) - /* Changing clip size of a large surface may result in MPC slice count change */ - update_flags->bits.bandwidth_change = 1; - - if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || - u->scaling_info->clip_rect.height != u->surface->clip_rect.height) - update_flags->bits.clip_size_change = 1; - if (u->scaling_info->src_rect.x != u->surface->src_rect.x || u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->clip_rect.x != u->surface->clip_rect.x @@ -2520,13 +2553,13 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) update_flags->bits.position_change = 1; + /* process every update flag before returning */ if (update_flags->bits.clock_change || update_flags->bits.bandwidth_change || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if (update_flags->bits.position_change || - 
update_flags->bits.clip_size_change) + if (update_flags->bits.position_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -2617,7 +2650,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.lut_3d) { + if (update_flags->bits.lut_3d && + u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -2637,6 +2671,29 @@ static enum surface_update_type det_surface_update(const struct dc *dc, return overall_type; } +/* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't + * while both planes are flip_immediate + */ +static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) +{ + bool has_flip_immediate_plane = false; + int i; + + for (i = 0; i < surface_count; i++) { + if (updates[i].surface->flip_immediate) { + has_flip_immediate_plane = true; + break; + } + } + + if (has_flip_immediate_plane && surface_count > 1) { + for (i = 0; i < surface_count; i++) { + if (updates[i].surface->flip_immediate) + updates[i].surface->update_flags.bits.addr_update = 1; + } + } +} + static enum surface_update_type check_update_surfaces_for_stream( struct dc *dc, struct dc_surface_update *updates, @@ -2699,6 +2756,9 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->scaler_sharpener_update) su_flags->bits.scaler_sharpener = 1; + if (stream_update->sharpening_required) + su_flags->bits.sharpening_required = 1; + if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2870,10 +2930,20 @@ static void copy_surface_update_to_plane( sizeof(struct dc_transfer_func_distributed_points)); } - if (srf_update->func_shaper) + if (srf_update->cm2_params) { + surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; + surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; + surface->mcm_luts = srf_update->cm2_params->cm2_luts; + } + + if (srf_update->func_shaper) { memcpy(&surface->in_shaper_func, srf_update->func_shaper, sizeof(surface->in_shaper_func)); + if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER) + surface->mcm_luts.shaper = &surface->in_shaper_func; + } + if (srf_update->lut3d_func) memcpy(&surface->lut3d_func, srf_update->lut3d_func, sizeof(surface->lut3d_func)); @@ -2886,10 +2956,17 @@ static void copy_surface_update_to_plane( surface->sdr_white_level_nits = srf_update->sdr_white_level_nits; - if (srf_update->blend_tf) + if (srf_update->blend_tf) { memcpy(&surface->blend_tf, srf_update->blend_tf, sizeof(surface->blend_tf)); + if (surface->mcm_lut1d_enable) + surface->mcm_luts.lut1d_func = &surface->blend_tf; + } + + if (srf_update->cm2_params || srf_update->blend_tf) + surface->lut_bank_a = !surface->lut_bank_a; + if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; @@ -2901,11 +2978,7 @@ static void copy_surface_update_to_plane( if (srf_update->gamut_remap_matrix) surface->gamut_remap_matrix = *srf_update->gamut_remap_matrix; - if (srf_update->cm2_params) { - surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; - surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; - surface->mcm_luts = srf_update->cm2_params->cm2_luts; - } + if 
(srf_update->cursor_csc_color_matrix) surface->cursor_csc_color_matrix = *srf_update->cursor_csc_color_matrix; @@ -3037,6 +3110,8 @@ static void copy_stream_update_to_stream(struct dc *dc, } if (update->scaler_sharpener_update) stream->scaler_sharpener_update = *update->scaler_sharpener_update; + if (update->sharpening_required) + stream->sharpening_required = *update->sharpening_required; } static void backup_planes_and_stream_state( @@ -3153,6 +3228,11 @@ static bool update_planes_and_stream_state(struct dc *dc, context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. + * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip + * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. + */ + force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); if (update_type == UPDATE_TYPE_FULL) backup_planes_and_stream_state(&dc->scratch.current_state, stream); @@ -3225,8 +3305,7 @@ static bool update_planes_and_stream_state(struct dc *dc, if (update_type != UPDATE_TYPE_MED) continue; - if (surface->update_flags.bits.clip_size_change || - surface->update_flags.bits.position_change) { + if (surface->update_flags.bits.position_change) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -3625,6 +3704,10 @@ static void commit_planes_for_stream_fast(struct dc *dc, struct pipe_ctx *top_pipe_to_program = NULL; struct dc_stream_status *stream_status = NULL; bool should_offload_fams2_flip = false; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + + if (should_lock_all_pipes) + determine_pipe_unlock_order(dc, context); if (dc->debug.fams2_config.bits.enable && dc->debug.fams2_config.bits.enable_offload_flip && @@ -3677,13 +3760,14 @@ static void commit_planes_for_stream_fast(struct dc *dc, if (!pipe_ctx->plane_state) continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; + pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ pipe_ctx->plane_state->triplebuffer_flips = true; } } @@ -3742,6 +3826,8 @@ static void commit_planes_for_stream(struct dc *dc, bool subvp_curr_use = false; uint8_t current_stream_mask = 0; + if (should_lock_all_pipes) + determine_pipe_unlock_order(dc, context); // Once we apply the new subvp context to hardware it won't be in the // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context @@ -3749,7 +3835,7 @@ static void commit_planes_for_stream(struct dc *dc, dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); - if (update_type == UPDATE_TYPE_FULL) + if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) hwss_process_outstanding_hw_updates(dc, dc->current_state); for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -3776,6 +3862,9 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } + if (update_type == UPDATE_TYPE_FULL) 
+ hwss_wait_for_outstanding_hw_updates(dc, dc->current_state); + top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); @@ -3920,19 +4009,20 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->plane_state) continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ - pipe_ctx->plane_state->triplebuffer_flips = true; + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; } } if (update_type == UPDATE_TYPE_FULL) { /* force vsync flip when reconfiguring pipes to prevent underflow */ plane_state->flip_immediate = false; + plane_state->triplebuffer_flips = false; } } @@ -3953,7 +4043,6 @@ static void commit_planes_for_stream(struct dc *dc, continue; ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( @@ -4028,7 +4117,7 @@ static void commit_planes_for_stream(struct dc *dc, /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { - /*only enable triplebuffer for fast_update*/ + /*only enable triplebuffer for fast_update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } @@ -4777,6 +4866,11 @@ static bool update_planes_and_stream_v1(struct dc *dc, update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. + * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip + * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. + */ + force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); if (update_type >= UPDATE_TYPE_FULL) { @@ -5338,8 +5432,10 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) { - if (dc->debug.disable_idle_power_optimizations) + if (dc->debug.disable_idle_power_optimizations) { + DC_LOG_DEBUG("%s: disabled\n", __func__); return; + } if (allow != dc->idle_optimizations_allowed) DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, @@ -5356,8 +5452,10 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const return; if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && - dc->hwss.apply_idle_power_optimizations(dc, allow)) + dc->hwss.apply_idle_power_optimizations(dc, allow)) { dc->idle_optimizations_allowed = allow; + DC_LOG_DEBUG("%s: %s\n", __func__, allow ? 
"enabled" : "disabled"); + } } void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) @@ -5999,7 +6097,12 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state { struct dc_power_profile profile = { 0 }; - profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support; + if (!context || !context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc) + return profile; + struct dc *dc = context->clk_mgr->ctx->dc; + + if (dc->res_pool->funcs->get_power_profile) + profile.power_level = dc->res_pool->funcs->get_power_profile(context); return profile; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 801cdbc8117d..af1ea5792560 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -46,11 +46,6 @@ DC_LOG_IF_TRACE(__VA_ARGS__); \ } while (0) -#define TIMING_TRACE(...) do {\ - if (dc->debug.timing_trace) \ - DC_LOG_SYNC(__VA_ARGS__); \ -} while (0) - #define CLOCK_TRACE(...) do {\ if (dc->debug.clock_trace) \ DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \ @@ -306,43 +301,6 @@ void post_surface_trace(struct dc *dc) } -void context_timing_trace( - struct dc *dc, - struct resource_context *res_ctx) -{ - int i; - int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; - struct crtc_position position; - unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; - DC_LOGGER_INIT(dc->ctx->logger); - - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - /* get_position() returns CRTC vertical/horizontal counter - * hence not applicable for underlay pipe - */ - if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) - continue; - - pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); - h_pos[i] = position.horizontal_count; - v_pos[i] = position.vertical_count; - } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - - if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) - continue; - - TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", - pipe_ctx->stream_res.tg->inst, - pipe_ctx->stream->timing.h_total, - pipe_ctx->stream->timing.v_total, - h_pos[i], v_pos[i]); - } -} - void context_clock_trace( struct dc *dc, struct dc_state *context) @@ -434,3 +392,43 @@ char *dc_status_to_str(enum dc_status status) return "Unexpected status error"; } + +char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding) +{ + switch (pixel_encoding) { + case PIXEL_ENCODING_RGB: + return "RGB"; + case PIXEL_ENCODING_YCBCR422: + return "YUV422"; + case PIXEL_ENCODING_YCBCR444: + return "YUV444"; + case PIXEL_ENCODING_YCBCR420: + return "YUV420"; + default: + return "Unknown"; + } +} + +char *dc_color_depth_to_str(enum dc_color_depth color_depth) +{ + switch (color_depth) { + case COLOR_DEPTH_666: + return "6-bpc"; + case COLOR_DEPTH_888: + return "8-bpc"; + case COLOR_DEPTH_101010: + return "10-bpc"; + case COLOR_DEPTH_121212: + return "12-bpc"; + case COLOR_DEPTH_141414: + return "14-bpc"; + case COLOR_DEPTH_161616: + return "16-bpc"; + case COLOR_DEPTH_999: + return "9-bpc"; + case COLOR_DEPTH_111111: + return "11-bpc"; + default: + return "Unknown"; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7ee2be8f82c4..0419ee7f22a5 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -312,11 +312,11 @@ void get_mpctree_visual_confirm_color( { const struct tg_color pipe_colors[6] = { {MAX_TG_COLOR_VALUE, 0, 0}, /* red */ - {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, /* orange */ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */ {0, MAX_TG_COLOR_VALUE, 0}, /* green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */ - {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, /* purple */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */ }; struct pipe_ctx *top_pipe = pipe_ctx; @@ -497,6 +497,23 @@ void get_mclk_switch_visual_confirm_color( } } +void get_cursor_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + + if (pipe_ctx->stream && pipe_ctx->stream->cursor_position.enable) { + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } else { + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + } +} + void set_p_state_switch_method( struct dc *dc, struct dc_state *context, @@ -1071,8 +1088,13 @@ void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_con if (!pipe_ctx->stream) continue; - if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) - pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + /* For full update we must wait for all double buffer updates, not just DRR updates. This + * is particularly important for minimal transitions. Only check for OTG_MASTER pipes, + * as non-OTG Master pipes share the same OTG as their OTG master. + */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && dc->hwss.wait_for_all_pending_updates) { + dc->hwss.wait_for_all_pending_updates(pipe_ctx); + } hubp = pipe_ctx->plane_res.hubp; if (!hubp) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index dfdfe22d9e85..457d60eeb486 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -430,11 +430,10 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, } bool dc_link_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { return link->dc->link_srv->edp_set_backlight_level(link, - backlight_pwm_u16_16, frame_ramp); + backlight_level_params); } bool dc_link_set_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c7599c40d4be..33125b95c3a1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -765,25 +765,6 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } -/* - * This is a preliminary vp size calculation to allow us to check taps support. - * The result is completely overridden afterwards.
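In plain arithmetic, the helper being deleted here derived a provisional per-axis viewport as ceil(scaling ratio x recout), swapping axes for 90/270-degree rotation. A rough floating-point sketch of that calculation, with hypothetical ratio and recout values standing in for the dc_fixpt types:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double ratio_horz = 1.5, ratio_vert = 1.5;	/* hypothetical src/dst ratios */
	int recout_w = 1280, recout_h = 720;		/* hypothetical recout size */

	/* viewport = ceil(ratio * recout), as calculate_viewport_size() did
	 * with dc_fixpt_ceil(dc_fixpt_mul_int(...))
	 */
	int vp_w = (int)ceil(ratio_horz * recout_w);	/* 1920 */
	int vp_h = (int)ceil(ratio_vert * recout_h);	/* 1080 */

	printf("provisional viewport: %dx%d\n", vp_w, vp_h);
	return 0;
}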
- */ -static void calculate_viewport_size(struct pipe_ctx *pipe_ctx) -{ - struct scaler_data *data = &pipe_ctx->plane_res.scl_data; - - data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width)); - data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height)); - data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width)); - data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height)); - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { - swap(data->viewport.width, data->viewport.height); - swap(data->viewport_c.width, data->viewport_c.height); - } -} - static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { struct rect rec; @@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx); + struct scaling_taps temp = {0}; bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1525,8 +1507,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) calculate_recout(pipe_ctx); /* depends on pixel format */ calculate_scaling_ratios(pipe_ctx); - /* depends on scaling ratios and recout, does not calculate offset yet */ - calculate_viewport_size(pipe_ctx); /* * LB calculations depend on vp size, h/v_active and scaling ratios @@ -1547,6 +1527,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; + // get TAP value with 100x100 dummy data for max scaling quality; override + // if a new scaling quality is required + pipe_ctx->plane_res.scl_data.viewport.width = 100; + pipe_ctx->plane_res.scl_data.viewport.height = 100; + pipe_ctx->plane_res.scl_data.viewport_c.width = 100; + pipe_ctx->plane_res.scl_data.viewport_c.height = 100; + if (pipe_ctx->plane_res.xfm != NULL) + res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( + pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + if (pipe_ctx->plane_res.dpp != NULL) + res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + temp = pipe_ctx->plane_res.scl_data.taps; + + calculate_inits_and_viewports(pipe_ctx); + if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); @@ -1573,11 +1571,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) &plane_state->scaling_quality); } - /* - * Depends on recout, scaling ratios, h_active and taps - * May need to re-check lb size after this in some obscure scenario - */ - if (res) + if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps || + pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps || + pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c || + pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c)) calculate_inits_and_viewports(pipe_ctx); /* @@ -4094,14 +4091,6 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result =
DC_FAIL_BANDWIDTH_VALIDATE; - /* - * Only update link encoder to stream assignment after bandwidth validation passed. - * TODO: Split out assignment and validation. - */ - if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); - return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 9a406d74c0dd..55dc482d9b36 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -292,7 +292,9 @@ bool dc_stream_set_cursor_attributes( * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz */ - if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { + if (dc->debug.allow_sw_cursor_fallback && + attributes->height * attributes->width * 4 > 16384 && + !stream->hw_cursor_req) { if (check_subvp_sw_cursor_fallback_req(dc, stream)) return false; } @@ -421,7 +423,6 @@ bool dc_stream_program_cursor_position( /* apply/update visual confirm */ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) { /* update software state */ - uint32_t color_value = MAX_TG_COLOR_VALUE; int i; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -429,15 +430,7 @@ bool dc_stream_program_cursor_position( /* adjust visual confirm color for all pipes with current stream */ if (stream == pipe_ctx->stream) { - if (stream->cursor_position.enable) { - pipe_ctx->visual_confirm_color.color_r_cr = color_value; - pipe_ctx->visual_confirm_color.color_g_y = 0; - pipe_ctx->visual_confirm_color.color_b_cb = 0; - } else { - pipe_ctx->visual_confirm_color.color_r_cr = 0; - pipe_ctx->visual_confirm_color.color_g_y = 0; - pipe_ctx->visual_confirm_color.color_b_cb = color_value; - } + get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); /* programming hardware */ if (pipe_ctx->plane_state) @@ -819,12 +812,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->dst.height, stream->output_color_space); DC_LOG_DC( - "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", + "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n", stream->timing.pix_clk_100hz / 10, stream->timing.h_total, stream->timing.v_total, - stream->timing.pixel_encoding, - stream->timing.display_color_depth); + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth)); DC_LOG_DC( "\tlink: %d\n", stream->link->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3992ad73165b..e143fab00a86 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -55,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.301" +#define DC_VER "3.2.309" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -225,6 +225,11 @@ struct dc_dmub_caps { bool subvp_psr; bool gecc_enable; uint8_t fams_ver; + bool aux_backlight_support; +}; + +struct dc_scl_caps { + bool sharpener_support; }; struct dc_caps { @@ -292,6 +297,7 @@ struct dc_caps { bool sequential_ono; /* Conservative limit for DCC cases which require ODM4:1 to support*/ uint32_t 
dcc_plane_width_limit; + struct dc_scl_caps scl_caps; }; struct dc_bug_wa { @@ -463,6 +469,7 @@ struct dc_config { unsigned int enable_fpo_flicker_detection; bool disable_hbr_audio_dp2; bool consolidated_dpia_dp_lt; + bool set_pipe_unlock_order; }; enum visual_confirm { @@ -862,7 +869,6 @@ struct dc_debug_options { bool sanity_checks; bool max_disp_clk; bool surface_trace; - bool timing_trace; bool clock_trace; bool validation_trace; bool bandwidth_calcs_trace; @@ -1061,6 +1067,7 @@ struct dc_debug_options { unsigned int sharpen_policy; unsigned int scale_to_sharpness_policy; bool skip_full_updated_if_possible; + unsigned int enable_oled_edp_power_up_opt; }; @@ -1253,7 +1260,6 @@ union surface_update_flags { uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; - uint32_t clip_size_change: 1; uint32_t position_change:1; uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; @@ -1355,6 +1361,7 @@ struct dc_plane_state { enum mpcc_movable_cm_location mcm_location; struct dc_csc_transform cursor_csc_color_matrix; bool adaptive_sharpness_en; + int adaptive_sharpness_policy; int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; @@ -1461,6 +1468,7 @@ struct dc { struct dc_scratch_space current_state; struct dc_scratch_space new_state; struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack + bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */ } scratch; struct dml2_configuration_options dml2_options; @@ -1513,7 +1521,7 @@ struct dc_surface_update { * change cm2_params.component_settings: Full update * change cm2_params.cm2_luts: Fast update */ - struct dc_cm2_parameters *cm2_params; + const struct dc_cm2_parameters *cm2_params; const struct dc_csc_transform *cursor_csc_color_matrix; unsigned int sdr_white_level_nits; }; @@ -1770,7 +1778,6 @@ struct dc_link { bool dongle_mode_timing_override; bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; - bool disable_assr_for_uhbr; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1786,6 +1793,7 @@ struct dc_link { // BW ALLOCATON USB4 ONLY struct dc_dpia_bw_alloc dpia_bw_alloc_config; bool skip_implict_edp_power_control; + enum backlight_control_type backlight_control_type; }; /* Return an enumerated dc_link. @@ -2203,8 +2211,7 @@ void dc_link_edp_panel_backlight_power_on(struct dc_link *link, * and 16 bit fractional, where 1.0 is max backlight value. */ bool dc_link_set_backlight_level(const struct dc_link *dc_link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); /* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). 
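A hedged caller-side sketch of the reworked export above: the two scalar arguments are now carried in struct set_backlight_level_params (added to dc_types.h later in this diff). The function and field names come from the patch; the wrapper and its values are hypothetical. Per the comment above, the PWM level is 16.16 fixed point with 1.0 as maximum.

#include "dc.h"

/* Hypothetical helper: drive the panel to full brightness over PWM. */
static bool example_set_max_backlight(const struct dc_link *link)
{
	struct set_backlight_level_params params = {
		.backlight_pwm_u16_16 = 0x00010000,	/* 1.0 in 16.16 fixed point, i.e. max */
		.frame_ramp = 0,			/* no brightness ramping */
		.control_type = BACKLIGHT_CONTROL_PWM,	/* plain PWM control */
	};

	return dc_link_set_backlight_level(link, &params);
}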
*/ bool dc_link_set_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 1e7de0f03290..f90fc154549a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -519,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi union dmub_rb_cmd cmd = { 0 }; unsigned int panel_inst = 0; - if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst)) + if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) && + dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) return; memset(&cmd, 0, sizeof(cmd)); @@ -1012,7 +1013,6 @@ static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of @@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) memset(&new_signals, 0, sizeof(new_signals)); + new_signals.bits.allow_idle = 1; /* always set */ + if (dc->config.disable_ips == DMUB_IPS_ENABLE || dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { new_signals.bits.allow_pg = 1; @@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) */ dc_dmub_srv->needs_idle_wake = false; - if (prev_driver_signals.bits.allow_ips2 && + if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { DC_LOG_IPS( @@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) } dc_dmub_srv_notify_idle(dc, false); - if (prev_driver_signals.bits.allow_ips1) { + if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, @@ -1862,3 +1864,81 @@ void dc_dmub_srv_fams2_passthrough_flip( dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT); } } + +bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) +{ + bool result; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, + start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + + return result; +} + +void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +{ + uint32_t i; + enum dmub_gpint_command command_code; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + switch (output->ips_mode) { + case DMUB_IPS_MODE_IPS1_MAX: + command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS2: + command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS1_RCG: + command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS1_ONO2_ON: + command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; + break; + default: + command_code = DMUB_GPINT__INVALID_COMMAND; + break; + } + + if (command_code == DMUB_GPINT__INVALID_COMMAND) + return; + + // send gpint commands and wait for ack + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, + (uint16_t)(output->ips_mode), + &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + 
output->residency_percent = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, + (uint16_t)(output->ips_mode), + &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->entry_counter = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, + (uint16_t)(output->ips_mode), + &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_active_time_us[0] = 0; + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, + (uint16_t)(output->ips_mode), + &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_active_time_us[1] = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, + (uint16_t)(output->ips_mode), + &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_inactive_time_us[0] = 0; + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, + (uint16_t)(output->ips_mode), + &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_inactive_time_us[1] = 0; + + // NUM_IPS_HISTOGRAM_BUCKETS = 16 + for (i = 0; i < 16; i++) + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->histogram[i] = 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 42f0cb672d8b..10b48198b7a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -209,4 +209,43 @@ void dc_dmub_srv_fams2_passthrough_flip( struct dc_stream_state *stream, struct dc_surface_update *srf_updates, int surface_count); + +/** + * struct ips_residency_info - struct containing info from dmub_ips_residency_stats + * + * @ips_mode: The mode of IPS that the following stats pertain to + * @residency_percent: The percentage of time spent in given IPS mode in millipercent + * @entry_counter: The number of entries made into this IPS state + * @total_active_time_us: uint32_t array of length 2 representing time in the given IPS mode + * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits. + * @total_inactive_time_us: uint32_t array of length 2 representing time outside the given IPS mode + * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits.
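As the kernel-doc above notes, each duration is split across two 32-bit halves, so a caller reassembles the 64-bit microsecond count with a shift-and-or. A small standalone sketch with hypothetical values in place of the GPINT replies:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values read back through the two GPINT queries */
	uint32_t total_active_time_us[2] = { 0x89ABCDEF, 0x00000001 };

	/* index 0 holds the lower 32 bits, index 1 the upper 32 bits */
	uint64_t total_us = ((uint64_t)total_active_time_us[1] << 32) |
			    total_active_time_us[0];

	printf("total active time: %llu us\n", (unsigned long long)total_us);
	return 0;
}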
+ * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c + */ +struct ips_residency_info { + enum dmub_ips_mode ips_mode; + unsigned int residency_percent; + unsigned int entry_counter; + unsigned int total_active_time_us[2]; + unsigned int total_inactive_time_us[2]; + unsigned int histogram[16]; +}; + +/** + * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status + * + * @dc_dmub_srv: The DC DMUB service pointer + * @start_measurement: Describes whether to start or stop measurement + * + * Return: true if GPINT was sent successfully, false otherwise + */ +bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); + +/** + * void dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * + * @dc_dmub_srv: The DC DMUB service pointer + * @output: Output struct to copy the residency info to + */ +void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 41bd95e9177a..8dd6eb044829 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1166,6 +1166,7 @@ struct dpcd_caps { int8_t branch_dev_name[6]; int8_t branch_hw_revision; int8_t branch_fw_revision[2]; + int8_t branch_vendor_specific_data[4]; bool allow_invalid_MSA_timing_param; bool panel_mode_edp; @@ -1191,6 +1192,7 @@ struct dpcd_caps { struct edp_psr_info psr_info; struct replay_info pr_info; + uint16_t edp_oled_emission_rate; }; union dpcd_sink_ext_caps { @@ -1204,7 +1206,7 @@ union dpcd_sink_ext_caps { uint8_t oled : 1; uint8_t reserved_2 : 1; uint8_t miniled : 1; - uint8_t reserved : 1; + uint8_t emission_output : 1; } bits; uint8_t raw; }; @@ -1358,6 +1360,9 @@ struct dp_trace { #ifndef DP_TUNNELING_IRQ #define DP_TUNNELING_IRQ (1 << 5) #endif +#ifndef DP_BRANCH_VENDOR_SPECIFIC_START +#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C +#endif /** USB4 DPCD BW Allocation Registers Chapter 10.7 **/ #ifndef DP_TUNNELING_CAPABILITIES #define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index 44afcd989224..bd37ec82b42d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -26,7 +26,6 @@ #ifndef _DC_PLANE_H_ #define _DC_PLANE_H_ -#include "dc.h" #include "dc_hw_types.h" struct dc_plane_state *dc_create_plane_state(const struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 603552dbd771..c8d8e335fa37 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -8,13 +8,13 @@ #include "dcn32/dcn32_dpp.h" #include "dcn401/dcn401_dpp.h" -static struct spl_funcs dcn2_spl_funcs = { +static struct spl_callbacks dcn2_spl_callbacks = { .spl_calc_lb_num_partitions = dscl2_spl_calc_lb_num_partitions, }; -static struct spl_funcs dcn32_spl_funcs = { +static struct spl_callbacks dcn32_spl_callbacks = { .spl_calc_lb_num_partitions = dscl32_spl_calc_lb_num_partitions, }; -static struct spl_funcs dcn401_spl_funcs = { +static struct spl_callbacks dcn401_spl_callbacks = { .spl_calc_lb_num_partitions = dscl401_spl_calc_lb_num_partitions, }; static void populate_splrect_from_rect(struct spl_rect *spl_rect, const
struct rect *rect) @@ -38,6 +38,7 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality, spl_scaling_quality->h_taps = scaling_quality->h_taps; spl_scaling_quality->v_taps_c = scaling_quality->v_taps_c; spl_scaling_quality->v_taps = scaling_quality->v_taps; + spl_scaling_quality->integer_scaling = scaling_quality->integer_scaling; } static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality, const struct spl_taps *spl_scaling_quality) @@ -76,16 +77,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl // This is used to determine the vtap support switch (plane_state->ctx->dce_version) { case DCN_VERSION_2_0: - spl_in->funcs = &dcn2_spl_funcs; + spl_in->callbacks = dcn2_spl_callbacks; break; case DCN_VERSION_3_2: - spl_in->funcs = &dcn32_spl_funcs; + spl_in->callbacks = dcn32_spl_callbacks; break; case DCN_VERSION_4_01: - spl_in->funcs = &dcn401_spl_funcs; + spl_in->callbacks = dcn401_spl_callbacks; break; default: - spl_in->funcs = &dcn2_spl_funcs; + spl_in->callbacks = dcn2_spl_callbacks; } // Make format field from spl_in point to plane_res scl_data format spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format; @@ -187,14 +188,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active; spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active; - spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy; + spl_in->sharpen_policy = (enum sharpen_policy)plane_state->adaptive_sharpness_policy; spl_in->debug.scale_to_sharpness_policy = (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy; /* Check if the stream is in fullscreen and if it's HDR.
* Use this to determine sharpness levels */ - spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream); + spl_in->is_fullscreen = pipe_ctx->stream->sharpening_required; spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream); spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index caa45db50232..db1e63a7d460 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -26,7 +26,6 @@ #ifndef _DC_STATE_H_ #define _DC_STATE_H_ -#include "dc.h" #include "inc/core_status.h" struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 14ea47eda0c8..413970588a26 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -143,6 +143,7 @@ union stream_update_flags { uint32_t crtc_timing_adjust : 1; uint32_t fams_changed : 1; uint32_t scaler_sharpener : 1; + uint32_t sharpening_required : 1; } bits; uint32_t raw; @@ -310,6 +311,7 @@ struct dc_stream_state { struct luminance_data lumin_data; bool scaler_sharpener_update; + bool sharpening_required; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -356,6 +358,7 @@ struct dc_stream_update { struct dc_cursor_position *cursor_position; bool *hw_cursor_req; bool *scaler_sharpener_update; + bool *sharpening_required; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 6d7989b751e2..edf4df1d03b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -179,6 +179,9 @@ struct dc_panel_patch { unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; + uint8_t blankstream_before_otg_off; + bool oled_optimize_display_on; + unsigned int force_mst_blocked_discovery; }; struct dc_edid_caps { @@ -922,6 +925,12 @@ struct display_endpoint_id { enum display_endpoint_type ep_type; }; +enum backlight_control_type { + BACKLIGHT_CONTROL_PWM = 0, + BACKLIGHT_CONTROL_VESA_AUX = 1, + BACKLIGHT_CONTROL_AMD_AUX = 2, +}; + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) struct otg_phy_mux { uint8_t phy_output_num; @@ -1295,4 +1304,31 @@ struct dc_commit_streams_params { enum dc_power_source_type power_source; }; +struct set_backlight_level_params { + /* backlight in pwm */ + uint32_t backlight_pwm_u16_16; + /* brightness ramping */ + uint32_t frame_ramp; + /* backlight control type + * 0: PWM backlight control + * 1: VESA AUX backlight control + * 2: AMD AUX backlight control + */ + enum backlight_control_type control_type; + /* backlight in millinits */ + uint32_t backlight_millinits; + /* transition time in ms */ + uint32_t transition_time_in_ms; + /* minimum luminance in nits */ + uint32_t min_luminance; + /* maximum luminance in nits */ + uint32_t max_luminance; + /* minimum backlight in pwm */ + uint32_t min_backlight_pwm; + /* maximum backlight in pwm */ + uint32_t max_backlight_pwm; + /* AUX HW instance */ + uint8_t aux_inst; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b889004509a..d3e46c3cfa57 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ 
b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk( int otg_inst, int dp_hpo_inst) { - /* set the dtbclk_p source */ - dccg401_set_dtbclk_p_src(dccg, src, otg_inst); - /* enabled to select one of the DTBCLKs for pipe */ if (src == REFCLK) dccg401_disable_dpstreamclk(dccg, dp_hpo_inst); @@ -805,33 +802,6 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - switch (link_enc_inst) { - case 0: - REG_UPDATE(SYMCLKA_CLOCK_ENABLE, - SYMCLKA_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1); - break; - case 1: - REG_UPDATE(SYMCLKB_CLOCK_ENABLE, - SYMCLKB_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1); - break; - case 2: - REG_UPDATE(SYMCLKC_CLOCK_ENABLE, - SYMCLKC_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1); - break; - case 3: - REG_UPDATE(SYMCLKD_CLOCK_ENABLE, - SYMCLKD_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1); - break; - } - switch (stream_enc_inst) { case 0: REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, @@ -864,37 +834,8 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst } } -/*get other front end connected to this backend*/ -static uint8_t dccg401_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst) -{ - uint8_t num_enabled_symclk_fe = 0; - uint32_t fe_clk_en[4] = {0}, be_clk_sel[4] = {0}; - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - uint8_t i; - - REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0], - SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]); - - REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1], - SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]); - - REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2], - SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]); - - REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3], - SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]); - - for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) { - if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst) - num_enabled_symclk_fe++; - } - - return num_enabled_symclk_fe; -} - static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) { - uint8_t num_enabled_symclk_fe = 0; struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); switch (stream_enc_inst) { @@ -919,31 +860,6 @@ static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_ins SYMCLKD_FE_SRC_SEL, 0); break; } - - /*check other enabled symclk fe connected to this be */ - num_enabled_symclk_fe = dccg401_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst); - /*only turn off backend clk if other front ends attached to this backend are all off, - for mst, only turn off the backend if this is the last front end*/ - if (num_enabled_symclk_fe == 0) { - switch (link_enc_inst) { - case 0: - REG_UPDATE(SYMCLKA_CLOCK_ENABLE, - SYMCLKA_CLOCK_ENABLE, 0); - break; - case 1: - REG_UPDATE(SYMCLKB_CLOCK_ENABLE, - SYMCLKB_CLOCK_ENABLE, 0); - break; - case 2: - REG_UPDATE(SYMCLKC_CLOCK_ENABLE, - SYMCLKC_CLOCK_ENABLE, 0); - break; - case 3: - REG_UPDATE(SYMCLKD_CLOCK_ENABLE, - SYMCLKD_CLOCK_ENABLE, 
0); - break; - } - } } static const struct dccg_funcs dccg401_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 5c2825bc9a87..d199e4ed2e59 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -277,7 +277,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dynamic_range_rgb = 0; /*full range*/ uint8_t dynamic_range_ycbcr = 1; /*bt709*/ @@ -380,7 +379,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; if (REG(DP_MSA_TIMING_PARAM1)) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c index db7557a1c613..8a3fbf95c48f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c @@ -76,7 +76,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MAS mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C, value); - temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT; @@ -112,7 +111,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MAS mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L, value); - temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT; diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index 8db9f7514466..889f314cac65 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -717,7 +717,7 @@ static struct link_encoder *dce60_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index eaed5d1c398a..dcd2cdfe91eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -365,23 +365,18 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 11 segments - * segment is from 2^-10 to 2^1 + /* 13 segments + * segment is from 2^-12 to 2^0 * There are less than 256 points, for optimization */ - seg_distr[0] = 3; - seg_distr[1] = 4; - seg_distr[2] = 4; - seg_distr[3] = 4; - seg_distr[4] = 4; - seg_distr[5] = 4; - seg_distr[6] = 4; - seg_distr[7] = 4; - seg_distr[8] = 4; - seg_distr[9] = 4; - seg_distr[10] = 1; - - region_start = -10; + const uint8_t SEG_COUNT = 12; + + for (i = 0; i < SEG_COUNT; i++) + seg_distr[i] = 4; + + seg_distr[SEG_COUNT] = 1; + + region_start = -SEG_COUNT; region_end = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 05df502a54f2..88cf47a5ea75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -46,7 +46,7 @@ #include "clk_mgr.h" __printf(3, 4) -unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...) +unsigned int snprintf_count(char *pbuf, unsigned int bufsize, const char *fmt, ...) { int ret_vsnprintf; unsigned int chars_printed; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index f31f0e3abfc0..0690c346f2c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -140,23 +140,18 @@ bool cm3_helper_translate_curve_to_hw_format( region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 11 segments - * segment is from 2^-10 to 2^0 + /* 13 segments + * segment is from 2^-12 to 2^0 * There are less than 256 points, for optimization */ - seg_distr[0] = 3; - seg_distr[1] = 4; - seg_distr[2] = 4; - seg_distr[3] = 4; - seg_distr[4] = 4; - seg_distr[5] = 4; - seg_distr[6] = 4; - seg_distr[7] = 4; - seg_distr[8] = 4; - seg_distr[9] = 4; - seg_distr[10] = 1; - - region_start = -10; + const uint8_t SEG_COUNT = 12; + + for (i = 0; i < SEG_COUNT; i++) + seg_distr[i] = 4; + + seg_distr[SEG_COUNT] = 1; + + region_start = -SEG_COUNT; region_end = 1; } @@ -285,157 +280,6 @@ bool cm3_helper_translate_curve_to_hw_format( return true; } -#define NUM_DEGAMMA_REGIONS 12 - - -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params) -{ - struct curve_points3 *corner_points; - struct pwl_result_data *rgb_resulted; - struct pwl_result_data *rgb; - struct pwl_result_data *rgb_plus_1; - - int32_t region_start, region_end; - int32_t i; - uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points; - - if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) - return false; - - corner_points = lut_params->corner_points; - rgb_resulted = lut_params->rgb_resulted; - hw_points = 0; - - memset(lut_params, 0, sizeof(struct pwl_params)); - memset(seg_distr, 0, sizeof(seg_distr)); - - region_start = -NUM_DEGAMMA_REGIONS; - region_end = 0; - - - for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) - seg_distr[i] = -1; - /* 12 segments - * segments are from 2^-12 to 0 - */ - for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++) - seg_distr[i] = 4; - - for (k = 0; k < MAX_REGIONS_NUMBER; k++) { - if (seg_distr[k] != -1) - hw_points += (1 << seg_distr[k]); - } - - j = 0; - for (k = 0; k < (region_end - region_start); k++) { - increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]); - start_index = (region_start + k + MAX_LOW_POINT) * - NUMBER_SW_SEGMENTS; - for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS; - i += increment) { - if (j == hw_points - 1) - break; - if (i >= TRANSFER_FUNC_POINTS) - return false; - rgb_resulted[j].red = output_tf->tf_pts.red[i]; - rgb_resulted[j].green = output_tf->tf_pts.green[i]; - rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; - j++; - } - } - - /* last point */ - start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS; - rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index]; - rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; - rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - - corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_start)); - corner_points[0].green.x = 
corner_points[0].red.x; - corner_points[0].blue.x = corner_points[0].red.x; - corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_end)); - corner_points[1].green.x = corner_points[1].red.x; - corner_points[1].blue.x = corner_points[1].red.x; - - corner_points[0].red.y = rgb_resulted[0].red; - corner_points[0].green.y = rgb_resulted[0].green; - corner_points[0].blue.y = rgb_resulted[0].blue; - - /* see comment above, m_arrPoints[1].y should be the Y value for the - * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) - */ - corner_points[1].red.y = rgb_resulted[hw_points - 1].red; - corner_points[1].green.y = rgb_resulted[hw_points - 1].green; - corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; - corner_points[1].red.slope = dc_fixpt_zero; - corner_points[1].green.slope = dc_fixpt_zero; - corner_points[1].blue.slope = dc_fixpt_zero; - - if (output_tf->tf == TRANSFER_FUNCTION_PQ) { - /* for PQ, we want to have a straight line from last HW X point, - * and the slope to be such that we hit 1.0 at 10000 nits. - */ - const struct fixed31_32 end_value = - dc_fixpt_from_int(125); - - corner_points[1].red.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), - dc_fixpt_sub(end_value, corner_points[1].red.x)); - corner_points[1].green.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), - dc_fixpt_sub(end_value, corner_points[1].green.x)); - corner_points[1].blue.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), - dc_fixpt_sub(end_value, corner_points[1].blue.x)); - } - - lut_params->hw_points_num = hw_points; - - k = 0; - for (i = 1; i < MAX_REGIONS_NUMBER; i++) { - if (seg_distr[k] != -1) { - lut_params->arr_curve_points[k].segments_num = - seg_distr[k]; - lut_params->arr_curve_points[i].offset = - lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]); - } - k++; - } - - if (seg_distr[k] != -1) - lut_params->arr_curve_points[k].segments_num = seg_distr[k]; - - rgb = rgb_resulted; - rgb_plus_1 = rgb_resulted + 1; - - i = 1; - while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); - rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); - rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); - - ++rgb_plus_1; - ++rgb; - ++i; - } - cm3_helper_convert_to_custom_float(rgb_resulted, - lut_params->corner_points, - hw_points, false); - - return true; -} - bool cm3_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, struct curve_points3 *corner_points, diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c index f496e952ceec..d01a8b8f9595 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c @@ -255,7 +255,6 @@ void enc1_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dp_pixel_encoding = 0; uint8_t dp_component_depth = 0; @@ -362,7 +361,6 @@ void enc1_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = 
colorimetry_bpc << 5; switch (output_color_space) { diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c index 5b343f745cf3..ae81451a3a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c @@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc) REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); } +static bool enc314_is_fifo_enabled(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val; + + REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val); + return (reset_val != 0); +} + void enc314_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) @@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = { .enable_fifo = enc314_enable_fifo, .disable_fifo = enc314_disable_fifo, + .is_fifo_enabled = enc314_is_fifo_enabled, .set_input_mode = enc314_set_dig_input_mode, }; diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c index 0a27e0942a12..098c2a01a850 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c @@ -447,7 +447,6 @@ void enc401_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dp_pixel_encoding = 0; uint8_t dp_component_depth = 0; @@ -603,7 +602,6 @@ void enc401_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; switch (output_color_space) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 9405c47ee2a9..f81e5a4e1d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -143,7 +143,7 @@ void generic_reg_wait(const struct dc_context *ctx, unsigned int delay_between_poll_us, unsigned int time_out_num_tries, const char *func_name, int line); -unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...); +unsigned int snprintf_count(char *pBuf, unsigned int bufSize, const char *fmt, ...); /* These macros need to be used with soc15 registers in order to retrieve * the actual offset. 
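Note on the snprintf_count() hunks above: the format parameter becomes const char *fmt in both the definition (dcn10_hw_sequencer_debug.c) and the prototype (dm_services.h) — the usual printf-style convention, since the function never writes through fmt, and consistent with the __printf(3, 4) format-checking attribute already on the definition. A minimal sketch of a helper with this shape, assuming a vsnprintf()-based body; the hunk only shows the ret_vsnprintf and chars_printed declarations, so the clamping below is an assumption:

#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/stdarg.h>

__printf(3, 4)
unsigned int snprintf_count(char *pbuf, unsigned int bufsize, const char *fmt, ...)
{
	int ret_vsnprintf;
	unsigned int chars_printed = 0;
	va_list args;

	va_start(args, fmt);
	ret_vsnprintf = vsnprintf(pbuf, bufsize, fmt, args);
	va_end(args);

	/* vsnprintf() reports the length the full string would have had;
	 * clamp to what actually fits in the buffer. */
	if (ret_vsnprintf > 0 && bufsize > 0)
		chars_printed = min_t(unsigned int, ret_vsnprintf, bufsize - 1);

	return chars_printed;
}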
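Note on the cm_helper_translate_curve_to_hw_format() and cm3_helper_translate_curve_to_hw_format() hunks above: both replace the hard-coded 11-entry seg_distr table (2^3 + 9 * 2^4 + 2^1 = 154 points, starting at 2^-10) with a loop programming twelve 16-point regions plus one final 2-point region starting at 2^-12, i.e. 12 * 16 + 2 = 194 points, still inside the 256-point budget the comment cites. A standalone arithmetic check of the new layout (illustrative only, reusing the hw_points += 1 << seg_distr[k] counting style from the helpers):

#include <stdio.h>

int main(void)
{
	unsigned int seg_distr[13], points = 0, i;

	for (i = 0; i < 12; i++)	/* SEG_COUNT regions of 2^4 points each */
		seg_distr[i] = 4;
	seg_distr[12] = 1;		/* final region: 2^1 points */

	for (i = 0; i < 13; i++)
		points += 1u << seg_distr[i];

	printf("%u hw points\n", points);	/* prints: 194 hw points */
	return 0;
}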
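Note on the backlight interface rework: dc_types.h above introduces struct set_backlight_level_params and enum backlight_control_type, and the dce110/dcn21/dcn31 hwseq hunks later in this diff switch the set_backlight_level() implementations from two scalar arguments (backlight_pwm_u16_16, frame_ramp) to a pointer to this struct. A hypothetical wrapper showing the shape of the migration — the wrapper itself is illustrative, only the field and function names come from the hunks:

/* Pack the legacy scalar arguments into the new params struct and call the
 * updated DCE11 implementation; unset fields stay zero-initialized. */
static bool set_pwm_backlight(struct pipe_ctx *pipe_ctx,
			      uint32_t backlight_pwm_u16_16,
			      uint32_t frame_ramp)
{
	struct set_backlight_level_params params = {
		.backlight_pwm_u16_16 = backlight_pwm_u16_16,
		.frame_ramp = frame_ramp,
		.control_type = BACKLIGHT_CONTROL_PWM,	/* PWM path, per the enum above */
	};

	return dce110_set_backlight_level(pipe_ctx, &params);
}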
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 565f3c492477..0c8c4a080c50 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -785,12 +785,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 9d6675ecc5f1..c935903b68e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -845,12 +845,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 4fce64a030b6..390c1a77fda6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 3fa9a5da02f6..843d6004258c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index eb3ed965e48b..cd8cca651419 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -1049,12 +1049,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 9e1c18b90805..5718000627b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -435,8 +435,6 @@ static void get_meta_and_pte_attr( blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -485,8 +483,6 @@ static void get_meta_and_pte_attr( - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 1c10ba4dcdde..cee1b351e105 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -1280,12 +1280,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; @@ -1775,15 +1772,6 @@ static unsigned int CalculateVMAndRowBytes( *PixelPTEReqWidth = 32768.0 / BytePerPixel; *PTERequestSize = 64; FractionOfPTEReturnDrop = 0; - } else if (MacroTileSizeBytes == 4096) { - PixelPTEReqHeightPTEs = 1; - *PixelPTEReqHeight = MacroTileHeight; - *PixelPTEReqWidth = 8 * *MacroTileWidth; - *PTERequestSize = 64; - if (ScanDirection != dm_vert) - FractionOfPTEReturnDrop = 0; - else - FractionOfPTEReturnDrop = 7.0 / 8; } else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) { PixelPTEReqHeightPTEs = 16; *PixelPTEReqHeight = 16 * BlockHeight256Bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index b28fcc8608ff..76d3bb3c9155 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -392,8 +392,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double)blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -464,8 +462,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 2b275e680379..f567a9023682 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1444,12 +1444,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index b57b095cd4a8..c46bda2141ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -413,8 +413,6 @@ static void get_meta_and_pte_attr( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -481,8 +479,6 @@ static void get_meta_and_pte_attr( log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index debfa31583a6..5865e8fa2d8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -1461,12 +1461,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61b3bebf24c9..b7d2a0caec11 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -501,8 +501,6 @@ static void get_meta_and_pte_attr( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -569,8 +567,6 @@ static void get_meta_and_pte_attr( log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index d92fb428ee96..86ac7d59fd32 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -4097,12 +4097,9 @@ bool dml32_CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index a201dbb743d7..d9e63c4fdd95 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -204,8 +204,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .num_states = 8, .sr_exit_time_us = 28.0, .sr_enter_plus_exit_time_us = 30.0, - .sr_exit_z8_time_us = 250.0, - .sr_enter_plus_exit_z8_time_us = 350.0, + .sr_exit_z8_time_us = 263.0, + .sr_enter_plus_exit_z8_time_us = 363.0, .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index d8bfc85e5dcd..88dc2b97e7bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -559,12 +559,11 @@ static void get_surf_rq_param( const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param, bool is_chroma) { - bool mode_422 = 0; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; unsigned int meta_pitch = 0; - unsigned int ppe = mode_422 ? 
2 : 1; + unsigned int ppe = 1; bool surf_linear; bool surf_vert; unsigned int bytes_per_element; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 8697eac1e1f7..138b4b1e42ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -859,7 +859,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->immediate_flip = plane_state->flip_immediate; plane->composition.rect_out_height_spans_vactive = - plane_state->dst_rect.height >= stream->timing.v_addressable && + plane_state->dst_rect.height >= stream->src.height && stream->dst.height >= stream->timing.v_addressable; } @@ -1036,6 +1036,7 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported; context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0; context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz; + context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz; } void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index d35dd507cb9f..bbc28b9a15a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -13,11 +13,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) { - *dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); if (!(*dml_ctx)) return false; - (*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); + (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); if (!((*dml_ctx)->v21.dml_init.dml2_instance)) return false; @@ -27,7 +27,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) (*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config; (*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config; - (*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); + (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); if (!((*dml_ctx)->v21.mode_programming.programming)) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 83fc15bf13cf..25b607e7b726 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -88,6 +88,7 @@ struct dml2_display_arb_regs { uint32_t sdpif_request_rate_limit; uint32_t allow_sdpif_rate_limit_when_cstate_req; uint32_t dcfclk_deep_sleep_hysteresis; + uint32_t 
pstate_stall_threshold; }; struct dml2_cursor_dlg_regs{ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index 0aa4e4d343b0..3d41ffde91c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -159,6 +159,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters phantom->timing.v_total = meta->v_total; phantom->timing.v_active = meta->v_active; phantom->timing.v_front_porch = meta->v_front_porch; + phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active; phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active; phantom->timing.drr_config.enabled = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 3ea54fd52e46..92e43a1e4dd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -12236,6 +12236,8 @@ static void rq_dlg_get_dlg_reg( static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param) { + double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz; + arb_param->max_req_outstanding = mode_lib->soc.max_outstanding_reqs; arb_param->min_req_outstanding = mode_lib->soc.max_outstanding_reqs; // turn off the sat level feature if this set to max arb_param->sdpif_request_rate_limit = (3 * mode_lib->ip.words_per_channel * mode_lib->soc.clk_table.dram_config.channel_count) / 4; @@ -12247,6 +12249,7 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co arb_param->compbuf_size = mode_lib->mp.CompressedBufferSizeInkByte / mode_lib->ip.compressed_buffer_segment_size_in_kbytes; arb_param->allow_sdpif_rate_limit_when_cstate_req = dml_get_hw_debug5(mode_lib); arb_param->dcfclk_deep_sleep_hysteresis = dml_get_dcfclk_deep_sleep_hysteresis(mode_lib); + arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index ab229e1598ae..714b5c39b7e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -425,6 +425,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters phantom->timing.v_total = meta->v_total; phantom->timing.v_active = meta->v_active; phantom->timing.v_front_porch = meta->v_front_porch; + phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active; phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active; phantom->timing.drr_config.enabled = false; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index dd9971867f74..92269f0e50ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -1799,6 +1799,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp } if (s->pmo_dcn4.num_pstate_candidates > 0) { + s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true; s->pmo_dcn4.cur_pstate_candidate = -1; return true; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 866b0abcff1b..9190c1328d5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -209,8 +209,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1; if (odms_needed <= unused_dpps) { - unused_dpps -= odms_needed; - if (odms_needed == 1) { p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1; optimization_done = true; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h index cd1706d301e7..f09cba8e29cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -690,6 +690,7 @@ struct dcn20_dpp { int lb_memory_size; int lb_bits_per_entry; bool is_write_to_ram_a_safe; + bool dispclk_r_gate_disable; struct scaler_data scl_data; struct pwl_params pwl_data; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index b110f35ef66b..f236824126e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -572,6 +572,7 @@ struct dcn3_dpp { int lb_memory_size; int lb_bits_per_entry; bool is_write_to_ram_a_safe; + bool dispclk_r_gate_disable; struct scaler_data scl_data; struct pwl_params pwl_data; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index 8473c694bfdc..62b7012cda43 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -50,13 +50,21 @@ void dpp35_dppclk_control( DPPCLK_RATE_CONTROL, dppclk_div, DPP_CLOCK_ENABLE, 1); else - REG_UPDATE_2(DPP_CONTROL, + if (dpp->dispclk_r_gate_disable) + REG_UPDATE_2(DPP_CONTROL, DPP_CLOCK_ENABLE, 1, DISPCLK_R_GATE_DISABLE, 1); + else + REG_UPDATE(DPP_CONTROL, + DPP_CLOCK_ENABLE, 1); } else - REG_UPDATE_2(DPP_CONTROL, + if (dpp->dispclk_r_gate_disable) + REG_UPDATE_2(DPP_CONTROL, DPP_CLOCK_ENABLE, 0, DISPCLK_R_GATE_DISABLE, 0); + else + REG_UPDATE(DPP_CONTROL, + DPP_CLOCK_ENABLE, 0); } void dpp35_program_bias_and_scale_fcnv( @@ -128,6 +136,10 @@ bool dpp35_construct( (const struct dcn3_dpp_mask *)(tf_mask)); dpp->base.funcs = &dcn35_dpp_funcs; + + // w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs + if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10) + dpp->dispclk_r_gate_disable = true; return ret; } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c 
b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c index 5105fd580017..2f92e7d4981b 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c @@ -1091,7 +1091,8 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, /* ISHARP_DELTA_LUT */ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta); dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level; - dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta; + memcpy(dpp->scl_data.dscl_prog_data.isharp_delta, scl_data->dscl_prog_data.isharp_delta, + sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE); if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) return; diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h index bd98b327a6c7..b86347c9b038 100644 --- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h @@ -63,10 +63,6 @@ bool cm3_helper_translate_curve_to_hw_format( const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint); -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params); - bool cm3_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, struct curve_points3 *corner_points, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index f344478e9bd4..b099989d9364 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -443,7 +443,6 @@ struct gpio *dal_gpio_create_irq( case GPIO_ID_GPIO_PAD: break; default: - id = GPIO_ID_HPD; ASSERT_CRITICAL(false); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h index a1e2cde9c4cc..4bd1dda07719 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h @@ -198,6 +198,8 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_B; + uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL1; + uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL2; }; #define HUBBUB_REG_FIELD_LIST_DCN32(type) \ @@ -313,7 +315,12 @@ struct dcn_hubbub_registers { type DCN_VM_ERROR_VMID;\ type DCN_VM_ERROR_TABLE_LEVEL;\ type DCN_VM_ERROR_PIPE;\ - type DCN_VM_ERROR_INTERRUPT_STATUS + type DCN_VM_ERROR_INTERRUPT_STATUS;\ + type DCHUBBUB_TIMEOUT_ERROR_STATUS;\ + type DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD;\ + type DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD;\ + type DCHUBBUB_TIMEOUT_DETECTION_EN;\ + type DCHUBBUB_TIMEOUT_TIMER_RESET #define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index 37d26fa0b6fb..5d658e9bef64 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -1192,6 +1192,17 @@ static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) } } +static void dcn401_program_timeout_thresholds(struct hubbub *hubbub, struct 
dml2_display_arb_regs *arb_regs) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* request backpressure and outstanding return threshold (unused)*/ + //REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, arb_regs->req_stall_threshold); + + /* P-State stall threshold */ + REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, arb_regs->pstate_stall_threshold); +} + static const struct hubbub_funcs hubbub4_01_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, @@ -1215,6 +1226,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .program_det_segments = dcn401_program_det_segments, .program_compbuf_segments = dcn401_program_compbuf_segments, .wait_for_det_update = dcn401_wait_for_det_update, + .program_timeout_thresholds = dcn401_program_timeout_thresholds, }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h index f35f19ba3e18..5f1960722ebd 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h @@ -123,8 +123,12 @@ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ - HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) - + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_ERROR_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_DETECTION_EN, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_TIMER_RESET, mask_sh) bool hubbub401_program_urgent_watermarks( struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 4fbed0298adf..81f4c386c287 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1039,7 +1039,8 @@ void dce110_edp_backlight_control( link_transmitter_control(ctx->dc_bios, &cntl); if (enable && link->dpcd_sink_ext_caps.bits.oled && - !link->dc->config.edp_no_power_sequencing) { + !link->dc->config.edp_no_power_sequencing && + !link->local_sink->edid_caps.panel_patch.oled_optimize_display_on) { post_T7_delay += link->panel_config.pps.extra_post_t7_ms; msleep(post_T7_delay); } @@ -3142,9 +3143,10 @@ static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) } bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; struct dc_link *link = pipe_ctx->stream->link; struct dc *dc = link->ctx->dc; struct abm *abm = pipe_ctx->stream_res.abm; @@ -3315,7 +3317,7 @@ void dce110_disable_link_output(struct dc_link *link, * from enable/disable link output and only call edp panel control * in enable_link_dp and 
disable_link_dp once. */ - if (dmcu != NULL && dmcu->funcs->lock_phy) + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index ed3cc3648e8e..06789ac3a224 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -88,8 +88,7 @@ void dce110_edp_wait_for_hpd_ready( bool power_up); bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx); void dce110_set_pipe(struct pipe_ctx *pipe_ctx); void dce110_disable_link_output(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index a6a1db5ba8ba..681bb92c6069 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -3453,7 +3453,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index a80c08582932..05424a9af58b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1458,8 +1458,12 @@ void dcn20_pipe_control_lock( } else { if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); - else - pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + else { + if (dc->hwseq->funcs.perform_3dlut_wa_unlock) + dc->hwseq->funcs.perform_3dlut_wa_unlock(pipe); + else + pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + } } } @@ -1732,7 +1736,6 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.scaler || plane_state->update_flags.bits.scaling_change || plane_state->update_flags.bits.position_change || - plane_state->update_flags.bits.clip_size_change || plane_state->update_flags.bits.per_pixel_alpha_change || pipe_ctx->stream->update_flags.bits.scaling) { pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; @@ -1745,7 +1748,6 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.position_change) || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || - (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) || (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { hubp->funcs->mem_program_viewport( @@ -2056,22 +2058,15 @@ void dcn20_program_front_end_for_ctx( */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && - dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { + 
dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; if (tg->funcs->enable_crtc) { - if (dc->hwss.blank_phantom) { - int main_pipe_width = 0, main_pipe_height = 0; - struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); - - if (phantom_stream) { - main_pipe_width = phantom_stream->dst.width; - main_pipe_height = phantom_stream->dst.height; - } - - dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); + if (dc->hwseq->funcs.blank_pixel_data) { + dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); } tg->funcs->enable_crtc(tg); } @@ -2255,9 +2250,9 @@ void dcn20_post_unlock_program_front_end( struct timing_generator *tg = pipe->stream_res.tg; - if (tg->funcs->get_double_buffer_pending) { + if (tg->funcs->get_optc_double_buffer_pending) { for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us - && tg->funcs->get_double_buffer_pending(tg); j++) + && tg->funcs->get_optc_double_buffer_pending(tg); j++) udelay(polling_interval_us); } } @@ -2771,7 +2766,6 @@ void dcn20_reset_back_end_for_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { - int i; struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); @@ -2838,19 +2832,16 @@ void dcn20_reset_back_end_for_pipe( } } - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) - break; - - if (i == dc->res_pool->pipe_count) - return; - /* * In case of a dangling plane, setting this to NULL unconditionally * causes failures during reset hw ctx where, if stream is NULL, * it is expected that the pipe_ctx pointers to pipes and plane are NULL. 
*/ pipe_ctx->stream = NULL; + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 1ea95f8d4cbc..61efb15572ff 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -137,7 +137,7 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dpms_off = true; } -static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, +bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst) { union dmub_rb_cmd cmd; @@ -199,7 +199,7 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst, @@ -234,7 +234,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, otg_inst, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, panel_cntl->pwrseq_inst); @@ -242,14 +242,15 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) } bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; uint32_t otg_inst; + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; if (!abm || !tg || !panel_cntl) return false; @@ -257,7 +258,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { - dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); + dce110_set_backlight_level(pipe_ctx, backlight_level_params); return true; } @@ -268,7 +269,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h index 9cee9bdb8de9..f72a27ac1bf1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h @@ -47,11 +47,12 @@ void dcn21_optimize_pwr_state( void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx); +bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, + uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst); void dcn21_set_pipe(struct pipe_ctx *pipe_ctx); void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx); bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); bool dcn21_is_abm_supported(struct dc *dc, struct 
dc_state *context, struct dc_stream_state *stream); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index bded33575493..e89ebfda4873 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -245,6 +245,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; int acquired_rmu = 0; @@ -283,8 +284,14 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, stream->lut3d_func->state.bits.rmu_mux_num); + if (!result) + DC_LOG_ERROR("%s: program_3dlut failed\n", __func__); + result = mpc->funcs->program_shaper(mpc, shaper_lut, stream->lut3d_func->state.bits.rmu_mux_num); + if (!result) + DC_LOG_ERROR("%s: program_shaper failed\n", __func__); + } else { // loop through the available mux and release the requested mpcc_id mpc->funcs->release_rmu(mpc, mpcc_id); @@ -486,7 +493,6 @@ bool dcn30_mmhubbub_warmup( } /*following is the original: warmup each DWB's mcif buffer*/ for (i = 0; i < num_dwb; i++) { - dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst]; /*warmup is for VM mode only*/ if (wb_info[i].mcif_buf_params.p_vmid == 0) @@ -1185,3 +1191,30 @@ void dcn30_prepare_bandwidth(struct dc *dc, if (!dc->clk_mgr->clks.fw_based_mclk_switching) dc_dmub_srv_p_state_delegate(dc, false, context); } + +void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + bool pending_updates = false; + unsigned int i; + + if (tg && tg->funcs->is_tg_enabled(tg)) { + // Poll for 100ms maximum + for (i = 0; i < 100000; i++) { + pending_updates = false; + if (tg->funcs->get_optc_double_buffer_pending) + pending_updates |= tg->funcs->get_optc_double_buffer_pending(tg); + + if (tg->funcs->get_otg_double_buffer_pending) + pending_updates |= tg->funcs->get_otg_double_buffer_pending(tg); + + if (tg->funcs->get_pipe_update_pending && pipe_ctx->plane_state) + pending_updates |= tg->funcs->get_pipe_update_pending(tg); + + if (!pending_updates) + break; + + udelay(1); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index 6a153e7ce910..4b90b781c4f2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -96,4 +96,6 @@ void dcn30_set_hubp_blank(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 2a8dc40d2847..0e8d32e3dbae 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -108,7 +108,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .update_visual_confirm_color = 
dcn10_update_visual_confirm_color, - .is_abm_supported = dcn21_is_abm_supported + .is_abm_supported = dcn21_is_abm_supported, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c index 93e49d87a67c..780ce4c064aa 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c @@ -107,6 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h index 0bca48ccbfa2..a6e0115a53ee 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h @@ -23,8 +23,8 @@ * */ -#ifndef __DC_DCN30_INIT_H__ -#define __DC_DCN30_INIT_H__ +#ifndef __DC_DCN301_INIT_H__ +#define __DC_DCN301_INIT_H__ struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 3d4b31bd9946..03ba01f4ace1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -47,9 +47,11 @@ #include "dce/dmub_outbox.h" #include "link.h" #include "dcn10/dcn10_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" #include "dce/dce_i2c_hw.h" +#include "dce/dmub_abm_lcd.h" #define DC_LOGGER_INIT(logger) @@ -517,10 +519,18 @@ static void dcn31_reset_back_end_for_pipe( dc->hwss.set_abm_immediate_disable(pipe_ctx); + link = pipe_ctx->stream->link; + + if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) && + (link->connector_signal == SIGNAL_TYPE_EDP)) + dc->hwss.blank_stream(pipe_ctx); + pipe_ctx->stream_res.tg->funcs->set_dsc_config( pipe_ctx->stream_res.tg, OPTC_DSC_DISABLED, 0, 0); + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) pipe_ctx->stream_res.tg->funcs->set_odm_bypass( @@ -532,7 +542,6 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - link = pipe_ctx->stream->link; /* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot * feature. 
When system resume from S4 with second @@ -633,3 +642,51 @@ void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, triggers, params->num_frames); } + +static void dmub_abm_set_backlight(struct dc_context *dc, + struct set_backlight_level_params *backlight_level_params, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = backlight_level_params->frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_level_params->backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_control_type = + (enum dmub_backlight_control_type) backlight_level_params->control_type; + cmd.abm_set_backlight.abm_set_backlight_data.min_luminance = backlight_level_params->min_luminance; + cmd.abm_set_backlight.abm_set_backlight_data.max_luminance = backlight_level_params->max_luminance; + cmd.abm_set_backlight.abm_set_backlight_data.min_backlight_pwm = backlight_level_params->min_backlight_pwm; + cmd.abm_set_backlight.abm_set_backlight_data.max_backlight_pwm = backlight_level_params->max_backlight_pwm; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, + struct set_backlight_level_params *backlight_level_params) +{ + struct dc_context *dc = pipe_ctx->stream->ctx; + struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; + + dcn21_dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + + dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h index b8bc939da155..0d09aa8cfb65 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h @@ -51,6 +51,8 @@ int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_ void dcn31_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); +bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, + struct set_backlight_level_params *params); bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); @@ -59,5 +61,4 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params); - #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 56f3c70d4b55..5f8f45b48720 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -98,7 +98,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 4e93eeedfc1b..9b88eb72086d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider( } } +static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe) +{ + return pipe && pipe->stream + // Check dig's otg instance. + && pipe->stream_res.stream_enc + && pipe->stream_res.stream_enc->funcs->dig_source_otg + && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc) + && pipe->stream->link && pipe->stream->link->link_enc + && pipe->stream->link->link_enc->funcs->is_dig_enabled + && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc) + && pipe->stream_res.stream_enc->funcs->is_fifo_enabled + && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc); +} + void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx) { unsigned int i; @@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) && + !pipe->stream->apply_seamless_boot_optimization && + !pipe->stream->apply_edp_fast_boot_optimization) { + if (dcn314_is_pipe_dig_fifo_on(pipe)) + continue; pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -478,7 +496,7 @@ void dcn314_disable_link_output(struct dc_link *link, * from enable/disable link output and only call edp panel control * in enable_link_dp and disable_link_dp once. 
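 * The NULL check below must test unlock_phy, the hook that is actually
 * called, rather than lock_phy; otherwise a DMCU that implements
 * lock_phy but not unlock_phy would be invoked through a NULL function
 * pointer.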
*/ - if (dmcu != NULL && dmcu->funcs->lock_phy) + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index 68e6de6b5758..6bdfbf22ce87 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -100,7 +100,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 2e8c9f738259..d7f8b2dcaa6b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -439,6 +439,7 @@ bool dcn32_set_mpc_shaper_3dlut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; @@ -458,13 +459,13 @@ bool dcn32_set_mpc_shaper_3dlut( if (stream->lut3d_func && stream->lut3d_func->state.bits.initialized == 1) { - result = mpc->funcs->program_3dlut(mpc, - &stream->lut3d_func->lut_3d, - mpcc_id); + result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, mpcc_id); + if (!result) + DC_LOG_ERROR("%s: program_3dlut failed\n", __func__); - result = mpc->funcs->program_shaper(mpc, - shaper_lut, - mpcc_id); + result = mpc->funcs->program_shaper(mpc, shaper_lut, mpcc_id); + if (!result) + DC_LOG_ERROR("%s: program_shaper failed\n", __func__); } return result; @@ -1398,10 +1399,10 @@ void dcn32_disable_link_output(struct dc_link *link, link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; if (signal == SIGNAL_TYPE_EDP && - link->dc->hwss.edp_backlight_control && + link->dc->hwss.edp_power_control && !link->skip_implict_edp_power_control) link->dc->hwss.edp_power_control(link, false); - else if (dmcu != NULL && dmcu->funcs->lock_phy) + else if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); @@ -1698,52 +1699,6 @@ void dcn32_init_blank( hws->funcs.wait_for_blank_complete(opp); } -void dcn32_blank_phantom(struct dc *dc, - struct timing_generator *tg, - int width, - int height) -{ - struct dce_hwseq *hws = dc->hwseq; - enum dc_color_space color_space; - struct tg_color black_color = {0}; - struct output_pixel_processor *opp = NULL; - uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; - uint32_t i; - - /* program opp dpg blank color */ - color_space = COLOR_SPACE_SRGB; - color_space_to_black_color(dc, color_space, &black_color); - - otg_active_width = width; - otg_active_height = height; - - /* get the OPTC source */ - tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); - ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); - - for (i = 0; i < 
dc->res_pool->res_cap->num_opp; i++) { - if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { - opp = dc->res_pool->opps[i]; - break; - } - } - - if (opp && opp->funcs->opp_set_disp_pattern_generator) - opp->funcs->opp_set_disp_pattern_generator( - opp, - CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, - CONTROLLER_DP_COLOR_SPACE_UDEFINED, - COLOR_DEPTH_UNDEFINED, - &black_color, - otg_active_width, - otg_active_height, - 0); - - if (tg->funcs->is_tg_enabled(tg)) - hws->funcs.wait_for_blank_complete(opp); -} - /* phantom stream id's can change often, but can be identical between contexts. * This function checks for the condition the streams are identical to avoid * redundant pipe transitions. diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cac4a08b92a4..0303a5953673 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -119,11 +119,6 @@ void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); -void dcn32_blank_phantom(struct dc *dc, - struct timing_generator *tg, - int width, - int height); - bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 3422b564ae98..5ecee7e320da 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -98,7 +98,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn30_hardware_release, .set_pipe = dcn21_set_pipe, @@ -117,10 +117,10 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, .program_outstanding_updates = dcn32_program_outstanding_updates, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn32_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index bd309dbdf7b2..e599cdc465bf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -309,6 +309,7 @@ void dcn35_init_hw(struct dc *dc) dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; + dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support; } if (dc->res_pool->pg_cntl) { @@ -841,6 +842,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) uint32_t num_opps = 0; uint32_t 
opp_id_src0 = OPP_ID_INVALID; uint32_t opp_id_src1 = OPP_ID_INVALID; + uint32_t optc_dsc_state = 0; // Step 1: To find out which OPTC is running & OPTC DSC is ON // We can't use res_pool->res_cap->num_timing_generator to check @@ -849,7 +851,6 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) // Some ASICs would be fused display pipes less than the default setting. // In dcnxx_resource_construct function, driver would obatin real information. for (i = 0; i < dc->res_pool->timing_generator_count; i++) { - uint32_t optc_dsc_state = 0; struct timing_generator *tg = dc->res_pool->timing_generators[i]; if (tg->funcs->is_tg_enabled(tg)) { @@ -864,15 +865,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) } } - // Step 2: To power down DSC but skip DSC of running OPTC + // Step 2: To power down DSC but skip DSC of running OPTC for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { struct dcn_dsc_state s = {0}; - dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); + /* avoid reading DSC state when it is not in use as it may be power gated */ + if (optc_dsc_state) { + dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); - if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && - s.dsc_clock_en && s.dsc_fw_en) - continue; + if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && + s.dsc_clock_en && s.dsc_fw_en) + continue; + } pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 2bbf1fef94fd..fd67779c27a9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -101,7 +101,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, @@ -123,7 +123,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, - .program_outstanding_updates = dcn32_program_outstanding_updates, }; static const struct hwseq_private_funcs dcn35_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index d00822e8daa5..3c275a1eff58 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -100,7 +100,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, @@ -122,7 +122,6 @@ static const struct hw_sequencer_funcs 
dcn351_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, - .program_outstanding_updates = dcn32_program_outstanding_updates, .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 0b743669f23b..e8cc1bfa73f3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -506,7 +506,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); /* 1D LUT */ - if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) { + if (mcm_luts.lut1d_func) { memset(&m_lut_params, 0, sizeof(m_lut_params)); if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; @@ -521,7 +521,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id); } if (mpc->funcs->program_lut_mode) - mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id); + mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id); } /* Shaper */ @@ -669,11 +669,17 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; + struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc; + struct mpc *mpc = dc->res_pool->mpc; bool result; const struct pwl_params *lut_params = NULL; bool rval; + if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { + dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a); + return true; + } + mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; // 1D LUT @@ -844,6 +850,13 @@ enum dc_status dcn401_enable_stream_timing( odm_slice_width, last_odm_slice_width); } + /* set DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) { + if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst); + } + } + /* HW program guide assume display already disable * by unplug sequence. OTG assume stop. 
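 * (DTBCLK_P selection moves into stream timing enable: it is routed to
 * DPREFCLK above for DP and virtual signals before the OTG is
 * programmed, and reparked on REFCLK in dcn401_reset_back_end_for_pipe.)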
*/ @@ -1004,8 +1017,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); } else { - /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */ - dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst); dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } @@ -1063,7 +1074,6 @@ static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of @@ -1097,6 +1107,58 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct } } +static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding) +{ + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + uint8_t i; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + pipe_ctx->clock_source->funcs->program_pix_clk( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + link_encoding, + &pipe_ctx->pll_settings); + break; + } + } +} + +void dcn401_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) + link->dc->hwss.edp_backlight_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) { + disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING); + link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; + } else { + link_hwss->disable_link_output(link, link_res, signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) + link->dc->hwss.edp_power_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->unlock_phy(dmcu); + + dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +} + void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -1492,6 +1554,11 @@ void dcn401_optimize_bandwidth( pipe_ctx->dlg_regs.min_dst_y_next_start); } } + + /* update timeout thresholds */ + if (hubbub->funcs->program_timeout_thresholds) { + hubbub->funcs->program_timeout_thresholds(hubbub, &context->bw_ctx.bw.dcn.arb_regs); + } } void dcn401_fams2_global_control_lock(struct dc *dc, @@ -1669,7 +1736,7 @@ void dcn401_hardware_release(struct dc *dc) } } -void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) +void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) { struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; @@ -1695,6 +1762,9 @@ void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, hubbub->funcs->wait_for_det_update) hubbub->funcs->wait_for_det_update(hubbub, 
dpp_pipe->plane_res.hubp->inst); } + } else { + if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update) + hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst); } } } @@ -1705,7 +1775,6 @@ void dcn401_interdependent_update_lock(struct dc *dc, unsigned int i = 0; struct pipe_ctx *pipe = NULL; struct timing_generator *tg = NULL; - bool pipe_unlocked[MAX_PIPES] = {0}; if (lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1719,48 +1788,91 @@ void dcn401_interdependent_update_lock(struct dc *dc, dc->hwss.pipe_control_lock(dc, pipe, true); } } else { - /* Unlock pipes based on the change in DET allocation instead of pipe index - * Prevents over allocation of DET during unlock process - * e.g. 2 pipe config with different streams with a max of 20 DET segments - * Before: After: - * - Pipe0: 10 DET segments - Pipe0: 12 DET segments - * - Pipe1: 10 DET segments - Pipe1: 8 DET segments - * If Pipe0 gets updated first, 22 DET segments will be allocated - */ + /* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; - int current_pipe_idx = i; if (!resource_is_pipe_type(pipe, OTG_MASTER) || !tg->funcs->is_tg_enabled(tg) || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { - pipe_unlocked[i] = true; continue; } - // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared - struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream); - - if (old_otg_master) - current_pipe_idx = old_otg_master->pipe_idx; - if (resource_calculate_det_for_stream(context, pipe) < - resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) { + if (dc->scratch.pipes_to_unlock_first[i]) { + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; dc->hwss.pipe_control_lock(dc, pipe, false); - pipe_unlocked[i] = true; - dcn401_wait_for_det_buffer_update(dc, context, pipe); + /* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/ + dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe); } } + /* Unlocking the rest of the pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (pipe_unlocked[i]) + if (dc->scratch.pipes_to_unlock_first[i]) continue; + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + continue; + } + dc->hwss.pipe_control_lock(dc, pipe, false); } } } +void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx) +{ + /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that + * HUBP will properly fetch 3DLUT contents after unlock. + * + * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless + * of whether OTG lock is currently being held or not. 
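+ *
+ * In outline, the unlock sequence implemented below is (for_each_wa_pipe
+ * is shorthand for the MPC/ODM walk that fills wa_pipes[], not a real
+ * macro):
+ *
+ *   tg->funcs->set_vupdate_keepout(tg, true);
+ *   for_each_wa_pipe: hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ *   tg->funcs->unlock(tg);
+ *   tg->funcs->wait_update_lock_status(tg, false);
+ *   for_each_wa_pipe: hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ *   tg->funcs->set_vupdate_keepout(tg, false);
+ *
+ * so a VREADY landing around the unlock cannot leave 3DLUT_ENABLE
+ * cancelled before HUBP fetches the LUT contents from video memory.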
+ */ + struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL }; + struct pipe_ctx *odm_pipe, *mpc_pipe; + int i, wa_pipe_ct = 0; + + for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) { + for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) { + if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src + == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM + && mpc_pipe->plane_state->mcm_shaper_3dlut_setting + == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) { + wa_pipes[wa_pipe_ct++] = mpc_pipe; + } + } + } + + if (wa_pipe_ct > 0) { + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true); + + for (i = 0; i < wa_pipe_ct; ++i) { + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); + } + + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status) + pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false); + + for (i = 0; i < wa_pipe_ct; ++i) { + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); + } + + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false); + } else { + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + } +} + void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context) { @@ -1770,3 +1882,125 @@ void dcn401_program_outstanding_updates(struct dc *dc, if (hubbub->funcs->program_compbuf_segments) hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); } + +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct dc_link *link = pipe_ctx->stream->link; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + + DC_LOGGER_INIT(dc->ctx->logger); + if (pipe_ctx->stream_res.stream_enc == NULL) { + pipe_ctx->stream = NULL; + return; + } + + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; + } + } + + /* by upper caller loop, parent pipe: pipe0, will be reset last. + * back end share by all pipes and will be disable only when disable + * parent pipe. 
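+ *
+ * For that parent (top_pipe == NULL) the teardown below disables ABM,
+ * stops the CRTC and the OPTC clock, restores ODM bypass and DRR,
+ * zeroes the otg symclk refcount for HDMI TMDS, drives the PHY to
+ * SYMCLK_OFF_TX_OFF if it was left in SYMCLK_ON_TX_OFF, and reparks
+ * DTBCLK_P on REFCLK.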
+ */ + if (pipe_ctx->top_pipe == NULL) { + + dc->hwss.set_abm_immediate_disable(pipe_ctx); + + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); + if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) + pipe_ctx->stream_res.tg->funcs->set_odm_bypass( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, NULL); + /* TODO - convert symclk_ref_cnts for otg to a bit map to solve + * the case where the same symclk is shared across multiple otg + * instances + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link->phy_state.symclk_ref_cnts.otg = 0; + if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { + link_hwss->disable_link_output(link, + &pipe_ctx->link_res, pipe_ctx->stream->signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + /* reset DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst); + } + +/* + * In case of a dangling plane, setting this to NULL unconditionally + * causes failures during reset hw ctx where, if stream is NULL, + * it is expected that the pipe_ctx pointers to pipes and plane are NULL. + */ + pipe_ctx->stream = NULL; + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; + DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", + pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); +} + +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context) +{ + int i; + struct dce_hwseq *hws = dc->hwseq; + + /* Reset Back End*/ + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || + pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index a27e62081685..28a513dfc005 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -55,6 +55,10 @@ void dcn401_populate_mcm_luts(struct dc *dc, bool lut_bank_a); void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); +void dcn401_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx); bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable); @@ -81,7 +85,16 @@ void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); -void 
dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context); +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index a2ca07235c83..23e4f208152e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -77,14 +77,14 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations, .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn401_hardware_release, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dcn32_disable_link_output, + .disable_link_output = dcn401_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .enable_phantom_streams = dcn32_enable_phantom_streams, @@ -93,13 +93,13 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .fams2_global_control_lock = dcn401_fams2_global_control_lock, .fams2_update_config = dcn401_fams2_update_config, .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, .program_outstanding_updates = dcn401_program_outstanding_updates, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn401_private_funcs = { @@ -111,7 +111,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, @@ -136,8 +136,9 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, - .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, + 
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, + .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index ac9205625623..66fdc5805d0a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -365,8 +365,7 @@ struct hw_sequencer_funcs { void (*clear_status_bits)(struct dc *dc, unsigned int mask); bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx); @@ -462,6 +461,7 @@ struct hw_sequencer_funcs { void (*program_outstanding_updates)(struct dc *dc, struct dc_state *context); void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); + void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx); }; void color_space_to_black_color( @@ -504,6 +504,10 @@ void get_mclk_switch_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_cursor_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void set_p_state_switch_method( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 0ac675456979..22a5d4a03c98 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -182,6 +182,7 @@ struct hwseq_private_funcs { struct pipe_ctx *pipe_ctx, struct dc_cm2_func_luts mcm_luts, bool lut_bank_a); + void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fa5edd03d004..b5afd8c3103d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -60,5 +60,7 @@ enum dc_status { }; char *dc_status_to_str(enum dc_status status); +char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding); +char *dc_color_depth_to_str(enum dc_color_depth color_depth); #endif /* _CORE_STATUS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index bfb8b8502d20..8597e866bfe6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -215,6 +215,10 @@ struct resource_funcs { void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx); + /* + * Get indicator of power from a context that went through full validation + */ + int (*get_power_profile)(const struct dc_state *context); }; struct audio_support{ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 67c32401893e..6c1d41c0f099 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -228,6 +228,7 @@ struct hubbub_funcs { void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void 
(*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); + void (*program_timeout_thresholds)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 3d4c8bd42b49..b74e18cc1e66 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -342,7 +342,11 @@ struct timing_generator_funcs { void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); - bool (*get_double_buffer_pending)(struct timing_generator *tg); + bool (*get_optc_double_buffer_pending)(struct timing_generator *tg); + bool (*get_otg_double_buffer_pending)(struct timing_generator *tg); + bool (*get_pipe_update_pending)(struct timing_generator *tg); + void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable); + bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 72a8479e1f2d..f04292086c08 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -248,8 +248,7 @@ struct link_service { uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak); bool (*edp_set_backlight_level)(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); bool (*edp_set_backlight_level_nits)(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index d21ee9d12d26..e026c728042a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -48,6 +48,9 @@ #include "dm_helpers.h" #include "clk_mgr.h" + // Offset DPCD 050Eh == 0x5A +#define MST_HUB_ID_0x5A 0x5A + #define DC_LOGGER \ link->ctx->logger #define DC_LOGGER_INIT(logger) @@ -692,6 +695,15 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) link->wa_flags.dpia_mst_dsc_always_on = true; + + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_vendor_specific_data[2] == MST_HUB_ID_0x5A && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) { + link->wa_flags.dpia_mst_dsc_always_on = true; + } } static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index c4e03482ba9a..41cab9ad6885 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2082,6 +2082,9 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link_settings->link_rate == LINK_RATE_LOW) skip_video_pattern = false; + if (stream->sink_patches.oled_optimize_display_on) + 
set_default_brightness_aux(link); + if (perform_link_training_with_retries(link_settings, skip_video_pattern, lt_attempts, @@ -2105,10 +2108,14 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); - if (link->dpcd_sink_ext_caps.bits.oled == 1) - msleep(bl_oled_enable_delay); - edp_backlight_enable_aux(link, true); + if (!stream->sink_patches.oled_optimize_display_on) { + set_default_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) + msleep(bl_oled_enable_delay); + edp_backlight_enable_aux(link, true); + } else { + edp_backlight_enable_aux(link, true); + } } return status; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index d78c8ec4de79..9dabaf682171 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -51,9 +51,10 @@ #include "dc_dmub_srv.h" #include "gpio_service_interface.h" +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + #define DC_LOGGER \ link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) @@ -1207,6 +1208,13 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } + + core_link_read_dpcd( + link, + DP_BRANCH_VENDOR_SPECIFIC_START, + (uint8_t *)link->dpcd_caps.branch_vendor_specific_data, + sizeof(link->dpcd_caps.branch_vendor_specific_data)); + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { union dp_dfp_cap_ext dfp_cap_ext; @@ -1625,7 +1633,11 @@ static bool retrieve_link_cap(struct dc_link *link) } /* Read DP tunneling information. 
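 * The tunneling registers are specific to USB4 DPIA endpoints, so the
 * read below is gated on DISPLAY_ENDPOINT_USB4_DPIA and a failure is
 * now reported via dm_error instead of being silently ignored.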
*/ - status = dpcd_get_tunneling_device_data(link); + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + status = dpcd_get_tunneling_device_data(link); + if (status != DC_OK) + dm_error("%s: Read tunneling device data failed.\n", __func__); + } dpcd_set_source_specific_data(link); /* Sink may need to configure internals based on vendor, so allow some @@ -1842,6 +1854,9 @@ static bool retrieve_link_cap(struct dc_link *link) DP_FEC_CAPABILITY, &link->dpcd_caps.fec_cap.raw, sizeof(link->dpcd_caps.fec_cap.raw)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_FEC_CAPABILITY) failed\n", __func__, __LINE__); + status = core_link_read_dpcd( link, DP_DSC_SUPPORT, @@ -1864,6 +1879,9 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_DSC_BRANCH_OVERALL_THROUGHPUT_0) failed\n", __func__, __LINE__); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); @@ -2055,6 +2073,14 @@ void detect_edp_sink_caps(struct dc_link *link) core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, &link->dpcd_caps.pr_info.max_deviation_line, sizeof(link->dpcd_caps.pr_info.max_deviation_line)); + + /* + * OLED Emission Rate info + */ + if (link->dpcd_sink_ext_caps.bits.emission_output) + core_link_read_dpcd(link, DP_SINK_EMISSION_RATE, + (uint8_t *)&link->dpcd_caps.edp_oled_emission_rate, + sizeof(link->dpcd_caps.edp_oled_emission_rate)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 6af42ba9885c..0d123e647652 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -59,12 +59,18 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) dpcd_dp_tun_data, sizeof(dpcd_dp_tun_data)); + if (status != DC_OK) + goto err; + status = core_link_read_dpcd( link, DP_USB4_ROUTER_TOPOLOGY_ID, dpcd_topology_data, sizeof(dpcd_topology_data)); + if (status != DC_OK) + goto err; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = @@ -75,6 +81,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; +err: return status; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 96bf135b6f05..48abeaa88678 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -221,21 +221,11 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link) &replay_error_status.raw, sizeof(replay_error_status.raw)); - link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = - replay_error_status.bits.LINK_CRC_ERROR; - 
link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = - replay_configuration.bits.DESYNC_ERROR_STATUS; - link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR = - replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS; - - if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR || - link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR || - link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { + if (replay_error_status.bits.LINK_CRC_ERROR || + replay_configuration.bits.DESYNC_ERROR_STATUS || + replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) { bool allow_active; - if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR) - link->replay_settings.config.received_desync_error_hpd = 1; - if (link->replay_settings.config.force_disable_desync_error_check) return; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 27b881f947e8..754c895e1bfb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -272,7 +272,7 @@ void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { - fsleep(wait_in_micro_secs); + usleep_range_state(wait_in_micro_secs, wait_in_micro_secs, TASK_UNINTERRUPTIBLE); DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -1107,9 +1107,13 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_DOWNSPREAD_CTRL) failed\n", __func__, __LINE__); status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LANE_COUNT_SET) failed\n", __func__, __LINE__); if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { @@ -1125,12 +1129,19 @@ enum dc_status dpcd_set_link_settings( supported_link_rates, sizeof(supported_link_rates)); } status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__); + status = core_link_write_dpcd(link, DP_LINK_RATE_SET, <_settings->link_settings.link_rate_set, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_RATE_SET) failed\n", __func__, __LINE__); } else { rate = get_dpcd_link_rate(<_settings->link_settings); status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__); } if (rate) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index b5cf75975fff..ccf8096dde29 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -412,7 +412,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( /* 5. 
check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 3aa05a2be6c0..e0e3bb865359 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -157,31 +157,13 @@ bool edp_set_backlight_level_nits(struct dc_link *link, uint32_t backlight_millinits, uint32_t transition_time_in_ms) { - struct dpcd_source_backlight_set dpcd_backlight_set; - uint8_t backlight_control = isHDR ? 1 : 0; - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - // OLEDs have no PWM, they can only use AUX - if (link->dpcd_sink_ext_caps.bits.oled == 1) - backlight_control = 1; - - *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; - *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; - - - if (!link->dpcd_caps.panel_luminance_control) { - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *)(&dpcd_backlight_set), - sizeof(dpcd_backlight_set)) != DC_OK) - return false; - - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, - &backlight_control, 1) != DC_OK) - return false; - } else { + // use internal backlight control if dmub capabilities are not present + if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && + !link->dc->caps.dmub_caps.aux_backlight_support) { uint8_t backlight_enable = 0; struct target_luminance_value *target_luminance = NULL; @@ -205,6 +187,24 @@ bool edp_set_backlight_level_nits(struct dc_link *link, (uint8_t *)(target_luminance), sizeof(struct target_luminance_value)) != DC_OK) return false; + } else if (link->backlight_control_type == BACKLIGHT_CONTROL_AMD_AUX) { + struct dpcd_source_backlight_set dpcd_backlight_set; + *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; + *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + uint8_t backlight_control = isHDR ? 
1 : 0; + // OLEDs have no PWM, they can only use AUX + if (link->dpcd_sink_ext_caps.bits.oled == 1) + backlight_control = 1; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)(&dpcd_backlight_set), + sizeof(dpcd_backlight_set)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_control, 1) != DC_OK) + return false; } return true; @@ -519,11 +519,11 @@ static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) } bool edp_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { struct dc *dc = link->ctx->dc; - + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; DC_LOGGER_INIT(link->ctx->logger); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); @@ -544,10 +544,11 @@ bool edp_set_backlight_level(const struct dc_link *link, return false; } + backlight_level_params->frame_ramp = frame_ramp; + dc->hwss.set_backlight_level( pipe_ctx, - backlight_pwm_u16_16, - frame_ramp); + backlight_level_params); } return true; } @@ -940,8 +941,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream struct replay_context replay_context = { 0 }; unsigned int lineTimeInNs = 0; - - union replay_enable_and_configuration replay_config; + union replay_enable_and_configuration replay_config = { 0 }; union dpcd_alpm_configuration alpm_config; @@ -1168,9 +1168,6 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; if (link_res->hpo_dp_link_enc) { - if (link->wa_flags.disable_assr_for_uhbr) - return; - link_enc_index = link_res->hpo_dp_link_enc->inst; use_hpo_dp_link_enc = true; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 30dc8c24c008..bcfa6ac5d4e7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -36,8 +36,7 @@ bool edp_get_backlight_level_nits(struct dc_link *link, uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak); bool edp_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); bool edp_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index b7a57f98553d..40757f20d73f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -202,6 +202,7 @@ struct dcn_optc_registers { uint32_t OPTC_CLOCK_CONTROL; uint32_t OPTC_WIDTH_CONTROL2; uint32_t OTG_PSTATE_REGISTER; + uint32_t OTG_PIPE_UPDATE_STATUS; }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -566,6 +567,12 @@ struct dcn_optc_registers { type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;\ type OPTC_DOUBLE_BUFFER_PENDING;\ +#define TG_REG_FIELD_LIST_DCN2_0(type) \ + type OTG_FLIP_PENDING;\ + type OTG_DC_REG_UPDATE_PENDING;\ + type OTG_CURSOR_UPDATE_PENDING;\ + type OTG_VUPDATE_KEEPOUT_STATUS;\ + #define 
TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; @@ -600,6 +607,7 @@ struct dcn_optc_registers { struct dcn_optc_shift { TG_REG_FIELD_LIST(uint8_t) + TG_REG_FIELD_LIST_DCN2_0(uint8_t) TG_REG_FIELD_LIST_DCN3_2(uint8_t) TG_REG_FIELD_LIST_DCN3_5(uint8_t) TG_REG_FIELD_LIST_DCN401(uint8_t) @@ -607,6 +615,7 @@ struct dcn_optc_shift { struct dcn_optc_mask { TG_REG_FIELD_LIST(uint32_t) + TG_REG_FIELD_LIST_DCN2_0(uint32_t) TG_REG_FIELD_LIST_DCN3_2(uint32_t) TG_REG_FIELD_LIST_DCN3_5(uint32_t) TG_REG_FIELD_LIST_DCN401(uint32_t) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h index 364034b19028..928e110b95fb 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h @@ -43,7 +43,8 @@ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SR(DWB_SOURCE_SELECT),\ SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ @@ -53,6 +54,10 @@ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c index abcd03d78668..4c95c0958612 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c @@ -271,6 +271,48 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c optc1->opp_count = opp_cnt; } +/* OTG status register that indicates OPTC update is pending */ +bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t update_pending = 0; + + REG_GET(OPTC_INPUT_GLOBAL_CONTROL, + OPTC_DOUBLE_BUFFER_PENDING, + &update_pending); + + return (update_pending == 1); +} + +/* OTG status register that indicates OTG update is pending */ +bool optc3_get_otg_update_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t update_pending = 0; + + REG_GET(OTG_DOUBLE_BUFFER_CONTROL, + OTG_UPDATE_PENDING, + &update_pending); + + return (update_pending == 1); +} + +/* OTG status register that indicates surface update is pending */ +bool optc3_get_pipe_update_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t flip_pending = 0; + uint32_t dc_update_pending = 0; + + REG_GET_2(OTG_PIPE_UPDATE_STATUS, + OTG_FLIP_PENDING, + &flip_pending, + OTG_DC_REG_UPDATE_PENDING, + &dc_update_pending); + + return (flip_pending == 1 || dc_update_pending == 1); +} + /** * optc3_set_timing_double_buffer() - DRR double buffering control * @@ -375,6 +417,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_hw_timing = optc1_get_hw_timing, 
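/*
 * Illustrative sketch, not part of the patch above: one plausible way a
 * caller could combine the three new pending-status hooks before committing
 * another update. The helper name, the 1 us poll step, and the 10 ms budget
 * are assumptions for illustration only.
 */
static bool example_wait_for_no_pending_update(struct timing_generator *tg)
{
	int budget_us = 10000;

	while (budget_us-- > 0) {
		/* all three report double-buffered state that must latch first */
		if (!tg->funcs->get_optc_double_buffer_pending(tg) &&
		    !tg->funcs->get_otg_double_buffer_pending(tg) &&
		    !tg->funcs->get_pipe_update_pending(tg))
			return true;
		udelay(1);
	}
	return false;
}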
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn30_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h index bda974d432ea..e2303f9eaf13 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h @@ -109,7 +109,8 @@ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SR(DWB_SOURCE_SELECT) + SR(DWB_SOURCE_SELECT),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define DCN30_VTOTAL_REGS_SF(mask_sh) @@ -209,6 +210,7 @@ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ @@ -319,7 +321,11 @@ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh) + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn30_timing_generator_init(struct optc *optc1); @@ -356,4 +362,7 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); void optc3_tg_init(struct timing_generator *optc); void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); +bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc); +bool optc3_get_otg_update_pending(struct timing_generator *optc); +bool optc3_get_pipe_update_pending(struct timing_generator *optc); #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c index 1a22ae89fb55..d7a45ef2d01b 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c @@ -169,6 +169,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_hw_timing = optc1_get_hw_timing, .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn301_timing_generator_init(struct optc *optc1) diff --git 
a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 30b81a448ce2..fbbe86d00c2e 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h @@ -99,7 +99,8 @@ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SRI(OTG_CRC_CNTL2, OTG, inst),\ SR(DWB_SOURCE_SELECT),\ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ @@ -254,7 +255,11 @@ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn31_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h index 99c098e76116..0ff72b97b465 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h @@ -98,7 +98,8 @@ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ @@ -248,7 +249,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn314_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 00094f0e8470..c217f653b3c8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -297,18 +297,6 @@ static void optc32_set_drr( optc32_setup_manual_trigger(optc); } -bool optc32_get_double_buffer_pending(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t update_pending = 0; - - REG_GET(OPTC_INPUT_GLOBAL_CONTROL, - OPTC_DOUBLE_BUFFER_PENDING, - &update_pending); - - return (update_pending == 1); -} - static struct timing_generator_funcs dcn32_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -373,7 +361,9 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing 
= optc1_get_hw_timing, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, - .get_double_buffer_pending = optc32_get_double_buffer_pending, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn32_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 665d7c52f67c..0b0964a9da74 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -177,7 +177,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn32_timing_generator_init(struct optc *optc1); void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); @@ -185,6 +189,5 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); -bool optc32_get_double_buffer_pending(struct timing_generator *optc); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index d077e2392379..be749ab41dce 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -67,7 +67,11 @@ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\ SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\ - SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh) + SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn35_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index a5d6a7dca554..783ca9acc762 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -430,6 +430,35 @@ static void optc401_program_global_sync( REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout); } +static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + 
MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, optc1->vready_offset + 10, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, enable); + + return; +} + +static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + uint32_t lock_status = 0; + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, locked, + 1, 150000); + + REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &lock_status); + + if (lock_status != locked) + return false; + + return true; +} + static struct timing_generator_funcs dcn401_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -493,7 +522,11 @@ static struct timing_generator_funcs dcn401_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, - .get_double_buffer_pending = optc32_get_double_buffer_pending, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, + .set_vupdate_keepout = optc401_set_vupdate_keepout, + .wait_update_lock_status = optc401_wait_update_lock_status, }; void dcn401_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h index bb13a645802d..1be89571986f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h @@ -159,7 +159,11 @@ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\ SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\ - SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh) + SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn401_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 53a5f4cb648c..e698543ec937 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -623,7 +623,7 @@ static struct link_encoder *dce100_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index 91da5cf85b69..035c6cfdaee5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -668,7 +668,7 @@ static struct link_encoder *dce110_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 162856c523e4..480a50967385 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -629,7 +629,7 @@ static struct link_encoder *dce112_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 621825a51f46..c63c59623433 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -706,7 +706,7 @@ static struct link_encoder *dce120_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index a73d3c6ef425..3d5113f010bb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -723,7 +723,7 @@ static struct link_encoder *dce80_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 563c5eec83ff..770a380cc03d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -533,7 +533,6 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, /* raven smu dones't allow 0 disp clk, @@ -560,18 +559,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_stutter = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, -}; - static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); @@ -751,7 +738,7 @@ static struct link_encoder *dcn10_link_encoder_create( kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc10) + if (!enc10 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -1400,8 +1387,6 @@ static bool dcn10_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 
eea2b3b307cd..189d0c85872e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -706,7 +706,6 @@ static const struct resource_caps res_cap_nv14 = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, @@ -920,7 +919,7 @@ struct link_encoder *dcn20_link_encoder_create( kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index fc54483b9104..d3d67d366523 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -600,7 +600,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -797,7 +796,7 @@ static struct link_encoder *dcn201_link_encoder_create( kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); struct dcn10_link_encoder *enc10; - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; enc10 = &enc20->enc10; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 347e6aaea582..021ba8ac5c8c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -610,7 +610,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, @@ -1298,7 +1297,7 @@ static struct link_encoder *dcn21_link_encoder_create( kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc21) + if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 5040a4c6ed18..cd31e4f16c14 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -711,7 +711,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, //No DMCU on DCN30 .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -927,7 +926,7 @@ static struct link_encoder *dcn30_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 7d04739c3ba1..a9816affd312 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -682,7 +682,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, @@ -883,7 +882,7 @@ static struct link_encoder *dcn301_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn301_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 5791b5cc2875..02af8b8f4d27 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -81,7 +81,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -893,7 +892,7 @@ static struct link_encoder *dcn302_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 63f0f882c861..7002a8dd358a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -82,7 +82,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -839,7 +838,7 @@ static struct link_encoder *dcn303_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index ac8cb20e2e3b..c16cf1c8f7f9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -858,7 +858,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -869,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = 
true, - .sanity_checks = true, + .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1093,7 +1092,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 169924d0a839..c0f48c78e968 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -876,7 +876,6 @@ static const struct dc_debug_options debug_defaults_drv = { .replay_skip_crtc_disabled = true, .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, @@ -889,7 +888,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4k*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = true, + .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1149,7 +1148,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 3f4b9dba4112..6c3295259a81 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -858,7 +858,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, /*hw not support it*/ .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -1091,7 +1090,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, @@ -1812,6 +1811,11 @@ static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_confi *panel_config = panel_config_defaults; } +static int dcn315_get_power_profile(const struct dc_state *context) +{ + return !context->bw_ctx.bw.dcn.clk.p_state_change_support; +} + static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; @@ -1840,6 +1844,7 @@ static struct resource_funcs dcn315_res_pool_funcs = { .update_bw_bounding_box = dcn315_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn315_get_panel_config_defaults, + .get_power_profile = dcn315_get_power_profile, }; static bool dcn315_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 
5fd52c5fcee4..6edaaadcb173 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -853,7 +853,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, /*hw not support it*/ .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -1085,7 +1084,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index a124ad9bd108..01d1a11d5545 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -689,7 +689,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore @@ -1039,7 +1038,7 @@ static struct link_encoder *dcn32_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1990,6 +1989,10 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned return 0; } + if (dc->caps.max_cab_allocation_bytes == 0) { + return 0xffffffff; + } + /* add 2 lines for worst case alignment */ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 7901792afb7b..86c6e5e8c42e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -1054,7 +1054,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst) /* HUBP */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 827a94f84f10..5cb74fd9cb7d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -686,7 +686,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1035,7 +1034,7 @@ static struct link_encoder *dcn321_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || 
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 893a9d9ee870..6cc2960b6104 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -712,7 +712,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1074,7 +1073,7 @@ static struct link_encoder *dcn35_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 70abd32ce2ad..d87e2641cda1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -692,7 +692,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1054,7 +1053,7 @@ static struct link_encoder *dcn35_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 9d56fbdcd06a..db93bac247c0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -685,7 +685,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1032,7 +1031,7 @@ static struct link_encoder *dcn401_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1579,7 +1578,8 @@ static void dcn401_destroy_resource_pool(struct resource_pool **pool) } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -1688,6 +1688,45 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) } } +static int dcn401_get_power_profile(const struct dc_state *context) +{ + int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000; + int dpm_level = 0; + + for (int i = 0; i < 
context->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { + if (context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz == 0 || + uclk_mhz < context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) + break; + if (uclk_mhz > context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) + dpm_level++; + } + + return dpm_level; +} + +static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans) +{ + unsigned int num_available_chans = 1; + + /* channels for MALL must be a power of 2 */ + while (num_chans > 1) { + num_available_chans = (num_available_chans << 1); + num_chans = (num_chans >> 1); + } + + /* cannot be odd */ + num_available_chans &= ~1; + + /* clamp to max available channels for MALL per ASIC */ + if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) { + num_available_chans = num_available_chans > 16 ? 16 : num_available_chans; + } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) { + num_available_chans = num_available_chans > 8 ? 8 : num_available_chans; + } + + return num_available_chans; +} + static struct resource_funcs dcn401_res_pool_funcs = { .destroy = dcn401_destroy_resource_pool, .link_enc_create = dcn401_link_encoder_create, @@ -1714,6 +1753,7 @@ static struct resource_funcs dcn401_res_pool_funcs = { .prepare_mcache_programming = dcn401_prepare_mcache_programming, .build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, + .get_power_profile = dcn401_get_power_profile, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1795,14 +1835,12 @@ static bool dcn401_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - /* total size = mall per channel * num channels * 1024 * 1024 */ - dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; /* Calculate the available MALL space */ - dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( dc, dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel * 1024 * 1024; dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; @@ -1867,6 +1905,7 @@ static bool dcn401_resource_construct( dc->config.prefer_easf = true; dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; + dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2132,6 +2171,7 @@ static bool dcn401_resource_construct( /* SPL */ spl_init_easf_filter_coeffs(); spl_init_blur_scale_coeffs(); + dc->caps.scl_caps.sharpener_support = true; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 514d1ce20df9..7c8d61db153d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -536,8 +536,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, 
inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ - SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \ + SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst) /* HUBBUB */ #define HUBBUB_REG_LIST_DCN4_01_RI(id) \ @@ -609,7 +610,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SR(DCHUBBUB_CLOCK_CNTL), \ SR(DCHUBBUB_SDPIF_CFG0), \ SR(DCHUBBUB_SDPIF_CFG1), \ - SR(DCHUBBUB_MEM_PWR_MODE_CTRL) + SR(DCHUBBUB_MEM_PWR_MODE_CTRL), \ + SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL1), \ + SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL2) /* DCCG */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c index 014e8a296f0c..614276200aa0 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c @@ -848,13 +848,13 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, * surfaces based on policy setting */ if (!spl_is_yuv420(spl_in->basic_in.format) && - (spl_in->debug.sharpen_policy == SHARPEN_YUV)) + (spl_in->sharpen_policy == SHARPEN_YUV)) return enable_isharp; else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) && - (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) + (spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) return enable_isharp; else if (!spl_in->is_fullscreen && - spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL) + spl_in->sharpen_policy == SHARPEN_FULLSCREEN_ALL) return enable_isharp; /* @@ -868,6 +868,60 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, return enable_isharp; } +/* Calculate number of tap with adaptive scaling off */ +static void spl_get_taps_non_adaptive_scaler( + struct spl_scratch *spl_scratch, const struct spl_taps *in_taps) +{ + if (in_taps->h_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) + spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz), 8); + else + spl_scratch->scl_data.taps.h_taps = 4; + } else + spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; + + if (in_taps->v_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) + spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps = 4; + } else + spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; + + if (in_taps->v_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) + spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert_c, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps_c = 4; + } else + spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; + + if (in_taps->h_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) + spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz_c), 8); + else + spl_scratch->scl_data.taps.h_taps_c = 4; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + /* Only 1 and even h_taps_c are supported by hw */ + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; + else + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; + + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) + spl_scratch->scl_data.taps.h_taps = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) + spl_scratch->scl_data.taps.v_taps = 1; + if 
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)) + spl_scratch->scl_data.taps.h_taps_c = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)) + spl_scratch->scl_data.taps.v_taps_c = 1; + +} + /* Calculate optimal number of taps */ static bool spl_get_optimal_number_of_taps( int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch, @@ -882,8 +936,22 @@ static bool spl_get_optimal_number_of_taps( if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && - spl_scratch->scl_data.viewport.width > max_downscale_src_width) + spl_scratch->scl_data.viewport.width > max_downscale_src_width) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + *enable_easf_v = false; + *enable_easf_h = false; + *enable_isharp = false; return false; + } + + /* Disable adaptive scaler and sharpener when integer scaling is enabled */ + if (spl_in->scaling_quality.integer_scaling) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + *enable_easf_v = false; + *enable_easf_h = false; + *enable_isharp = false; + return true; + } /* Check if we are using EASF or not */ skip_easf = enable_easf(spl_in, spl_scratch); @@ -893,43 +961,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) { - if (in_taps->h_taps == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) - spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( - spl_scratch->scl_data.ratios.horz), 8); - else - spl_scratch->scl_data.taps.h_taps = 4; - } else - spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) - spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( - spl_scratch->scl_data.ratios.vert, 2)), 8); - else - spl_scratch->scl_data.taps.v_taps = 4; - } else - spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) - spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( - spl_scratch->scl_data.ratios.vert_c, 2)), 8); - else - spl_scratch->scl_data.taps.v_taps_c = 4; - } else - spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) - spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( - spl_scratch->scl_data.ratios.horz_c), 8); - else - spl_scratch->scl_data.taps.h_taps_c = 4; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; - else - spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; - } else { + if (skip_easf) + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + else { if (spl_is_yuv420(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; spl_scratch->scl_data.taps.v_taps = 6; @@ -954,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( else lb_config = LB_MEMORY_CONFIG_0; // Determine max vtap support by calculating how much line buffer can fit - spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, + spl_in->callbacks.spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, lb_config, &num_part_y, &num_part_c); /* MAX_V_TAPS = MIN (NUM_LINES - 
MAX(CEILING(V_RATIO,1)-2, 0), 8) */ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2) @@ -1590,7 +1624,8 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness, scale_to_sharpness_policy); - dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup); + memcpy(dscl_prog_data->isharp_delta, spl_get_pregen_filter_isharp_1D_lut(setup), + sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE); dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level; dscl_prog_data->isharp_en = 1; // ISHARP_EN @@ -1753,12 +1788,12 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) // Clamp spl_clamp_viewport(&spl_scratch.scl_data.viewport); - if (!res) - return res; - // Save all calculated parameters in dscl_prog_data structure to program hw registers spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp); + if (!res) + return res; + if (spl_in->lls_pref == LLS_PREF_YES) { if (spl_in->is_hdr_on) setup = HDR_L; diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h index afcc66206ca2..89af91e19b6c 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h @@ -7,7 +7,6 @@ #include "dc_spl_types.h" -#define ISHARP_LUT_TABLE_SIZE 32 const uint32_t *spl_get_filter_isharp_1D_lut_0(void); const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void); const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void); diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h index 2a74ff5fdfdb..55d557df4aa5 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h @@ -5,10 +5,8 @@ #ifndef __DC_SPL_TYPES_H__ #define __DC_SPL_TYPES_H__ +#include "spl_debug.h" #include "spl_os_types.h" // swap -#ifndef SPL_ASSERT -#define SPL_ASSERT(_bool) ((void *)0) -#endif #include "spl_fixpt31_32.h" // fixed31_32 and related functions #include "spl_custom_float.h" // custom float and related functions @@ -252,6 +250,7 @@ enum isharp_en { ISHARP_DISABLE, ISHARP_ENABLE }; +#define ISHARP_LUT_TABLE_SIZE 32 // Below struct holds values that can be directly used to program // hardware registers. No conversion/clamping is required struct dscl_prog_data { @@ -402,7 +401,7 @@ struct dscl_prog_data { uint32_t isharp_nl_en; // ISHARP_NL_EN ? 
TODO:check this struct isharp_lba isharp_lba; // ISHARP_LBA struct isharp_fmt isharp_fmt; // ISHARP_FMT - const uint32_t *isharp_delta; + uint32_t isharp_delta[ISHARP_LUT_TABLE_SIZE]; struct isharp_nldelta_sclip isharp_nldelta_sclip; // ISHARP_NLDELTA_SCLIP /* blur and scale filter */ const uint16_t *filter_blur_scale_v; @@ -498,7 +497,7 @@ enum scale_to_sharpness_policy { SCALE_TO_SHARPNESS_ADJ_YUV = 1, SCALE_TO_SHARPNESS_ADJ_ALL = 2 }; -struct spl_funcs { +struct spl_callbacks { void (*spl_calc_lb_num_partitions) (bool alpha_en, const struct spl_scaler_data *scl_data, @@ -510,7 +509,6 @@ struct spl_funcs { struct spl_debug { int visual_confirm_base_offset; int visual_confirm_dpp_offset; - enum sharpen_policy sharpen_policy; enum scale_to_sharpness_policy scale_to_sharpness_policy; }; @@ -520,7 +518,7 @@ struct spl_in { // Basic slice information int odm_slice_index; // ODM Slice Index using get_odm_split_index struct spl_taps scaling_quality; // Explicit Scaling Quality - struct spl_funcs *funcs; + struct spl_callbacks callbacks; // Inputs for isharp and EASF struct adaptive_sharpness adaptive_sharpness; // Adaptive Sharpness enum linear_light_scaling lls_pref; // Linear Light Scaling @@ -532,6 +530,7 @@ struct spl_in { int h_active; int v_active; int sdr_white_level_nits; + enum sharpen_policy sharpen_policy; }; // end of SPL inputs diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h index 5696dafd0894..a6f6132df241 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h @@ -5,21 +5,26 @@ #ifndef SPL_DEBUG_H #define SPL_DEBUG_H -#ifdef SPL_ASSERT -#undef SPL_ASSERT -#endif -#define SPL_ASSERT(b) +#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB) +#define SPL_ASSERT_CRITICAL(expr) do { \ + if (WARN_ON(!(expr))) { \ + kgdb_breakpoint(); \ + } \ +} while (0) +#else +#define SPL_ASSERT_CRITICAL(expr) do { \ + if (WARN_ON(!(expr))) { \ + ; \ + } \ +} while (0) +#endif /* CONFIG_HAVE_KGDB || CONFIG_KGDB */ -#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0) +#if defined(CONFIG_DEBUG_KERNEL_DC) +#define SPL_ASSERT(expr) SPL_ASSERT_CRITICAL(expr) +#else +#define SPL_ASSERT(expr) WARN_ON(!(expr)) +#endif /* CONFIG_DEBUG_KERNEL_DC */ -#ifdef SPL_DALMSG -#undef SPL_DALMSG -#endif -#define SPL_DALMSG(b) - -#ifdef SPL_DAL_ASSERT_MSG -#undef SPL_DAL_ASSERT_MSG -#endif -#define SPL_DAL_ASSERT_MSG(b, m) +#define SPL_BREAK_TO_DEBUGGER() SPL_ASSERT(0) #endif // SPL_DEBUG_H diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c index a95565df5487..131f1e3949d3 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c @@ -22,14 +22,14 @@ static inline unsigned long long abs_i64( * result = dividend / divisor * *remainder = dividend % divisor */ -static inline unsigned long long complete_integer_division_u64( +static inline unsigned long long spl_complete_integer_division_u64( unsigned long long dividend, unsigned long long divisor, unsigned long long *remainder) { unsigned long long result; - ASSERT(divisor); + SPL_ASSERT(divisor); result = spl_div64_u64_rem(dividend, divisor, remainder); @@ -60,10 +60,10 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den /* determine integer part */ - unsigned long long res_value = complete_integer_division_u64( + unsigned long long res_value = 
spl_complete_integer_division_u64( arg1_value, arg2_value, &remainder); - ASSERT(res_value <= LONG_MAX); + SPL_ASSERT(res_value <= (unsigned long long)LONG_MAX); /* determine fractional part */ { @@ -85,7 +85,7 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den { unsigned long long summand = (remainder << 1) >= arg2_value; - ASSERT(res_value <= LLONG_MAX - summand); + SPL_ASSERT(res_value <= (unsigned long long)LLONG_MAX - summand); res_value += summand; } @@ -118,19 +118,19 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed res.value = arg1_int * arg2_int; - ASSERT(res.value <= (long long)LONG_MAX); + SPL_ASSERT(res.value <= (long long)LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg1_int * arg2_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg2_int * arg1_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -139,7 +139,7 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)spl_fixpt_half.value); - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -163,17 +163,17 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) res.value = arg_int * arg_int; - ASSERT(res.value <= (long long)LONG_MAX); + SPL_ASSERT(res.value <= (long long)LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg_int * arg_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -182,7 +182,7 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)spl_fixpt_half.value); - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -196,7 +196,7 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg) * Good idea to use Newton's method */ - ASSERT(arg.value); + SPL_ASSERT(arg.value); return spl_fixpt_from_fraction( spl_fixpt_one.value, @@ -286,7 +286,7 @@ struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg) * * Calculated as Taylor series. 
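 *
 * The caller, spl_fixpt_exp(), first performs range reduction: it
 * rewrites arg = m * ln(2) + r with |r| < 1, so that
 * exp(arg) = 2^m * exp(r) and only exp(r) needs the series. The
 * series itself is evaluated in Horner form over n = 9 terms,
 *
 *   exp(r) ~= 1 + r * (1 + r/2 * (1 + r/3 * ( ... (1 + r/n) ... )))
 *
 * which converges quickly precisely because |r| < 1.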
*/ -static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) +static struct spl_fixed31_32 spl_fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) { unsigned int n = 9; @@ -295,7 +295,7 @@ static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed3 n + 1); /* TODO find correct res */ - ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); + SPL_ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); do res = spl_fixpt_add( @@ -337,22 +337,22 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg) spl_fixpt_ln2, m)); - ASSERT(m != 0); + SPL_ASSERT(m != 0); - ASSERT(spl_fixpt_lt( + SPL_ASSERT(spl_fixpt_lt( spl_fixpt_abs(r), spl_fixpt_one)); if (m > 0) return spl_fixpt_shl( - fixed31_32_exp_from_taylor_series(r), + spl_fixed31_32_exp_from_taylor_series(r), (unsigned char)m); else return spl_fixpt_div_int( - fixed31_32_exp_from_taylor_series(r), + spl_fixed31_32_exp_from_taylor_series(r), 1LL << -m); } else if (arg.value != 0) - return fixed31_32_exp_from_taylor_series(arg); + return spl_fixed31_32_exp_from_taylor_series(arg); else return spl_fixpt_one; } @@ -364,7 +364,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) struct spl_fixed31_32 error; - ASSERT(arg.value > 0); + SPL_ASSERT(arg.value > 0); /* TODO if arg is negative, return NaN */ /* TODO if arg is zero, return -INF */ @@ -396,7 +396,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) * part in 32 bits. It is used in hw programming (scaler) */ -static inline unsigned int ux_dy( +static inline unsigned int spl_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits) @@ -415,13 +415,13 @@ static inline unsigned int ux_dy( return result | fractional_part; } -static inline unsigned int clamp_ux_dy( +static inline unsigned int spl_clamp_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits, unsigned int min_clamp) { - unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits); + unsigned int truncated_val = spl_ux_dy(value, integer_bits, fractional_bits); if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) return (1 << (integer_bits + fractional_bits)) - 1; @@ -433,40 +433,40 @@ static inline unsigned int clamp_ux_dy( unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 4, 19); + return spl_ux_dy(arg.value, 4, 19); } unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 3, 19); + return spl_ux_dy(arg.value, 3, 19); } unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 2, 19); + return spl_ux_dy(arg.value, 2, 19); } unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 0, 19); + return spl_ux_dy(arg.value, 0, 19); } unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg) { - return clamp_ux_dy(arg.value, 0, 14, 1); + return spl_clamp_ux_dy(arg.value, 0, 14, 1); } unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg) { - return clamp_ux_dy(arg.value, 0, 10, 1); + return spl_clamp_ux_dy(arg.value, 0, 10, 1); } int spl_fixpt_s4d19(struct spl_fixed31_32 arg) { if (arg.value < 0) - return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19); + return -(int)spl_ux_dy(spl_fixpt_abs(arg).value, 4, 19); else - return ux_dy(arg.value, 4, 19); + return spl_ux_dy(arg.value, 4, 19); } struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h 
b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h index 8a045e2f8699..ed2647f9a099 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h @@ -5,11 +5,8 @@ #ifndef __SPL_FIXED31_32_H__ #define __SPL_FIXED31_32_H__ -#include "os_types.h" +#include "spl_debug.h" #include "spl_os_types.h" // swap -#ifndef ASSERT -#define ASSERT(_bool) ((void *)0) -#endif #ifndef LLONG_MAX #define LLONG_MAX 9223372036854775807ll @@ -194,7 +191,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp( */ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift) { - ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || + SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift)))); arg.value = arg.value << shift; @@ -231,7 +228,7 @@ static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, st { struct spl_fixed31_32 res; - ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || + SPL_ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value))); res.value = arg1.value + arg2.value; @@ -256,7 +253,7 @@ static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, st { struct spl_fixed31_32 res; - ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || + SPL_ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value))); res.value = arg1.value - arg2.value; @@ -448,7 +445,7 @@ static inline int spl_fixpt_round(struct spl_fixed31_32 arg) const long long summand = spl_fixpt_half.value; - ASSERT(LLONG_MAX - (long long)arg_value >= summand); + SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; @@ -469,7 +466,7 @@ static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg) const long long summand = spl_fixpt_one.value - spl_fixpt_epsilon.value; - ASSERT(LLONG_MAX - (long long)arg_value >= summand); + SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; @@ -504,7 +501,7 @@ static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg bool negative = arg.value < 0; if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) { - ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); + SPL_ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); return arg; } diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h index 709706ed4f2c..2e6ba71960ac 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h @@ -6,6 +6,8 @@ #ifndef _SPL_OS_TYPES_H_ #define _SPL_OS_TYPES_H_ +#include "spl_debug.h" + #include <linux/slab.h> #include <linux/kgdb.h> #include <linux/kref.h> @@ -18,7 +20,6 @@ * general debug capabilities * */ -#define SPL_BREAK_TO_DEBUGGER() ASSERT(0) static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder) { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index fe5b6f7a3eb1..b353c4ceb60d 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -301,6 +301,7 @@ struct dmub_srv_hw_params { bool disallow_phy_access; bool disable_sldo_opt; bool enable_non_transparent_setconfig; + bool 
lower_hbr3_phy_ssc; }; /** @@ -570,6 +571,14 @@ struct dmub_notification { }; }; +/* enum dmub_ips_mode - IPS mode identifier */ +enum dmub_ips_mode { + DMUB_IPS_MODE_IPS1_MAX = 0, + DMUB_IPS_MODE_IPS2, + DMUB_IPS_MODE_IPS1_RCG, + DMUB_IPS_MODE_IPS1_ONO2_ON +}; + /** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index ebcf68bfae2b..b800a507d1e0 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -170,6 +170,11 @@ #pragma pack(push, 1) #define ABM_NUM_OF_ACE_SEGMENTS 5 +/** + * Debug FW state offset + */ +#define DMUB_DEBUG_FW_STATE_OFFSET 0x300 + union abm_flags { struct { /** @@ -490,6 +495,7 @@ struct dmub_feature_caps { uint8_t gecc_enable; uint8_t replay_supported; uint8_t replay_reserved[3]; + uint8_t abm_aux_backlight_support; }; struct dmub_visual_confirm_color { @@ -689,7 +695,8 @@ union dmub_fw_boot_options { uint32_t ips_disable: 3; /* options to disable ips support*/ uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ - uint32_t reserved : 7; /**< reserved */ + uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ + uint32_t reserved : 6; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -721,6 +728,7 @@ enum dmub_shared_state_feature_id { DMUB_SHARED_SHARE_FEATURE__INVALID = 0, DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, + DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3, DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */ }; @@ -747,7 +755,8 @@ union dmub_shared_state_ips_driver_signals { uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */ - uint32_t reserved_bits : 28; /**< Reversed bits */ + uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */ + uint32_t reserved_bits : 27; /**< Reversed bits */ } bits; uint32_t all; }; @@ -757,6 +766,14 @@ union dmub_shared_state_ips_driver_signals { */ #define DMUB_SHARED_STATE__IPS_FW_VERSION 1 +struct dmub_shared_state_debug_setup { + union { + struct { + uint32_t exclude_points[62]; + } profile_mode; + }; +}; + /** * struct dmub_shared_state_ips_fw - Firmware state for IPS. */ @@ -809,6 +826,7 @@ struct dmub_shared_state_feature_block { struct dmub_shared_state_feature_common common; /**< Generic data */ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ + struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */ } data; /**< Shared state data. 
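 * Only the member selected by the feature id in the common header
 * is meaningful; the union keeps every feature block at the same
 * fixed 256-byte stride.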
*/ }; /* 256-bytes, fixed */ @@ -1051,11 +1069,110 @@ enum dmub_gpint_command { DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119, /** + * DESC: Set IPS residency measurement + * ARGS: 0 - Disable ips measurement + * 1 - Enable ips measurement + */ + DMUB_GPINT__IPS_RESIDENCY = 121, + + /** * DESC: Enable measurements for various task duration * ARGS: 0 - Disable measurement * 1 - Enable measurement */ DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123, + + /** + * DESC: Gets IPS residency in microseconds + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in microseconds - lower 32 bits + */ + DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124, + + /** + * DESC: Gets IPS1 histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125, + + /** + * DESC: Gets IPS2 histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126, + + /** + * DESC: Gets IPS residency + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in milli-percent. + */ + DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127, + + /** + * DESC: Gets IPS1_RCG histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128, + + /** + * DESC: Gets IPS1_ONO2_ON histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129, + + /** + * DESC: Gets IPS entry counter during residency measurement + * ARGS: 0 - Return IPS1 entry counts + * 1 - Return IPS2 entry counts + * 2 - Return IPS1_RCG entry counts + * 3 - Return IPS2_ONO2_ON entry counts + * RETURN: Entry counter for selected IPS mode + */ + DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130, + + /** + * DESC: Gets IPS inactive residency in microseconds + * ARGS: 0 - Return IPS1_MAX residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total inactive residency in microseconds - lower 32 bits + */ + DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131, + + /** + * DESC: Gets IPS inactive residency in microseconds + * ARGS: 0 - Return IPS1_MAX residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total inactive residency in microseconds - upper 32 bits + */ + DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132, + + /** + * DESC: Gets IPS residency in microseconds + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in microseconds - upper 32 bits + */ + DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI = 133, + /** + * DESC: Setup debug configs. + */ + DMUB_GPINT__SETUP_DEBUG_MODE = 136, }; /** @@ -1306,9 +1423,10 @@ enum dmub_out_cmd_type { /* DMUB_CMD__DPIA command sub-types. */ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, - DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, + DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, // will be replaced by DPIA_SET_CONFIG_REQUEST DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3, + DMUB_CMD__DPIA_SET_CONFIG_REQUEST = 4, }; /* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. 
*/ @@ -2097,7 +2215,7 @@ struct dmub_rb_cmd_dig1_dpia_control { }; /** - * SET_CONFIG Command Payload + * SET_CONFIG Command Payload (deprecated) */ struct set_config_cmd_payload { uint8_t msg_type; /* set config message type */ @@ -2105,7 +2223,7 @@ struct set_config_cmd_payload { }; /** - * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. (deprecated) */ struct dmub_cmd_set_config_control_data { struct set_config_cmd_payload cmd_pkt; @@ -2114,6 +2232,17 @@ struct dmub_cmd_set_config_control_data { }; /** + * SET_CONFIG Request Command Payload + */ +struct set_config_request_cmd_payload { + uint8_t instance; /* DPIA instance */ + uint8_t immed_status; /* Immediate status returned in case of error */ + uint8_t msg_type; /* set config message type */ + uint8_t reserved; + uint32_t msg_data; /* set config message data */ +}; + +/** * DMUB command structure for SET_CONFIG command. */ struct dmub_rb_cmd_set_config_access { @@ -2122,6 +2251,14 @@ struct dmub_rb_cmd_set_config_access { }; /** + * DMUB command structure for SET_CONFIG request command. + */ +struct dmub_rb_cmd_set_config_request { + struct dmub_cmd_header header; /* header */ + struct set_config_request_cmd_payload payload; /* set config request payload */ +}; + +/** * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ struct dmub_cmd_mst_alloc_slots_control_data { @@ -4290,6 +4427,24 @@ struct dmub_rb_cmd_abm_set_pipe { }; /** + * Type of backlight control method to be used by ABM module + */ +enum dmub_backlight_control_type { + /** + * PWM Backlight control + */ + DMU_BACKLIGHT_CONTROL_PWM = 0, + /** + * VESA Aux-based backlight control + */ + DMU_BACKLIGHT_CONTROL_VESA_AUX = 1, + /** + * AMD DPCD Aux-based backlight control + */ + DMU_BACKLIGHT_CONTROL_AMD_AUX = 2, +}; + +/** * Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command. */ struct dmub_cmd_abm_set_backlight_data { @@ -4316,9 +4471,42 @@ struct dmub_cmd_abm_set_backlight_data { uint8_t panel_mask; /** + * AUX HW Instance. + */ + uint8_t aux_inst; + + /** * Explicit padding to 4 byte boundary. */ - uint8_t pad[2]; + uint8_t pad[1]; + + /** + * Backlight control type. + * Value 0 is PWM backlight control. + * Value 1 is VAUX backlight control. + * Value 2 is AMD DPCD AUX backlight control. + */ + enum dmub_backlight_control_type backlight_control_type; + + /** + * Minimum luminance in nits. + */ + uint32_t min_luminance; + + /** + * Maximum luminance in nits. + */ + uint32_t max_luminance; + + /** + * Minimum backlight in pwm. + */ + uint32_t min_backlight_pwm; + + /** + * Maximum backlight in pwm. 
+ */ + uint32_t max_backlight_pwm; }; /** @@ -5022,7 +5210,34 @@ struct dmub_rb_cmd_get_usbc_cable_id { enum dmub_cmd_secure_display_type { DMUB_CMD__SECURE_DISPLAY_TEST_CMD = 0, /* test command to only check if inbox message works */ DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE, - DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY + DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY, + DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE, + DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY +}; + +#define MAX_ROI_NUM 2 + +struct dmub_cmd_roi_info { + uint16_t x_start; + uint16_t x_end; + uint16_t y_start; + uint16_t y_end; + uint8_t otg_id; + uint8_t phy_id; +}; + +struct dmub_cmd_roi_window_ctl { + uint16_t x_start; + uint16_t x_end; + uint16_t y_start; + uint16_t y_end; + bool enable; +}; + +struct dmub_cmd_roi_ctl_info { + uint8_t otg_id; + uint8_t phy_id; + struct dmub_cmd_roi_window_ctl roi_ctl[MAX_ROI_NUM]; }; /** @@ -5033,14 +5248,8 @@ struct dmub_rb_cmd_secure_display { /** * Data passed from driver to dmub firmware. */ - struct dmub_cmd_roi_info { - uint16_t x_start; - uint16_t x_end; - uint16_t y_start; - uint16_t y_end; - uint8_t otg_id; - uint8_t phy_id; - } roi_info; + struct dmub_cmd_roi_info roi_info; + struct dmub_cmd_roi_ctl_info mul_roi_ctl; }; /** @@ -5318,7 +5527,11 @@ union dmub_rb_cmd { /** * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. */ - struct dmub_rb_cmd_set_config_access set_config_access; + struct dmub_rb_cmd_set_config_access set_config_access; // (deprecated) + /** + * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + */ + struct dmub_rb_cmd_set_config_request set_config_request; /** * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 2ccad79053c5..e5e77bd3c31e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -426,6 +426,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; + boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -463,7 +464,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset; uint32_t is_traceport_enabled, is_cw6_enabled; if (!dmub || !diag_data) @@ -513,9 +514,6 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); diag_data->is_dmcub_soft_reset = is_soft_reset; - REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; - REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); diag_data->is_traceport_en = is_traceport_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index db16066bc893..a3f3ff5d49ac 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -497,6 +497,7 @@ enum dmub_status const struct dmub_fw_meta_info *fw_info; uint32_t 
fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; + uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE; uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 }; if (!dmub->sw_init) @@ -514,6 +515,7 @@ enum dmub_status fw_state_size = fw_info->fw_region_size; trace_buffer_size = fw_info->trace_buffer_size; + shared_state_size = fw_info->shared_state_size; /** * If DM didn't fill in a version, then fill it in based on @@ -534,7 +536,7 @@ enum dmub_status window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; - window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE; + window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index aee5170f5fb2..de8f3cfed6c8 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -164,18 +164,19 @@ enum dpcd_psr_sink_states { PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7, }; -#define DP_SOURCE_SEQUENCE 0x30c -#define DP_SOURCE_TABLE_REVISION 0x310 -#define DP_SOURCE_PAYLOAD_SIZE 0x311 -#define DP_SOURCE_SINK_CAP 0x317 -#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 -#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 -#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E -#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F -#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SOURCE_SEQUENCE 0x30C +#define DP_SOURCE_TABLE_REVISION 0x310 +#define DP_SOURCE_PAYLOAD_SIZE 0x311 +#define DP_SOURCE_SINK_CAP 0x317 +#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 +#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 +#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E +#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F +#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 #define DP_SINK_PR_REPLAY_STATUS 0x378 #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A +#define DP_SINK_EMISSION_RATE 0x37E /* Remove once drm_dp_helper.h is updated upstream */ #ifndef DP_TOTAL_LTTPR_CNT diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h index 02c23b04d34b..058f882d5bdd 100644 --- a/drivers/gpu/drm/amd/display/include/logger_interface.h +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h @@ -52,10 +52,6 @@ void update_surface_trace( void post_surface_trace(struct dc *dc); -void context_timing_trace( - struct dc *dc, - struct resource_context *res_ctx); - void context_clock_trace( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index a48d564d1660..4d68c1c6e210 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -61,11 +61,13 @@ #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) -#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) 
pr_debug("[AutoDPMTest]: "__VA_ARGS__) #define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__) +#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) +#define DC_LOG_REGISTER_READ(...) pr_debug("[REGISTER_READ]: "__VA_ARGS__) +#define DC_LOG_REGISTER_WRITE(...) pr_debug("[REGISTER_WRITE]: "__VA_ARGS__) struct dc_log_buffer_ctx { char *buf; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 3699e633801d..a71df052cf25 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1399,71 +1399,6 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb, pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); } -/* todo: all these scale_gamma functions are inherently the same but - * take different structures as params or different format for ramp - * values. We could probably implement it in a more generic fashion - */ -static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb, - const struct regamma_ramp *ramp, - struct dividers dividers) -{ - unsigned short max_driver = 0xFFFF; - unsigned short max_os = 0xFF00; - unsigned short scaler = max_os; - uint32_t i; - struct pwl_float_data *rgb = pwl_rgb; - struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1; - - i = 0; - do { - if (ramp->gamma[i] > max_os || - ramp->gamma[i + 256] > max_os || - ramp->gamma[i + 512] > max_os) { - scaler = max_driver; - break; - } - i++; - } while (i != GAMMA_RGB_256_ENTRIES); - - i = 0; - do { - rgb->r = dc_fixpt_from_fraction( - ramp->gamma[i], scaler); - rgb->g = dc_fixpt_from_fraction( - ramp->gamma[i + 256], scaler); - rgb->b = dc_fixpt_from_fraction( - ramp->gamma[i + 512], scaler); - - ++rgb; - ++i; - } while (i != GAMMA_RGB_256_ENTRIES); - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider1); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider1); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider1); - - ++rgb; - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider2); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider2); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider2); - - ++rgb; - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider3); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider3); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider3); -} - /* * RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here * Input is evenly distributed in the output color space as specified in @@ -1663,106 +1598,6 @@ static bool calculate_interpolated_hardware_curve( return true; } -/* The "old" interpolation uses a complicated scheme to build an array of - * coefficients while also using an array of 0-255 normalized to 0-1 - * Then there's another loop using both of the above + new scaled user ramp - * and we concatenate them. It also searches for points of interpolation and - * uses enums for positions. 
- * - * This function uses a different approach: - * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255 - * To find index for hwX , we notice the following: - * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1 - * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT - * - * Once the index is known, combined Y is simply: - * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index) - * - * We should switch to this method in all cases, it's simpler and faster - * ToDo one day - for now this only applies to ADL regamma to avoid regression - * for regular use cases (sRGB and PQ) - */ -static void interpolate_user_regamma(uint32_t hw_points_num, - struct pwl_float_data *rgb_user, - bool apply_degamma, - struct dc_transfer_func_distributed_points *tf_pts) -{ - uint32_t i; - uint32_t color = 0; - int32_t index; - int32_t index_next; - struct fixed31_32 *tf_point; - struct fixed31_32 hw_x; - struct fixed31_32 norm_factor = - dc_fixpt_from_int(255); - struct fixed31_32 norm_x; - struct fixed31_32 index_f; - struct fixed31_32 lut1; - struct fixed31_32 lut2; - struct fixed31_32 delta_lut; - struct fixed31_32 delta_index; - const struct fixed31_32 one = dc_fixpt_from_int(1); - - i = 0; - /* fixed_pt library has problems handling too small values */ - while (i != 32) { - tf_pts->red[i] = dc_fixpt_zero; - tf_pts->green[i] = dc_fixpt_zero; - tf_pts->blue[i] = dc_fixpt_zero; - ++i; - } - while (i <= hw_points_num + 1) { - for (color = 0; color < 3; color++) { - if (color == 0) - tf_point = &tf_pts->red[i]; - else if (color == 1) - tf_point = &tf_pts->green[i]; - else - tf_point = &tf_pts->blue[i]; - - if (apply_degamma) { - if (color == 0) - hw_x = coordinates_x[i].regamma_y_red; - else if (color == 1) - hw_x = coordinates_x[i].regamma_y_green; - else - hw_x = coordinates_x[i].regamma_y_blue; - } else - hw_x = coordinates_x[i].x; - - if (dc_fixpt_le(one, hw_x)) - hw_x = one; - - norm_x = dc_fixpt_mul(norm_factor, hw_x); - index = dc_fixpt_floor(norm_x); - if (index < 0 || index > 255) - continue; - - index_f = dc_fixpt_from_int(index); - index_next = (index == 255) ? 
index : index + 1; - - if (color == 0) { - lut1 = rgb_user[index].r; - lut2 = rgb_user[index_next].r; - } else if (color == 1) { - lut1 = rgb_user[index].g; - lut2 = rgb_user[index_next].g; - } else { - lut1 = rgb_user[index].b; - lut2 = rgb_user[index_next].b; - } - - // we have everything now, so interpolate - delta_lut = dc_fixpt_sub(lut2, lut1); - delta_index = dc_fixpt_sub(norm_x, index_f); - - *tf_point = dc_fixpt_add(lut1, - dc_fixpt_mul(delta_index, delta_lut)); - } - ++i; - } -} - static void build_new_custom_resulted_curve( uint32_t hw_points_num, struct dc_transfer_func_distributed_points *tf_pts) @@ -1784,29 +1619,6 @@ static void build_new_custom_resulted_curve( } } -static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma, - uint32_t hw_points_num, struct calculate_buffer *cal_buffer) -{ - uint32_t i; - - struct gamma_coefficients coeff; - struct pwl_float_data_ex *rgb = rgb_regamma; - const struct hw_x_point *coord_x = coordinates_x; - - build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); - - i = 0; - while (i != hw_points_num + 1) { - rgb->r = translate_from_linear_space_ex( - coord_x->x, &coeff, 0, cal_buffer); - rgb->g = rgb->r; - rgb->b = rgb->r; - ++coord_x; - ++rgb; - ++i; - } -} - static bool map_regamma_hw_to_x_user( const struct dc_gamma *ramp, struct pixel_gamma_point *coeff128, @@ -1855,125 +1667,6 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 -bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp) -{ - struct gamma_coefficients coeff; - const struct hw_x_point *coord_x = coordinates_x; - uint32_t i = 0; - - do { - coeff.a0[i] = dc_fixpt_from_fraction( - regamma->coeff.A0[i], 10000000); - coeff.a1[i] = dc_fixpt_from_fraction( - regamma->coeff.A1[i], 1000); - coeff.a2[i] = dc_fixpt_from_fraction( - regamma->coeff.A2[i], 1000); - coeff.a3[i] = dc_fixpt_from_fraction( - regamma->coeff.A3[i], 1000); - coeff.user_gamma[i] = dc_fixpt_from_fraction( - regamma->coeff.gamma[i], 1000); - - ++i; - } while (i != 3); - - i = 0; - /* fixed_pt library has problems handling too small values */ - while (i != 32) { - output_tf->tf_pts.red[i] = dc_fixpt_zero; - output_tf->tf_pts.green[i] = dc_fixpt_zero; - output_tf->tf_pts.blue[i] = dc_fixpt_zero; - ++coord_x; - ++i; - } - while (i != MAX_HW_POINTS + 1) { - output_tf->tf_pts.red[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 0, cal_buffer); - output_tf->tf_pts.green[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 1, cal_buffer); - output_tf->tf_pts.blue[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 2, cal_buffer); - ++coord_x; - ++i; - } - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts); - - // this function just clamps output to 0-1 - build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts); - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - return true; -} - -bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp) -{ - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; - struct dividers dividers; - - struct pwl_float_data *rgb_user = NULL; - struct pwl_float_data_ex *rgb_regamma = NULL; - bool ret = false; - - if (regamma == NULL) - return false; - - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - rgb_user = 
kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; - - rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - - scale_user_regamma_ramp(rgb_user, ®amma->ramp, dividers); - - if (regamma->flags.bits.applyDegamma == 1) { - apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer); - copy_rgb_regamma_to_coordinates_x(coordinates_x, - MAX_HW_POINTS, rgb_regamma); - } - - interpolate_user_regamma(MAX_HW_POINTS, rgb_user, - regamma->flags.bits.applyDegamma, tf_pts); - - // no custom HDR curves! - tf_pts->end_exponent = 0; - tf_pts->x_point_at_y1_red = 1; - tf_pts->x_point_at_y1_green = 1; - tf_pts->x_point_at_y1_blue = 1; - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts); - - // this function just clamps output to 0-1 - build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts); - - ret = true; - - kfree(rgb_regamma); -rgb_regamma_alloc_fail: - kfree(rgb_user); -rgb_user_alloc_fail: - return ret; -} - bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *input_tf, const struct dc_gamma *ramp, bool map_user_ramp) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index ee5c466613de..97e55278940e 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -115,15 +115,4 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp); - -bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp); - - #endif /* COLOR_MOD_COLOR_GAMMA_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index bbd259cea4f4..f980a84dceef 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -48,6 +48,7 @@ #define VSYNCS_BETWEEN_FLIP_THRESHOLD 2 #define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5 #define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500 +#define MICRO_HZ_TO_HZ(x) (x / 1000000) struct core_freesync { struct mod_freesync public; @@ -128,13 +129,26 @@ unsigned int mod_freesync_calc_v_total_from_refresh( unsigned int v_total; unsigned int frame_duration_in_ns; + if (refresh_in_uhz == 0) + return stream->timing.v_total; + frame_duration_in_ns = ((unsigned int)(div64_u64((1000000000ULL * 1000000), refresh_in_uhz))); - v_total = div64_u64(div64_u64(((unsigned long long)( - frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total) + 500000, 1000000); + if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) { + /* When the target refresh rate is the minimum panel refresh rate, + * round down the vtotal value to avoid stretching vblank over + * panel's vtotal boundary. 
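+ * Both branches evaluate the same expression,
+ *   v_total = frame_duration_ns * pixel_clock_khz / h_total / 10^6,
+ * the only difference being that the nominal branch below adds
+ * 500000 before the final division (round to nearest) while this
+ * branch truncates (round down).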
+ */ + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total), 1000000); + } else { + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total) + 500000, 1000000); + } /* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index c996365e84b0..1d41dd58f6bc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -27,6 +27,11 @@ #include "hdcp.h" +static inline uint16_t get_hdmi_rxstatus_msg_size(const uint8_t rxstatus[2]) +{ + return HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rxstatus[1]) << 8 | rxstatus[0]; +} + static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp) { uint8_t is_ready = 0; @@ -35,8 +40,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; else is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) && - (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; + get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus) != 0) ? 1 : 0; return is_ready ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; } @@ -84,15 +88,13 @@ static inline enum mod_hdcp_status check_link_integrity_failure_dp( static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint16_t size; if (is_dp_hdcp(hdcp)) { status = MOD_HDCP_STATUS_SUCCESS; } else { status = mod_hdcp_read_rxstatus(hdcp); if (status == MOD_HDCP_STATUS_SUCCESS) { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; @@ -104,7 +106,6 @@ static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) @@ -115,8 +116,7 @@ static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; } else { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; @@ -128,7 +128,6 @@ out: static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) @@ -139,8 +138,7 @@ static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; } else { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; @@ -152,7 +150,6 @@ out: static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE; - uint8_t size; uint16_t max_wait = 20; // units of ms uint16_t num_polls = 5; uint16_t wait_time = max_wait / num_polls; @@ -167,8 +164,7 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) if (status != MOD_HDCP_STATUS_SUCCESS) break; - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; @@ -181,7 +177,6 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; if (is_dp_hdcp(hdcp)) { status = MOD_HDCP_STATUS_INVALID_OPERATION; @@ -189,8 +184,7 @@ static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; @@ -249,8 +243,7 @@ static uint8_t process_rxstatus(struct mod_hdcp *hdcp, sizeof(hdcp->auth.msg.hdcp2.rx_id_list); else hdcp->auth.msg.hdcp2.rx_id_list_size = - HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); } out: return (*status == MOD_HDCP_STATUS_SUCCESS); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 3f91926a50e9..7eefcb0f5070 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -28,6 +28,8 @@ #define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */ +struct amdgpu_ip_block; + /* * Chip flags @@ -337,6 +339,11 @@ enum DC_DEBUG_MASK { * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. */ DC_FORCE_IPS_ENABLE = 0x4000, + /** + * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for + * eDP display from ACPI _DDC method. 
+ */ + DC_DISABLE_ACPI_EDID = 0x8000, }; enum amd_dpm_forced_level; @@ -377,30 +384,30 @@ enum amd_dpm_forced_level; */ struct amd_ip_funcs { char *name; - int (*early_init)(void *handle); - int (*late_init)(void *handle); - int (*sw_init)(void *handle); - int (*sw_fini)(void *handle); - int (*early_fini)(void *handle); - int (*hw_init)(void *handle); - int (*hw_fini)(void *handle); - void (*late_fini)(void *handle); - int (*prepare_suspend)(void *handle); - int (*suspend)(void *handle); - int (*resume)(void *handle); + int (*early_init)(struct amdgpu_ip_block *ip_block); + int (*late_init)(struct amdgpu_ip_block *ip_block); + int (*sw_init)(struct amdgpu_ip_block *ip_block); + int (*sw_fini)(struct amdgpu_ip_block *ip_block); + int (*early_fini)(struct amdgpu_ip_block *ip_block); + int (*hw_init)(struct amdgpu_ip_block *ip_block); + int (*hw_fini)(struct amdgpu_ip_block *ip_block); + void (*late_fini)(struct amdgpu_ip_block *ip_block); + int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); + int (*suspend)(struct amdgpu_ip_block *ip_block); + int (*resume)(struct amdgpu_ip_block *ip_block); bool (*is_idle)(void *handle); - int (*wait_for_idle)(void *handle); - bool (*check_soft_reset)(void *handle); - int (*pre_soft_reset)(void *handle); - int (*soft_reset)(void *handle); - int (*post_soft_reset)(void *handle); + int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); + bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); + int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block); + int (*soft_reset)(struct amdgpu_ip_block *ip_block); + int (*post_soft_reset)(struct amdgpu_ip_block *ip_block); int (*set_clockgating_state)(void *handle, enum amd_clockgating_state state); int (*set_powergating_state)(void *handle, enum amd_powergating_state state); void (*get_clockgating_state)(void *handle, u64 *flags); - void (*dump_ip_state)(void *handle); - void (*print_ip_state)(void *handle, struct drm_printer *p); + void (*dump_ip_state)(struct amdgpu_ip_block *ip_block); + void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p); }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h index f42a276499cd..5d9d5fea6e06 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h @@ -6199,10 +6199,12 @@ #define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS__SHIFT 0x1 #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2 #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3 +#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG__SHIFT 0x4 #define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f #define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS_MASK 0x00000002L #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L +#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG_MASK 0x3FFFFFF0L #define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L //DCHUBBUB_TIMEOUT_DETECTION_CTRL1 #define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0 diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h index 2c3ce243861a..380e44230bda 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h @@ -1232,6 +1232,29 @@ #define 
mmMC_VM_MX_L1_PERFCOUNTER_HI 0x059d #define mmMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0 +// Stand Alone Walker Registers +#define VMC_TAP_PDE_REQUEST_SNOOP_OFFSET 8 +#define VMC_TAP_PTE_REQUEST_SNOOP_OFFSET 11 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0606 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0607 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0608 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0609 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x060a +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x060b +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_CNTL 0x0604 +#define mmVM_L2_SAW_CONTEXT0_CNTL_BASE_IDX 0 +#define CONTEXT0_CNTL_ENABLE_OFFSET 0 +#define CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET 1 +#define mmVM_L2_SAW_CONTEXTS_DISABLE 0x060c +#define mmVM_L2_SAW_CONTEXTS_DISABLE_BASE_IDX 0 +#define mmVM_L2_SAW_CNTL4 0x0603 +#define mmVM_L2_SAW_CNTL4_BASE_IDX 0 // addressBlock: mmhub_utcl2_atcl2dec // base address: 0x69900 diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 19a48d98830a..bb27c0d2a9ae 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -119,6 +119,8 @@ enum pp_clock_type { OD_ACOUSTIC_TARGET, OD_FAN_TARGET_TEMPERATURE, OD_FAN_MINIMUM_PWM, + OD_FAN_ZERO_RPM_ENABLE, + OD_FAN_ZERO_RPM_STOP_TEMP, }; enum amd_pp_sensors { @@ -199,6 +201,8 @@ enum PP_OD_DPM_TABLE_COMMAND { PP_OD_EDIT_ACOUSTIC_TARGET, PP_OD_EDIT_FAN_TARGET_TEMPERATURE, PP_OD_EDIT_FAN_MINIMUM_PWM, + PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, }; struct pp_states_info { @@ -336,7 +340,8 @@ enum pp_policy_soc_pstate { #define MAX_CLKS 4 #define NUM_VCN 4 #define NUM_JPEG_ENG 32 - +#define MAX_XCC 8 +#define NUM_XCP 8 struct seq_file; enum amd_pp_clock_type; struct amd_pp_simple_clock_info; @@ -350,6 +355,15 @@ struct pp_smu_wm_range_sets; struct pp_smu_nv_clock_table; struct dpm_clocks; +struct amdgpu_xcp_metrics { + /* Utilization Instantaneous (%) */ + u32 gfx_busy_inst[MAX_XCC]; + u16 jpeg_busy[NUM_JPEG_ENG]; + u16 vcn_busy[NUM_VCN]; + /* Utilization Accumulated (%) */ + u64 gfx_busy_acc[MAX_XCC]; +}; + struct amd_pm_funcs { /* export for dpm on ci and si */ int (*pre_set_power_state)(void *handle); @@ -872,6 +886,97 @@ struct gpu_metrics_v1_5 { uint16_t padding; }; +struct gpu_metrics_v1_6 { + struct metrics_table_header common_header; + + /* Temperature (Celsius) */ + uint16_t temperature_hotspot; + uint16_t temperature_mem; + uint16_t temperature_vrsoc; + + /* Power (Watts) */ + uint16_t curr_socket_power; + + /* Utilization (%) */ + uint16_t average_gfx_activity; + uint16_t average_umc_activity; // memory controller + + /* Energy (15.259uJ (2^-16) units) */ + uint64_t energy_accumulator; + + /* Driver attached timestamp (in ns) */ + uint64_t system_clock_counter; + + /* Accumulation cycle counter */ + uint32_t accumulation_counter; + + /* Accumulated throttler residencies */ + uint32_t prochot_residency_acc; + uint32_t ppt_residency_acc; + uint32_t socket_thm_residency_acc; + uint32_t vr_thm_residency_acc; + 
uint32_t hbm_thm_residency_acc; + + /* Clock Lock Status. Each bit corresponds to clock instance */ + uint32_t gfxclk_lock_status; + + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + + /* XGMI bus width and bitrate (in Gbps) */ + uint16_t xgmi_link_width; + uint16_t xgmi_link_speed; + + /* Utilization Accumulated (%) */ + uint32_t gfx_activity_acc; + uint32_t mem_activity_acc; + + /*PCIE accumulated bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_acc; + + /*PCIE instantaneous bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_inst; + + /* PCIE L0 to recovery state transition accumulated count */ + uint64_t pcie_l0_to_recov_count_acc; + + /* PCIE replay accumulated count */ + uint64_t pcie_replay_count_acc; + + /* PCIE replay rollover accumulated count */ + uint64_t pcie_replay_rover_count_acc; + + /* PCIE NAK sent accumulated count */ + uint32_t pcie_nak_sent_count_acc; + + /* PCIE NAK received accumulated count */ + uint32_t pcie_nak_rcvd_count_acc; + + /* XGMI accumulated data transfer size(KiloBytes) */ + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; + + /* PMFW attached timestamp (10ns resolution) */ + uint64_t firmware_timestamp; + + /* Current clocks (Mhz) */ + uint16_t current_gfxclk[MAX_GFX_CLKS]; + uint16_t current_socclk[MAX_CLKS]; + uint16_t current_vclk0[MAX_CLKS]; + uint16_t current_dclk0[MAX_CLKS]; + uint16_t current_uclk; + + /* Number of current partition */ + uint16_t num_partition; + + /* XCP metrics stats */ + struct amdgpu_xcp_metrics xcp_stats[NUM_XCP]; + + /* PCIE other end recovery counter */ + uint32_t pcie_lc_perf_other_end_recovery; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead. 
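All of the gpu_metrics_v1_x tables share the same versioned common_header, so a consumer is expected to check the format and content revisions before casting the blob to a concrete layout such as the gpu_metrics_v1_6 added above. Below is a minimal user-space sketch of that check; the sysfs node path and the conventional metrics_table_header layout (structure_size, format_revision, content_revision) are assumptions drawn from the wider header family, not from this hunk.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Assumed layout of the common header (not shown in this hunk). */
struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

int main(void)
{
	uint8_t buf[4096];
	struct metrics_table_header hdr;
	size_t n;
	/* Assumed sysfs path for the first card. */
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n < sizeof(hdr))
		return 1;

	memcpy(&hdr, buf, sizeof(hdr));
	/* Only interpret the payload as v1.6 when the header says so. */
	if (hdr.format_revision == 1 && hdr.content_revision == 6 &&
	    hdr.structure_size <= n)
		printf("gpu_metrics v1.6, %u bytes\n",
		       (unsigned int)hdr.structure_size);
	else
		printf("gpu_metrics v%u.%u (not handled here)\n",
		       (unsigned int)hdr.format_revision,
		       (unsigned int)hdr.content_revision);
	return 0;
}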
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 21ceafce1f9b..eb46cb10c24d 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -230,13 +230,23 @@ union MESAPI_SET_HW_RESOURCES { uint32_t disable_add_queue_wptr_mc_addr : 1; uint32_t enable_mes_event_int_logging : 1; uint32_t enable_reg_active_poll : 1; - uint32_t reserved : 21; + uint32_t use_disable_queue_in_legacy_uq_preemption : 1; + uint32_t send_write_data : 1; + uint32_t os_tdr_timeout_override : 1; + uint32_t use_rs64mem_for_proc_gang_ctx : 1; + uint32_t use_add_queue_unmap_flag_addr : 1; + uint32_t enable_mes_sch_stb_log : 1; + uint32_t limit_single_process : 1; + uint32_t is_strix_tmz_wa_enabled :1; + uint32_t reserved : 13; }; uint32_t uint32_t_all; }; uint32_t oversubscription_timer; uint64_t doorbell_info; uint64_t event_intr_history_gpu_mc_ptr; + uint64_t timestamp; + uint32_t os_tdr_timeout_in_sec; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -563,6 +573,11 @@ enum MESAPI_MISC_OPCODE { MESAPI_MISC__READ_REG, MESAPI_MISC__WAIT_REG_MEM, MESAPI_MISC__SET_SHADER_DEBUGGER, + MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE, + MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES, + MESAPI_MISC__CHANGE_CONFIG, + MESAPI_MISC__LAUNCH_CLEANER_SHADER, + MESAPI_MISC__MAX, }; @@ -617,6 +632,31 @@ struct SET_SHADER_DEBUGGER { uint32_t trap_en; }; +enum MESAPI_MISC__CHANGE_CONFIG_OPTION { + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0, + MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1, + MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2, + + MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F +}; + +struct CHANGE_CONFIG { + enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode; + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 31; + } bits; + uint32_t all; + } option; + + struct { + uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; +}; + union MESAPI__MISC { struct { union MES_API_HEADER header; @@ -631,6 +671,7 @@ union MESAPI__MISC { struct WAIT_REG_MEM wait_reg_mem; struct SET_SHADER_DEBUGGER set_shader_debugger; enum MES_AMD_PRIORITY_LEVEL queue_sch_level; + struct CHANGE_CONFIG change_config; uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; }; diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h index 101e2fe962c6..c9b2ca5cf75f 100644 --- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h @@ -643,6 +643,10 @@ enum MESAPI_MISC_OPCODE { MESAPI_MISC__SET_SHADER_DEBUGGER, MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE, MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES, + MESAPI_MISC__QUERY_HUNG_ENGINE_ID, + MESAPI_MISC__CHANGE_CONFIG, + MESAPI_MISC__LAUNCH_CLEANER_SHADER, + MESAPI_MISC__SETUP_MES_DBGEXT, MESAPI_MISC__MAX, }; @@ -713,6 +717,31 @@ struct SET_GANG_SUBMIT { uint32_t slave_gang_context_array_index; }; +enum MESAPI_MISC__CHANGE_CONFIG_OPTION { + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0, + MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1, + MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2, + + MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F +}; + +struct CHANGE_CONFIG { + enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode; + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + } bits; + uint32_t all; + } option; + + struct { + 
uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; +}; + union MESAPI__MISC { struct { union MES_API_HEADER header; @@ -726,7 +755,7 @@ union MESAPI__MISC { struct WAIT_REG_MEM wait_reg_mem; struct SET_SHADER_DEBUGGER set_shader_debugger; enum MES_AMD_PRIORITY_LEVEL queue_sch_level; - + struct CHANGE_CONFIG change_config; uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; }; uint64_t timestamp; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index d5d6ab484e5a..136e8193867c 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -145,15 +145,12 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_dpm_get_current_power_state(adev, &pm); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%s\n", @@ -185,11 +182,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_power_state(adev, state); @@ -273,15 +268,12 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; level = amdgpu_dpm_get_performance_level(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%s\n", @@ -336,11 +328,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { @@ -374,16 +364,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; if (amdgpu_dpm_get_pp_num_states(adev, &data)) memset(&data, 0, sizeof(data)); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); buf_len = sysfs_emit(buf, "states: %d\n", data.nums); @@ -412,17 +399,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_dpm_get_current_power_state(adev, &pm); ret = amdgpu_dpm_get_pp_num_states(adev, &data); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (ret) @@ -485,11 +469,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - ret = 
pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_get_pp_num_states(adev, &data); if (ret) @@ -544,15 +526,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_pp_table(adev, &table); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (size <= 0) @@ -580,11 +559,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_pp_table(adev, buf, count); @@ -808,11 +785,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } if (amdgpu_dpm_set_fine_grain_clk_vol(adev, type, @@ -865,11 +840,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; for (clk_index = 0 ; clk_index < 6 ; clk_index++) { ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size); @@ -888,7 +861,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -929,11 +901,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); @@ -960,17 +930,14 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_ppfeature_status(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1029,11 +996,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size); if (ret == -ENOENT) @@ -1042,7 +1007,6 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); 
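The amdgpu_pm.c conversion running through these hunks is mechanical: sysfs readers move from pm_runtime_get_sync() to pm_runtime_get_if_active(), so a monitoring read never wakes a runtime-suspended GPU and no longer refreshes its last-busy timestamp, while writers move to pm_runtime_resume_and_get(), which holds no reference when it fails and therefore needs no put on the error path. A minimal sketch of the two resulting shapes; foo_show() and foo_store() are illustrative stand-ins, not handlers from this patch:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>

/* Read side: take a usage reference only if the device is already
 * runtime-active. A return of 0 means "suspended", reported as -EPERM;
 * a negative value is a runtime-PM error passed through unchanged. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	u32 value = 0;
	int ret;

	ret = pm_runtime_get_if_active(dev);
	if (ret <= 0)
		return ret ?: -EPERM;

	/* ... query the hardware into 'value' ... */

	pm_runtime_put_autosuspend(dev);
	return sysfs_emit(buf, "%u\n", value);
}

/* Write side: resuming the device is acceptable here, and
 * pm_runtime_resume_and_get() holds no reference on failure, so the
 * error path is a plain return. */
static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... apply the new setting ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return count;
}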
return size; @@ -1102,11 +1066,9 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, if (ret) return ret; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_force_clock_level(adev, type, mask); @@ -1283,15 +1245,12 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; value = amdgpu_dpm_get_sclk_od(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%d\n", value); @@ -1317,11 +1276,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); @@ -1345,15 +1302,12 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; value = amdgpu_dpm_get_mclk_od(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%d\n", value); @@ -1379,11 +1333,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); @@ -1427,17 +1379,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_power_profile_mode(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1492,11 +1441,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); @@ -1520,16 +1467,13 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = pm_runtime_get_if_active(adev->dev); + if (r <= 0) + return r ?: -EPERM; /* get the sensor value */ r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1639,15 +1583,12 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, if 
(!adev->asic_funcs->get_pcie_usage) return -ENODATA; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_asic_get_pcie_usage(adev, &count0, &count1); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%llu %llu %i\n", @@ -1770,11 +1711,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit); if (!ret) @@ -1782,7 +1721,6 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, else size = sysfs_emit(buf, "failed to get thermal limit\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1807,14 +1745,14 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_apu_thermal_limit(adev, value); if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); dev_err(dev, "failed to update thermal limit\n"); return ret; } @@ -1849,15 +1787,12 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1890,11 +1825,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); if (size <= 0) @@ -1906,7 +1839,6 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, memcpy(buf, gpu_metrics, size); out: - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -2008,11 +1940,9 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(ddev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(ddev->dev); + r = pm_runtime_resume_and_get(ddev->dev); + if (r < 0) return r; - } r = kstrtoint(buf, 10, &bias); if (r) @@ -2335,11 +2265,9 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev, policy_attr = container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val); @@ -2772,15 +2700,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, if (adev->in_suspend && 
!adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) @@ -2817,11 +2742,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + ret = pm_runtime_resume_and_get(adev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); @@ -2866,11 +2789,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -2907,15 +2828,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -2937,15 +2855,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -3001,15 +2916,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -3036,11 +2948,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -3076,15 +2986,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_fan_control_mode(adev, 
&pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) @@ -3119,11 +3026,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); @@ -3248,11 +3153,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = pm_runtime_get_if_active(adev->dev); + if (r <= 0) + return r ?: -EPERM; r = amdgpu_dpm_get_power_limit(adev, &limit, pp_limit_level, power_type); @@ -3262,7 +3165,6 @@ else size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return size; @@ -3339,11 +3241,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ value |= limit_type << 24; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_set_power_limit(adev, value); @@ -3787,17 +3687,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_print_clock_levels(adev, od_type, buf); if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev->dev); pm_runtime_put_autosuspend(adev->dev); return size; @@ -3879,23 +3776,23 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, if (ret) return ret; - ret = pm_runtime_get_sync(adev->dev); + ret = pm_runtime_resume_and_get(adev->dev); if (ret < 0) - goto err_out0; + return ret; ret = amdgpu_dpm_odn_edit_dpm_table(adev, cmd_type, parameter, parameter_size); if (ret) - goto err_out1; + goto err_out; if (cmd_type == PP_OD_COMMIT_DPM_TABLE) { ret = amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); if (ret) - goto err_out1; + goto err_out; } pm_runtime_mark_last_busy(adev->dev); @@ -3903,9 +3800,8 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, return count; -err_out1: +err_out: pm_runtime_mark_last_busy(adev->dev); -err_out0: pm_runtime_put_autosuspend(adev->dev); return ret; @@ -4213,6 +4109,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev) return umode; } +/** + * DOC: fan_zero_rpm_enable + * + * The amdgpu driver provides a sysfs API for checking and adjusting the + * zero RPM feature. + * + * Reading back the file shows you the current setting and the permitted + * ranges if changeable. + * + * Writing an integer to the file changes the setting accordingly. + * + * When you have finished editing, write "c" (commit) to the file to commit + * your changes. + * + * If you want to reset to the default value, write "r" (reset) to the file to + * reset it. + */
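Both DOC entries describe the same staged-edit protocol the other gpu_od attributes use: write an integer to stage a value, then write "c" to commit or "r" to revert. A hypothetical userspace sketch; the card0 path is illustrative and varies by system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path: the attribute sits under the card's gpu_od
	 * directory on systems where the feature is supported. */
	const char *path =
		"/sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_zero_rpm_enable";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "0\n", 2);	/* stage: disable the zero-RPM feature */
	write(fd, "c\n", 2);	/* "c" commits the staged setting */
	close(fd);
	return 0;
}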
+static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf); +} + +static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, + buf, + count); +} + +static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET) + umode |= S_IWUSR; + + return umode; +} + +/** + * DOC: fan_zero_rpm_stop_temperature + * + * The amdgpu driver provides a sysfs API for checking and adjusting the + * zero RPM stop temperature feature. + * + * Reading back the file shows you the current setting and the permitted + * ranges if changeable. + * + * Writing an integer to the file changes the setting accordingly. + * + * When you have finished editing, write "c" (commit) to the file to commit + * your changes. + * + * If you want to reset to the default value, write "r" (reset) to the file to + * reset it. + * + * This setting works only if the Zero RPM setting is enabled. It adjusts the + * temperature below which the fan can stop. + */ +static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf); +} + +static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, + buf, + count); +} + +static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET) + umode |= S_IWUSR; + + return umode; +} + static struct od_feature_set amdgpu_od_set = { .containers = { [0] = { @@ -4258,6 +4265,22 @@ static struct od_feature_set amdgpu_od_set = { .store = fan_minimum_pwm_store, }, }, + [5] = { + .name = "fan_zero_rpm_enable", + .ops = { + .is_visible = fan_zero_rpm_enable_visible, + .show = fan_zero_rpm_enable_show, + .store = fan_zero_rpm_enable_store, + }, + }, + [6] = { + .name = "fan_zero_rpm_stop_temperature", + .ops = { + .is_visible = fan_zero_rpm_stop_temp_visible, + .show = fan_zero_rpm_stop_temp_show, + .store = fan_zero_rpm_stop_temp_store, + }, + }, }, }, }, @@ -4758,11 +4781,9 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) if (adev->in_suspend && 
!adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(dev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(dev->dev); + r = pm_runtime_resume_and_get(dev->dev); + if (r < 0) return r; - } if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) { r = amdgpu_debugfs_pm_info_pp(m, adev); @@ -4777,7 +4798,6 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) seq_printf(m, "\n"); out: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index f5bf41f21c41..363af8990aa2 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -328,6 +328,10 @@ struct config_table_setting #define OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET BIT(7) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE BIT(8) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET BIT(9) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE BIT(10) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET BIT(11) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE BIT(12) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET BIT(13) struct amdgpu_pm { struct mutex mutex; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index e8b6989a40f3..8908646ad620 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -2954,9 +2954,9 @@ static int kv_dpm_get_temp(void *handle) return actual_temp; } -static int kv_dpm_early_init(void *handle) +static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &kv_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -2965,10 +2965,10 @@ static int kv_dpm_early_init(void *handle) return 0; } -static int kv_dpm_late_init(void *handle) +static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block) { /* powerdown unused blocks for now */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -2979,11 +2979,10 @@ static int kv_dpm_late_init(void *handle) return 0; } -static int kv_dpm_sw_init(void *handle) +static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -3024,9 +3023,9 @@ dpm_failed: return ret; } -static int kv_dpm_sw_fini(void *handle) +static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -3035,10 +3034,10 @@ static int kv_dpm_sw_fini(void *handle) return 0; } -static int kv_dpm_hw_init(void *handle) +static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; @@ -3053,9 +3052,9 @@ static int kv_dpm_hw_init(void *handle) return ret; } -static int kv_dpm_hw_fini(void *handle) +static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if 
(adev->pm.dpm_enabled) kv_dpm_disable(adev); @@ -3063,9 +3062,9 @@ static int kv_dpm_hw_fini(void *handle) return 0; } -static int kv_dpm_suspend(void *handle) +static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* disable dpm */ @@ -3076,10 +3075,10 @@ static int kv_dpm_suspend(void *handle) return 0; } -static int kv_dpm_resume(void *handle) +static int kv_dpm_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -3100,17 +3099,6 @@ static bool kv_dpm_is_idle(void *handle) return true; } -static int kv_dpm_wait_for_idle(void *handle) -{ - return 0; -} - - -static int kv_dpm_soft_reset(void *handle) -{ - return 0; -} - static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -3314,12 +3302,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = { .suspend = kv_dpm_suspend, .resume = kv_dpm_resume, .is_idle = kv_dpm_is_idle, - .wait_for_idle = kv_dpm_wait_for_idle, - .soft_reset = kv_dpm_soft_reset, .set_clockgating_state = kv_dpm_set_clockgating_state, .set_powergating_state = kv_dpm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version kv_smu_ip_block = { diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index a1baa13ab2c2..ee23a0f897c5 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -4755,13 +4755,15 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, u32 dram_timing; u32 dram_timing2; u32 burst_time; + int ret; arb_regs->mc_arb_rfsh_rate = (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); - amdgpu_atombios_set_engine_dram_timings(adev, - pl->sclk, - pl->mclk); + ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk, + pl->mclk); + if (ret) + return ret; dram_timing = RREG32(MC_ARB_DRAM_TIMING); dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); @@ -7619,10 +7621,10 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int si_dpm_late_init(void *handle) +static int si_dpm_late_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -7716,10 +7718,10 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) return err; } -static int si_dpm_sw_init(void *handle) +static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -7763,9 +7765,9 @@ dpm_failed: return ret; } -static int si_dpm_sw_fini(void *handle) +static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -7774,11 +7776,11 @@ static int si_dpm_sw_fini(void *handle) return 0; } -static int si_dpm_hw_init(void *handle) +static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; @@ -7793,9 +7795,9 @@ static int si_dpm_hw_init(void *handle) return ret; } -static int si_dpm_hw_fini(void *handle) +static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) si_dpm_disable(adev); @@ -7803,9 +7805,9 @@ static int si_dpm_hw_fini(void *handle) return 0; } -static int si_dpm_suspend(void *handle) +static int si_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* disable dpm */ @@ -7816,10 +7818,10 @@ static int si_dpm_suspend(void *handle) return 0; } -static int si_dpm_resume(void *handle) +static int si_dpm_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -7841,17 +7843,12 @@ static bool si_dpm_is_idle(void *handle) return true; } -static int si_dpm_wait_for_idle(void *handle) +static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX */ return 0; } -static int si_dpm_soft_reset(void *handle) -{ - return 0; -} - static int si_dpm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -7928,10 +7925,10 @@ static void si_dpm_print_power_state(void *handle, amdgpu_dpm_print_ps_status(adev, rps); } -static int si_dpm_early_init(void *handle) +static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &si_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -8047,11 +8044,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = { .resume = si_dpm_resume, .is_idle = si_dpm_is_idle, .wait_for_idle = si_dpm_wait_for_idle, - .soft_reset = si_dpm_soft_reset, .set_clockgating_state = si_dpm_set_clockgating_state, .set_powergating_state = si_dpm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version si_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index a71c6117d7e5..26624a716fc6 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -75,11 +75,10 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev) hwmgr = NULL; } -static int pp_early_init(void *handle) +static int pp_early_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amd_powerplay_create(adev); if (ret != 0) @@ -131,9 +130,9 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work) orderly_poweroff(true); } -static int pp_sw_init(void *handle) +static int pp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret = 0; @@ -148,9 +147,9 @@ static int pp_sw_init(void *handle) return ret; } -static int pp_sw_fini(void *handle) +static int pp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + 
struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; hwmgr_sw_fini(hwmgr); @@ -160,10 +159,10 @@ static int pp_sw_fini(void *handle) return 0; } -static int pp_hw_init(void *handle) +static int pp_hw_init(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; ret = hwmgr_hw_init(hwmgr); @@ -174,10 +173,9 @@ static int pp_hw_init(void *handle) return ret; } -static int pp_hw_fini(void *handle) +static int pp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -217,9 +215,9 @@ static void pp_reserve_vram_for_smu(struct amdgpu_device *adev) } } -static int pp_late_init(void *handle) +static int pp_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; if (hwmgr && hwmgr->pm_en) @@ -231,9 +229,9 @@ static int pp_late_init(void *handle) return 0; } -static void pp_late_fini(void *handle) +static void pp_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.smu_prv_buffer) amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); @@ -246,25 +244,15 @@ static bool pp_is_idle(void *handle) return false; } -static int pp_wait_for_idle(void *handle) -{ - return 0; -} - -static int pp_sw_reset(void *handle) -{ - return 0; -} - static int pp_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } -static int pp_suspend(void *handle) +static int pp_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -272,10 +260,9 @@ static int pp_suspend(void *handle) return hwmgr_suspend(hwmgr); } -static int pp_resume(void *handle) +static int pp_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; return hwmgr_resume(hwmgr); } @@ -298,12 +285,8 @@ static const struct amd_ip_funcs pp_ip_funcs = { .suspend = pp_suspend, .resume = pp_resume, .is_idle = pp_is_idle, - .wait_for_idle = pp_wait_for_idle, - .soft_reset = pp_sw_reset, .set_clockgating_state = pp_set_clockgating_state, .set_powergating_state = pp_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version pp_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index b56298d9da98..fe24219c3bf4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -28,7 +28,6 @@ #include "ppatomctrl.h" #include "atombios.h" #include "cgs_common.h" -#include "ppevvmath.h" #define MEM_ID_MASK 0xff000000 #define MEM_ID_SHIFT 24 @@ -677,433 +676,6 @@ bool atomctrl_get_pp_assign_pin( return bRet; } -int atomctrl_calculate_voltage_evv_on_sclk( - struct pp_hwmgr *hwmgr, - uint8_t voltage_type, - uint32_t sclk, - uint16_t 
virtual_voltage_Id, - uint16_t *voltage, - uint16_t dpm_level, - bool debug) -{ - ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; - struct amdgpu_device *adev = hwmgr->adev; - EFUSE_LINEAR_FUNC_PARAM sRO_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; - EFUSE_INPUT_PARAMETER sInput_FuseValues; - READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; - - uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; - fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; - fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; - fInt fLkg_FT, repeat; - fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; - fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; - fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; - fInt fSclk_margin, fSclk, fEVV_V; - fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; - uint32_t ul_FT_Lkg_V0NORM; - fInt fLn_MaxDivMin, fMin, fAverage, fRange; - fInt fRoots[2]; - fInt fStepSize = GetScaledFraction(625, 100000); - - int result; - - getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) - smu_atom_get_data_table(hwmgr->adev, - GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), - NULL, NULL, NULL); - - if (!getASICProfilingInfo) - return -1; - - if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) - return -1; - - /*----------------------------------------------------------- - *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL - *----------------------------------------------------------- - */ - fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); - - switch (dpm_level) { - case 1: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); - break; - case 2: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); - break; - case 3: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); - break; - case 4: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); - break; - case 5: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); - break; - case 6: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); - break; - case 7: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); - break; - default: - pr_err("DPM Level not supported\n"); - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); - } - - /*------------------------- - * DECODING FUSE VALUES - * ------------------------ - */ - /*Decode RO_Fused*/ - sRO_fuse = getASICProfilingInfo->sRoFuse; - - sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = 
amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - /* Finally, the actual fuse value */ - ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); - fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); - fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); - - sCACm_fuse = getASICProfilingInfo->sCACm; - - sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); - - fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); - - sCACb_fuse = getASICProfilingInfo->sCACb; - - sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); - - fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); - - sKt_Beta_fuse = getASICProfilingInfo->sKt_b; - - sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); - - fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, - fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); - - sKv_m_fuse = getASICProfilingInfo->sKv_m; - - sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - if (result) - return result; - - ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 
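Each deleted block here repeats one recipe: describe a fuse field by index, LSB and bit length in sInput_FuseValues, read it through the ReadEfuseValue ATOM command table via amdgpu_atom_execute_table(), then map the raw field onto a calibrated range. For the linear fuses that mapping is fDecodeLinearFuse(), whose fixed-point definition appears at the end of this patch. A plain floating-point restatement for illustration only (the kernel code cannot use floats, hence the fInt machinery):

#include <stdint.h>

/* decoded = min + raw / (2^bitlength - 1) * range
 * e.g. a 10-bit raw value of 512 with min = 1.0 and range = 3.0
 * decodes to 1.0 + 512/1023 * 3.0, roughly 2.50. */
static double decode_linear_fuse(uint32_t raw, double min, double range,
				 uint32_t bitlength)
{
	double bit_max = (double)((1u << bitlength) - 1);

	return min + (double)raw / bit_max * range;
}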
- fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); - fRange = fMultiply(fRange, ConvertToFraction(-1)); - - fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, - fAverage, fRange, sKv_m_fuse.ucEfuseLength); - - sKv_b_fuse = getASICProfilingInfo->sKv_b; - - sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); - - fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, - fAverage, fRange, sKv_b_fuse.ucEfuseLength); - - /* Decoding the Leakage - No special struct container */ - /* - * usLkgEuseIndex=56 - * ucLkgEfuseBitLSB=6 - * ucLkgEfuseLength=10 - * ulLkgEncodeLn_MaxDivMin=69077 - * ulLkgEncodeMax=1000000 - * ulLkgEncodeMin=1000 - * ulEfuseLogisticAlpha=13 - */ - - sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; - sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; - sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); - fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); - - fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, - fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); - fLkg_FT = fFT_Lkg_V0NORM; - - /*------------------------------------------- - * PART 2 - Grabbing all required values - *------------------------------------------- - */ - fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), - ConvertToFraction(uPow(-1, 
getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - - fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); - fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); - fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - - fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); - - fMargin_FMAX_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); - fMargin_Plat_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); - fMargin_FMAX_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); - fMargin_Plat_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); - - fMargin_DC_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); - fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); - - fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); - fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); - fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); - fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); - fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); - - fSclk = GetScaledFraction(sclk, 100); - - fV_max = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); - fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); - fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); - fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); - fV_FT = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); - fV_min = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); - - /*----------------------- - * PART 3 - *----------------------- - */ - - fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); - fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); - fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), - fAdd(fMultiply(fSM_A3, fSclk), - fSubtract(fSM_A7, fRO_fused))))); - - fVDDC_base = fSubtract(fRO_fused, - fSubtract(fMargin_RO_c, - fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); - fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2)); - - repeat = fSubtract(fVDDC_base, - fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); - - fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, - fGetSquare(repeat)), - fAdd(fMultiply(fMargin_RO_b, repeat), - fMargin_RO_c)); - - fDC_SCLK = fSubtract(fRO_fused, - fSubtract(fRO_DC_margin, - fSubtract(fSM_A3, - fMultiply(fSM_A2, repeat)))); - fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1)); - - fSigma_DC = fSubtract(fSclk, fDC_SCLK); - - fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); - fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); - fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); - fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); - - fSquared_Sigma_DC = fGetSquare(fSigma_DC); - fSquared_Sigma_CR = fGetSquare(fSigma_CR); - fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); - - 
fSclk_margin = fAdd(fMicro_FMAX, - fAdd(fMicro_CR, - fAdd(fMargin_fixed, - fSqrt(fAdd(fSquared_Sigma_FMAX, - fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); - /* - fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; - fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; - fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; - */ - - fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); - fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); - fC_Term = fAdd(fRO_DC_margin, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), - fAdd(fSclk, fSclk_margin)), - fAdd(fMultiply(fSM_A3, - fAdd(fSclk, fSclk_margin)), - fSubtract(fSM_A7, fRO_fused))))); - - SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); - - if (GreaterThan(fRoots[0], fRoots[1])) - fEVV_V = fRoots[1]; - else - fEVV_V = fRoots[0]; - - if (GreaterThan(fV_min, fEVV_V)) - fEVV_V = fV_min; - else if (GreaterThan(fEVV_V, fV_max)) - fEVV_V = fSubtract(fV_max, fStepSize); - - fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); - - /*----------------- - * PART 4 - *----------------- - */ - - fV_x = fV_min; - - while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { - fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( - fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), - fGetSquare(fV_x)), fDerateTDP); - - fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, - fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, - fT_prod), fKv_b_fused), fV_x)), fV_x))); - fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_prod))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_FT))); - - fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); - - fTDP_Current = fDivide(fTDP_Power, fV_x); - - fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), - ConvertToFraction(10))); - - fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); - - if (GreaterThan(fV_max, fV_NL) && - (GreaterThan(fV_NL, fEVV_V) || - Equal(fV_NL, fEVV_V))) { - fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); - - *voltage = (uint16_t)fV_NL.partial.real; - break; - } else - fV_x = fAdd(fV_x, fStepSize); - } - - return result; -} - /** * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table. 
* @hwmgr: input: pointer to hwManager diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h index 1f987e846628..22b0ac12df97 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h @@ -316,8 +316,6 @@ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, pp_atomctrl_clock_dividers_kong *dividers); extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t *efuse); -extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers); extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h deleted file mode 100644 index 409aeec6baa9..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h +++ /dev/null @@ -1,561 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include <asm/div64.h> - -enum ppevvmath_constants { - /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ - SHIFT_AMOUNT = 16, - - /* Change this value to change the number of decimal places in the final output - 5 is a good default */ - PRECISION = 5, - - SHIFTED_2 = (2 << SHIFT_AMOUNT), - - /* 32767 - Might change in the future */ - MAX = (1 << (SHIFT_AMOUNT - 1)) - 1, -}; - -/* ------------------------------------------------------------------------------- - * NEW TYPE - fINT - * ------------------------------------------------------------------------------- - * A variable of type fInt can be accessed in 3 ways using the dot (.) operator - * fInt A; - * A.full => The full number as it is. 
Generally not easy to read - * A.partial.real => Only the integer portion - * A.partial.decimal => Only the fractional portion - */ -typedef union _fInt { - int full; - struct _partial { - unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/ - int real: 32 - SHIFT_AMOUNT; - } partial; -} fInt; - -/* ------------------------------------------------------------------------------- - * Function Declarations - * ------------------------------------------------------------------------------- - */ -static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ - -static fInt fNegate(fInt); /* Returns -1 * input fInt value */ -static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ -static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ -static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ -static fInt fDivide (fInt A, fInt B); /* Returns A/B */ -static fInt fGetSquare(fInt); /* Returns the square of a fInt number */ -static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ - -static int uAbs(int); /* Returns the Absolute value of the Int */ -static int uPow(int base, int exponent); /* Returns base^exponent an INT */ - -static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ -static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ -static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ - -static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ -static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ - -/* Fuse decoding functions - * ------------------------------------------------------------------------------------- - */ -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); -static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); - -/* Internal Support Functions - Use these ONLY for testing or adding to internal functions - * ------------------------------------------------------------------------------------- - * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. 
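The union declared above packs a Q16.16 value: the low SHIFT_AMOUNT bits hold the fraction, the remaining high bits the signed integer part, and .full views both at once. A small stand-alone demonstration of that layout (it assumes the little-endian, GCC-style bit-field ordering the deleted header also relied on; bit-field layout is otherwise implementation-defined):

#include <stdint.h>
#include <stdio.h>

#define SHIFT_AMOUNT 16			/* Q16.16, as in the deleted header */

typedef union {
	int32_t full;
	struct {
		uint32_t decimal : SHIFT_AMOUNT;	/* fractional bits */
		int32_t real : 32 - SHIFT_AMOUNT;	/* integer bits */
	} partial;
} fint_t;

int main(void)
{
	fint_t x;

	/* 3.5 = 3 + 1/2: integer part 3, top fractional bit set */
	x.full = (3 << SHIFT_AMOUNT) | (1 << (SHIFT_AMOUNT - 1));

	/* prints: real=3 decimal=0x8000 value=3.500000 */
	printf("real=%d decimal=0x%04x value=%f\n",
	       x.partial.real, x.partial.decimal,
	       x.full / (double)(1 << SHIFT_AMOUNT));
	return 0;
}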
- */ -static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ -static fInt fNegate(fInt); - -static int uGetScaledDecimal (fInt); /* Internal function */ -static int GetReal (fInt A); /* Internal function */ - -/* ------------------------------------------------------------------------------------- - * TROUBLESHOOTING INFORMATION - * ------------------------------------------------------------------------------------- - * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767) - * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767) - * 3) fMultiply - OutputOutOfRangeException: - * 4) fGetSquare - OutputOutOfRangeException: - * 5) fDivide - DivideByZeroException - * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number - */ - -/* ------------------------------------------------------------------------------------- - * START OF CODE - * ------------------------------------------------------------------------------------- - */ -static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ -{ - uint32_t i; - bool bNegated = false; - - fInt fPositiveOne = ConvertToFraction(1); - fInt fZERO = ConvertToFraction(0); - - fInt lower_bound = Divide(78, 10000); - fInt solution = fPositiveOne; /*Starting off with baseline of 1 */ - fInt error_term; - - static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - - if (GreaterThan(fZERO, exponent)) { - exponent = fNegate(exponent); - bNegated = true; - } - - while (GreaterThan(exponent, lower_bound)) { - for (i = 0; i < 11; i++) { - if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) { - exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000)); - solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000)); - } - } - } - - error_term = fAdd(fPositiveOne, exponent); - - solution = fMultiply(solution, error_term); - - if (bNegated) - solution = fDivide(fPositiveOne, solution); - - return solution; -} - -static fInt fNaturalLog(fInt value) -{ - uint32_t i; - fInt upper_bound = Divide(8, 1000); - fInt fNegativeOne = ConvertToFraction(-1); - fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */ - fInt error_term; - - static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - - while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) { - for (i = 0; i < 10; i++) { - if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) { - value = fDivide(value, GetScaledFraction(k_array[i], 10000)); - solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000)); - } - } - } - - error_term = fAdd(fNegativeOne, value); - - return fAdd(solution, error_term); -} - -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_decoded_value; - - f_decoded_value = fDivide(f_fuse_value, f_bit_max_value); - f_decoded_value = fMultiply(f_decoded_value, f_range); - f_decoded_value = fAdd(f_decoded_value, f_min); - - return f_decoded_value; -} - - -static fInt 
fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_CONSTANT_NEG13 = ConvertToFraction(-13); - fInt f_CONSTANT1 = ConvertToFraction(1); - - fInt f_decoded_value; - - f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1); - f_decoded_value = fNaturalLog(f_decoded_value); - f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13)); - f_decoded_value = fAdd(f_decoded_value, f_average); - - return f_decoded_value; -} - -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) -{ - fInt fLeakage; - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse)); - fLeakage = fDivide(fLeakage, f_bit_max_value); - fLeakage = fExponential(fLeakage); - fLeakage = fMultiply(fLeakage, f_min); - - return fLeakage; -} - -static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt fNegate(fInt X) -{ - fInt CONSTANT_NEGONE = ConvertToFraction(-1); - return fMultiply(X, CONSTANT_NEGONE); -} - -static fInt Convert_ULONG_ToFraction(uint32_t X) -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt GetScaledFraction(int X, int factor) -{ - int times_shifted, factor_shifted; - bool bNEGATED; - fInt fValue; - - times_shifted = 0; - factor_shifted = 0; - bNEGATED = false; - - if (X < 0) { - X = -1*X; - bNEGATED = true; - } - - if (factor < 0) { - factor = -1*factor; - bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */ - } - - if ((X > MAX) || factor > MAX) { - if ((X/factor) <= MAX) { - while (X > MAX) { - X = X >> 1; - times_shifted++; - } - - while (factor > MAX) { - factor = factor >> 1; - factor_shifted++; - } - } else { - fValue.full = 0; - return fValue; - } - } - - if (factor == 1) - return ConvertToFraction(X); - - fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); - - fValue.full = fValue.full << times_shifted; - fValue.full = fValue.full >> factor_shifted; - - return fValue; -} - -/* Addition using two fInts */ -static fInt fAdd (fInt X, fInt Y) -{ - fInt Sum; - - Sum.full = X.full + Y.full; - - return Sum; -} - -/* Addition using two fInts */ -static fInt fSubtract (fInt X, fInt Y) -{ - fInt Difference; - - Difference.full = X.full - Y.full; - - return Difference; -} - -static bool Equal(fInt A, fInt B) -{ - if (A.full == B.full) - return true; - else - return false; -} - -static bool GreaterThan(fInt A, fInt B) -{ - if (A.full > B.full) - return true; - else - return false; -} - -static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ -{ - fInt Product; - int64_t tempProduct; - - /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/ - /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION - bool X_LessThanOne, Y_LessThanOne; - - X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0); - Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0); - - if 
(X_LessThanOne && Y_LessThanOne) { - Product.full = X.full * Y.full; - return Product - }*/ - - tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */ - tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */ - Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */ - - return Product; -} - -static fInt fDivide (fInt X, fInt Y) -{ - fInt fZERO, fQuotient; - int64_t longlongX, longlongY; - - fZERO = ConvertToFraction(0); - - if (Equal(Y, fZERO)) - return fZERO; - - longlongX = (int64_t)X.full; - longlongY = (int64_t)Y.full; - - longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */ - - div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */ - - fQuotient.full = (int)longlongX; - return fQuotient; -} - -static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ -{ - fInt fullNumber, scaledDecimal, scaledReal; - - scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */ - - scaledDecimal.full = uGetScaledDecimal(A); - - fullNumber = fAdd(scaledDecimal, scaledReal); - - return fullNumber.full; -} - -static fInt fGetSquare(fInt A) -{ - return fMultiply(A, A); -} - -/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -static fInt fSqrt(fInt num) -{ - fInt F_divide_Fprime, Fprime; - fInt test; - fInt twoShifted; - int seed, counter, error; - fInt x_new, x_old, C, y; - - fInt fZERO = ConvertToFraction(0); - - /* (0 > num) is the same as (num < 0), i.e., num is negative */ - - if (GreaterThan(fZERO, num) || Equal(fZERO, num)) - return fZERO; - - C = num; - - if (num.partial.real > 3000) - seed = 60; - else if (num.partial.real > 1000) - seed = 30; - else if (num.partial.real > 100) - seed = 10; - else - seed = 2; - - counter = 0; - - if (Equal(num, fZERO)) /*Square Root of Zero is zero */ - return fZERO; - - twoShifted = ConvertToFraction(2); - x_new = ConvertToFraction(seed); - - do { - counter++; - - x_old.full = x_new.full; - - test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */ - y = fSubtract(test, C); /*y = f(x) = x^2 - C; */ - - Fprime = fMultiply(twoShifted, x_old); - F_divide_Fprime = fDivide(y, Fprime); - - x_new = fSubtract(x_old, F_divide_Fprime); - - error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old); - - if (counter > 20) /*20 is already way too many iterations. 
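Three tricks carry the arithmetic in this file: fMultiply widens to 64 bits so the Q32.32 product can be shifted back down to Q16.16, fDivide pre-shifts the dividend so the quotient keeps the format, and fSqrt runs capped Newton-Raphson steps. A compilable sketch of the same arithmetic with hypothetical names; the Newton step below uses the algebraically equivalent x' = (x + c/x)/2 form with 64-bit intermediates, which sidesteps the intermediate overflow the original risked for large inputs:

#include <stdint.h>

#define FSHIFT 16

static int32_t fmul(int32_t x, int32_t y)
{
	/* Q16.16 * Q16.16 = Q32.32; drop the low 16 fractional bits */
	return (int32_t)(((int64_t)x * y) >> FSHIFT);
}

static int32_t fdiv(int32_t x, int32_t y)
{
	if (y == 0)
		return 0;	/* the deleted fDivide() also returned 0 */

	/* Pre-shift the dividend so the quotient is still Q16.16 */
	return (int32_t)(((int64_t)x << FSHIFT) / y);
}

static int32_t fsqrt(int32_t c)
{
	int64_t x = 1 << FSHIFT;	/* seed at 1.0 */
	int64_t next, c64 = c;
	int i;

	if (c <= 0)
		return 0;		/* matches the deleted fSqrt() */

	for (i = 0; i < 30; i++) {	/* capped, like the original's 20 */
		next = (x + (c64 << FSHIFT) / x) >> 1;
		if (next == x)		/* fixed point reached */
			break;
		x = next;
	}
	return (int32_t)x;
}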
If we dont have an answer by then, we never will*/ - return x_new; - - } while (uAbs(error) > 0); - - return x_new; -} - -static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) -{ - fInt *pRoots = &Roots[0]; - fInt temp, root_first, root_second; - fInt f_CONSTANT10, f_CONSTANT100; - - f_CONSTANT100 = ConvertToFraction(100); - f_CONSTANT10 = ConvertToFraction(10); - - while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { - A = fDivide(A, f_CONSTANT10); - B = fDivide(B, f_CONSTANT10); - C = fDivide(C, f_CONSTANT10); - } - - temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */ - temp = fMultiply(temp, C); /* root = 4*A*C */ - temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */ - temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */ - - root_first = fSubtract(fNegate(B), temp); /* b - Sqrt(b^2 - 4AC) */ - root_second = fAdd(fNegate(B), temp); /* b + Sqrt(b^2 - 4AC) */ - - root_first = fDivide(root_first, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_first = fDivide(root_first, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - root_second = fDivide(root_second, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_second = fDivide(root_second, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - *(pRoots + 0) = root_first; - *(pRoots + 1) = root_second; -} - -/* ----------------------------------------------------------------------------- - * SUPPORT FUNCTIONS - * ----------------------------------------------------------------------------- - */ - -/* Conversion Functions */ -static int GetReal (fInt A) -{ - return (A.full >> SHIFT_AMOUNT); -} - -static fInt Divide (int X, int Y) -{ - fInt A, B, Quotient; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Quotient = fDivide(A, B); - - return Quotient; -} - -static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ -{ - int dec[PRECISION]; - int i, scaledDecimal = 0, tmp = A.partial.decimal; - - for (i = 0; i < PRECISION; i++) { - dec[i] = tmp / (1 << SHIFT_AMOUNT); - tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); - tmp *= 10; - scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i); - } - - return scaledDecimal; -} - -static int uPow(int base, int power) -{ - if (power == 0) - return 1; - else - return (base)*uPow(base, power - 1); -} - -static int uAbs(int X) -{ - if (X < 0) - return (X * -1); - else - return X; -} - -static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) -{ - fInt solution; - - solution = fDivide(A, fStepSize); - solution.partial.decimal = 0; /*All fractional digits changes to 0 */ - - if (error_term) - solution.partial.real += 1; /*Error term of 1 added */ - - solution = fMultiply(solution, fStepSize); - solution = fAdd(solution, fStepSize); - - return solution; -} - diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c index 79c817752a33..2b446f8866ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c @@ -62,578 +62,6 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) return table_address; } -#if 0 -static void dump_pptable(PPTable_t *pptable) -{ - int i; - - pr_info("Version = 0x%08x\n", pptable->Version); - - pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - 
pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0); - pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau); - pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1); - pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau); - pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2); - pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau); - pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3); - pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau); - pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc); - pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau); - pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - pr_info("TedgeLimit = %d\n", pptable->TedgeLimit); - pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit); - pr_info("ThbmLimit = %d\n", pptable->ThbmLimit); - pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit); - pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit); - pr_info("TplxLimit = %d\n", pptable->TplxLimit); - pr_info("FitLimit = %d\n", pptable->FitLimit); - - pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage); - pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits); - pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit); - - pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc); - pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - - pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid); - pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid); - pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - pr_info("Padding234 = 0x%02x\n", pptable->Padding234); - - pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); - pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - pr_info("[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c); - - pr_info("[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 
0x%08x}\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c); - - pr_info("[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c); - - pr_info("[PPCLK_ECLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_ECLK].padding, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c); - - pr_info("[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c); - - pr_info("[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c); - - pr_info("[PPCLK_DCEFCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " 
.ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCEFCLK].padding, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c); - - pr_info("[PPCLK_DISPCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DISPCLK].padding, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c); - - pr_info("[PPCLK_PIXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PIXCLK].padding, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c); - - pr_info("[PPCLK_PHYCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PHYCLK].padding, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c); - - pr_info("[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - 
pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c); - - - pr_info("FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - pr_info("FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - pr_info("FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - pr_info("FreqTableEclk\n"); - for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]); - - pr_info("FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - pr_info("FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - pr_info("FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - pr_info("FreqTableDcefclk\n"); - for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]); - - pr_info("FreqTableDispclk\n"); - for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]); - - pr_info("FreqTablePixclk\n"); - for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]); - - pr_info("FreqTablePhyclk\n"); - for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]); - - pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]); - pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]); - pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]); - pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]); - pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]); - pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]); - pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks); - - pr_info("Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - pr_info("Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq); - pr_info("Padding789 = 0x%x\n", pptable->Padding789); - pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n", - pptable->CksVoltageOffset.a, - pptable->CksVoltageOffset.b, - pptable->CksVoltageOffset.c); - pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]); - pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]); - pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]); - pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]); - pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); 
- pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource); - pr_info("Padding456 = 0x%x\n", pptable->Padding456); - - pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv); - pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]); - pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]); - pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]); - - pr_info("PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]); - - pr_info("PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]); - - pr_info("LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]); - - pr_info("EnableTdpm = %d\n", pptable->EnableTdpm); - pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - pr_info("FanStopTemp = %d\n", pptable->FanStopTemp); - pr_info("FanStartTemp = %d\n", pptable->FanStartTemp); - - pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); - pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); - pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid); - pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - pr_info("FanGainPlx = %d\n", pptable->FanGainPlx); - pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); - pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); - pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - - pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOn.a, - pptable->dBtcGbGfxCksOn.b, - pptable->dBtcGbGfxCksOn.c); - pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOff.a, - pptable->dBtcGbGfxCksOff.b, - pptable->dBtcGbGfxCksOff.c); - pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - 
pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - pr_info("XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - pr_info("XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - pr_info("XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - pr_info("XgmiUclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]); - pr_info("XgmiSocclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]); - pr_info("XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - - pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides); - pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc); - - pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm); - pr_info("padding16_Fan = %d\n", pptable->padding16_Fan); - - pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - 
pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - - pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - for (i = 0; i < 11; i++) - pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); - - for (i = 0; i < 3; i++) - pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]); - - pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); - pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - - pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V); - - pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset); - pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - pr_info("SocOffset = 0x%x\n", pptable->SocOffset); - pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset); - pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset); - pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - pr_info("AcDcGpio = %d\n", pptable->AcDcGpio); - pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity); - pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio); - pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - - pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio); - pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - pr_info("Padding1 = 0x%x\n", pptable->Padding1); - pr_info("Padding2 = 0x%x\n", pptable->Padding2); - - pr_info("LedPin0 = %d\n", pptable->LedPin0); - pr_info("LedPin1 = %d\n", pptable->LedPin1); - pr_info("LedPin2 = %d\n", pptable->LedPin2); - pr_info("padding8_4 = 0x%x\n", pptable->padding8_4); - - pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); - pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { - pr_info("I2cControllers[%d]:\n", i); - pr_info(" .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - pr_info(" 
.SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - pr_info(" .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - pr_info(" .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - pr_info(" .ThermalThrottler = %d\n", - pptable->I2cControllers[i].ThermalThrottler); - pr_info(" .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - pr_info(" .I2cSpeed = %d\n", - pptable->I2cControllers[i].I2cSpeed); - } - - for (i = 0; i < 10; i++) - pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]); - - for (i = 0; i < 8; i++) - pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]); -} -#endif - static int check_powerplay_tables( struct pp_hwmgr *hwmgr, const ATOM_Vega20_POWERPLAYTABLE *powerplay_table) @@ -652,8 +80,6 @@ static int check_powerplay_tables( return -EINVAL; } - //dump_pptable(&powerplay_table->smcPPTable); - return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 80e60ea2d11e..64f917959576 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -140,7 +140,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, min, - max); + max, + false); return ret; } @@ -251,7 +252,7 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu, if (atomic_read(&power_gate->vcn_gated) ^ enable) return 0; - ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff); if (!ret) atomic_set(&power_gate->vcn_gated, !enable); @@ -549,7 +550,8 @@ bool is_support_sw_smu(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) return false; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) && + amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC)) return true; return false; @@ -741,9 +743,9 @@ static int smu_set_funcs(struct amdgpu_device *adev) return 0; } -static int smu_early_init(void *handle) +static int smu_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu; int r; @@ -825,9 +827,9 @@ static int smu_apply_default_config_table_settings(struct smu_context *smu) return smu_set_config_table(smu, &adev->pm.config_table); } -static int smu_late_init(void *handle) +static int smu_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; @@ -1242,9 +1244,9 @@ static bool smu_is_workload_profile_available(struct smu_context *smu, return smu->workload_map && smu->workload_map[profile].valid_mapping; } -static int smu_sw_init(void *handle) +static int smu_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1259,26 +1261,33 @@ static int smu_sw_init(void *handle) smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 
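The amdgpu_smu.c hunks around here are largely one mechanical conversion: the IP callbacks take a typed struct amdgpu_ip_block * instead of a void *handle, and each body fetches the device as ip_block->adev. A schematic before/after with a trimmed stand-in struct (the real one also carries version and status fields):

struct amdgpu_device;			/* opaque for this sketch */

struct amdgpu_ip_block {
	struct amdgpu_device *adev;
	/* ... type, version and status fields in the real struct ... */
};

/* Old shape: every callback opened with an unchecked cast. */
static int smu_callback_old(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return adev ? 0 : -22;		/* -EINVAL stand-in */
}

/* New shape: typed parameter, no cast; callers such as smu_reset()
 * look the block up via amdgpu_device_ip_get_ip_block(). */
static int smu_callback_new(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return adev ? 0 : -22;
}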
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; + smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; if (smu->is_apu || - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - else - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + } else { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -1326,9 +1335,9 @@ static int smu_sw_init(void *handle) return 0; } -static int smu_sw_fini(void *handle) +static int smu_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1799,10 +1808,10 @@ static int smu_start_smc_engine(struct smu_context *smu) return ret; } -static int smu_hw_init(void *handle) +static int smu_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { @@ -2021,9 +2030,9 @@ static int smu_reset_mp1_state(struct smu_context *smu) return ret; } -static int smu_hw_fini(void *handle) +static int smu_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -2054,9 +2063,9 @@ static int smu_hw_fini(void *handle) return 0; } -static void smu_late_fini(void *handle) +static void smu_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; kfree(smu); @@ -2065,26 +2074,31 @@ static void smu_late_fini(void *handle) static int smu_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + struct 
amdgpu_ip_block *ip_block; int ret; - ret = smu_hw_fini(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC); + if (!ip_block) + return -EINVAL; + + ret = smu_hw_fini(ip_block); if (ret) return ret; - ret = smu_hw_init(adev); + ret = smu_hw_init(ip_block); if (ret) return ret; - ret = smu_late_init(adev); + ret = smu_late_init(ip_block); if (ret) return ret; return 0; } -static int smu_suspend(void *handle) +static int smu_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; uint64_t count; @@ -2116,10 +2130,10 @@ static int smu_suspend(void *handle) return 0; } -static int smu_resume(void *handle) +static int smu_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) @@ -2348,17 +2362,20 @@ static int smu_switch_power_profile(void *handle, return -EINVAL; if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); + smu->driver_workload_mask |= (1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, workload, 0); @@ -2878,6 +2895,10 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; case OD_FAN_MINIMUM_PWM: clk_type = SMU_OD_FAN_MINIMUM_PWM; break; + case OD_FAN_ZERO_RPM_ENABLE: + clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break; + case OD_FAN_ZERO_RPM_STOP_TEMP: + clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break; default: clk_type = SMU_CLK_COUNT; break; } @@ -3049,12 +3070,23 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (smu->user_dpm_profile.user_workload_mask & + (1 << smu->workload_priority[param[param_size]])) + return 0; + + smu->user_dpm_profile.user_workload_mask = + (1 << smu->workload_priority[param[param_size]]); + smu->workload_mask = smu->user_dpm_profile.user_workload_mask | + smu->driver_workload_mask; + ret = smu_bump_power_profile_mode(smu, param, param_size); + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index b44a185d07e8..d665c47f19b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,6 +240,7 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t 
clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; + uint32_t user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,7 +558,8 @@ struct smu_context { bool disable_uclk_switch; uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t driver_workload_mask; + uint32_t workload_priority[WORKLOAD_POLICY_MAX]; uint32_t workload_setting[WORKLOAD_POLICY_MAX]; uint32_t power_profile_mode; uint32_t default_power_profile_mode; @@ -739,7 +741,7 @@ struct pptable_funcs { * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power * management. */ - int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst); /** * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power @@ -859,11 +861,6 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); /** - * @dump_pptable: Print the power play table to the system log. - */ - void (*dump_pptable)(struct smu_context *smu); - - /** * @get_power_limit: Get the device's power limits. */ int (*get_power_limit)(struct smu_context *smu, @@ -1260,7 +1257,8 @@ struct pptable_funcs { * @set_soft_freq_limited_range: Set the soft frequency range of a clock * domain in MHz. */ - int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max, + bool automatic); /** * @set_power_source: Notify the SMU of the current power source. diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 822c6425d90e..0f96b8c59a0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xD +#define SMU_METRICS_TABLE_VERSION 0xE typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -231,6 +231,9 @@ typedef struct __attribute__((packed, aligned(4))) { // PER XCD ACTIVITY uint32_t GfxBusy[8]; uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index e71a721c12b9..a299dc4a8071 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -313,6 +313,8 @@ enum smu_clk_type { SMU_OD_ACOUSTIC_TARGET, SMU_OD_FAN_TARGET_TEMPERATURE, SMU_OD_FAN_MINIMUM_PWM, + SMU_OD_FAN_ZERO_RPM_ENABLE, + SMU_OD_FAN_ZERO_RPM_STOP_TEMP, SMU_CLK_COUNT, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index c2ab336bb530..ed8304d82831 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -255,7 +255,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int 
smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h index 1ad2dff71090..0886d8cffbd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h @@ -56,7 +56,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu); int smu_v12_0_mode2_reset(struct smu_context *smu); int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v12_0_set_driver_table_location(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index e58220a7ee2f..ae3563d71fa0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -219,7 +219,7 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -255,7 +255,8 @@ int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v13_0_set_jpeg_enable(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 727d5b405435..0546b02e198d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -186,7 +186,7 @@ int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -210,7 +210,8 @@ int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v14_0_set_jpeg_enable(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index c0f6b59369b7..4b36c230e43a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return -EINVAL; } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { if (size != 10) @@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } @@ -1559,437 +1558,6 @@ static int arcturus_set_performance_level(struct smu_context 
*smu, return smu_v11_0_set_performance_level(smu, level); } -static void arcturus_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]); - } - - dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit); - dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit); - dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit); - dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit); - dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit); - - dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding); - - dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0]); - dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1]); - dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - 
pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - 
pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0]); - dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1]); - dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2]); - dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3]); - dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456); - - dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm); - dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - 
dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge); - dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot); - dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem); - dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm); - - dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 
0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv); - - dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1); - dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2); - - dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow); - dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", 
pptable->MaxVoltageStepGfx); - dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping); - dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping); - - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent); - dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset); - dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem); - - dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent); - dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset); - dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput); - - dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = %d\n", - 
pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .Speed = %d\n", - pptable->I2cControllers[i].Speed); - } - - dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth); - - dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - -} - static bool arcturus_is_dpm_running(struct smu_context *smu) { int ret = 0; @@ -2002,7 +1570,9 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -2365,8 +1935,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_power_profile_mode = arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, .set_performance_level = arcturus_set_performance_level, - /* debug (internal used) */ - .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, .dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16af1a329621..211635dabed8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1135,7 +1135,9 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) return 0; } -static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int navi10_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -1689,7 +1691,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return 0; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return 0; break; @@ -2081,10 +2083,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 9c3c48297cba..d0ed0d060a8a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ 
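(Aside on the navi10/sienna_cichlid hunks above: dpm_set_vcn_enable() gains an int inst argument, presumably to allow per-instance control on ASICs with more than one VCN engine, and the force_clk_levels callers now pass false for the new automatic argument of smu_v11_0_set_soft_freq_limited_range(), which should keep the previous explicit-range behaviour. A standalone sketch of a caller that wants the old all-instances semantics under the new per-instance signature; the hook, the instance count, and the loop are hypothetical — in the driver the count would come from adev->vcn.num_vcn_inst:)

#include <stdio.h>

#define VCN_INST_COUNT 2 /* hypothetical; stands in for adev->vcn.num_vcn_inst */

/* Hypothetical per-instance hook mirroring the new (smu, enable, inst) shape. */
static int vcn_set_powered(int inst, int enable)
{
	printf("VCN%d -> %s\n", inst, enable ? "on" : "off");
	return 0;
}

int main(void)
{
	/* A caller that still wants the old all-instances behaviour just loops. */
	for (int i = 0; i < VCN_INST_COUNT; i++)
		vcn_set_powered(i, 1);
	return 0;
}

(The diff resumes with the sienna_cichlid file.)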
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1152,7 +1152,9 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) return 0; } -static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; @@ -1469,7 +1471,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, if (ret) goto forec_level_out; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto forec_level_out; break; @@ -1786,10 +1788,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } @@ -2493,1274 +2498,6 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) return val != 0x0; } -static void beige_goby_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_beige_goby_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - 
dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - 
dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - 
pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 
0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", 
pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, 
"OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", 
pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - 
dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", 
pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); 
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - -static void sienna_cichlid_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(11, 0, 13)) { - beige_goby_dump_pptable(smu); - return; - } - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - dev_info(smu->adev->dev, "PaddingLIVmin = 0x%x\n", pptable->PaddingLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - dev_info(smu->adev->dev, "paddingRlcUlvParams[0] = 0x%x\n", pptable->paddingRlcUlvParams[0]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[1] = 0x%x\n", pptable->paddingRlcUlvParams[1]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[2] = 0x%x\n", pptable->paddingRlcUlvParams[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", 
pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - 
pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, 
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - 
dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, 
"UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - 
pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a 
= 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - 
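/*
 * Illustrative sketch only: the eight near-identical [PPCLK_*]
 * DpmDescriptor dumps above could be table-driven instead of copied per
 * clock. clk_names[] is hypothetical; DpmDescriptor_t and the PPCLK_*
 * indices are assumed to match the SMU11 driver interface header used
 * by this file.
 */
static void dump_dpm_descriptors(struct smu_context *smu,
				 const PPTable_t *pptable)
{
	static const char * const clk_names[PPCLK_COUNT] = {
		[PPCLK_GFXCLK] = "PPCLK_GFXCLK", [PPCLK_SOCCLK] = "PPCLK_SOCCLK",
		[PPCLK_UCLK]   = "PPCLK_UCLK",   [PPCLK_FCLK]   = "PPCLK_FCLK",
		[PPCLK_DCLK_0] = "PPCLK_DCLK_0", [PPCLK_VCLK_0] = "PPCLK_VCLK_0",
		[PPCLK_DCLK_1] = "PPCLK_DCLK_1", [PPCLK_VCLK_1] = "PPCLK_VCLK_1",
	};
	int i;

	for (i = 0; i < PPCLK_COUNT; i++) {
		const DpmDescriptor_t *d = &pptable->DpmDescriptor[i];

		/* one line per clock instead of a copied format block */
		dev_info(smu->adev->dev,
			 "[%s] .VoltageMode = 0x%02x .SnapToDiscrete = 0x%02x .NumDiscreteLevels = 0x%02x .SsFmin = 0x%04x\n",
			 clk_names[i], d->VoltageMode, d->SnapToDiscrete,
			 d->NumDiscreteLevels, d->SsFmin);
	}
}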
dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - 
dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); - dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { @@ -4397,7 +3134,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .display_disable_memory_clock_switch = sienna_cichlid_display_disable_memory_clock_switch, .get_power_limit = sienna_cichlid_get_power_limit, .update_pcie_parameters = sienna_cichlid_update_pcie_parameters, - .dump_pptable = sienna_cichlid_dump_pptable, .init_microcode = smu_v11_0_init_microcode, .load_microcode = smu_v11_0_load_microcode, .fini_microcode = smu_v11_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 
16fcd9dcd202..480cf3cb204d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1616,7 +1616,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) break; default: if (!ras || !adev->ras_enabled || - adev->gmc.xgmi.pending_reset) { + (adev->init_lvl->level == + AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) { if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); @@ -1763,7 +1764,8 @@ failed: int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1778,7 +1780,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1786,7 +1791,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1854,6 +1862,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, uint32_t mclk_min = 0, mclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; int ret = 0; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1873,6 +1882,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, mclk_max = mem_table->max; socclk_min = soc_table->min; socclk_max = soc_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1905,13 +1915,15 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { mclk_min = mclk_max = 0; socclk_min = socclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; } @@ -1920,7 +1932,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; } @@ -1929,7 +1942,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 1fe020f1f4db..f89c487dce72 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -461,7 +461,9 @@ static int vangogh_init_smc_tables(struct smu_context *smu) return smu_v11_0_init_smc_tables(smu); } -static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -1079,7 +1081,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long 
*input, } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", @@ -1087,15 +1089,16 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { int ret = 0; @@ -1301,7 +1304,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? max_freq : min_freq; - ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); + ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -1337,7 +1340,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; @@ -1356,7 +1359,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false); if (ret) return ret; @@ -1364,7 +1367,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false); if (ret) return ret; @@ -1372,7 +1375,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false); if (ret) return ret; @@ -1380,7 +1383,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index cc0504b063fa..75a9ea87f419 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -645,7 +645,9 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context return pm_type; } -static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int renoir_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -707,7 +709,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? 
max_freq : min_freq; - ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -740,7 +742,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -890,14 +892,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } @@ -911,7 +913,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; @@ -919,7 +921,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -961,13 +963,13 @@ static int renior_set_dpm_profile_freq(struct smu_context *smu, } if (sclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false); if (socclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false); if (fclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index ed15f5a0fd11..3d3cd546f0ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -211,7 +211,7 @@ int smu_v12_0_mode2_reset(struct smu_context *smu) } int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, bool automatic) { int ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 2c35eb31475a..f6b029354327 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1297,9 +1297,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu, } static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1328,7 +1329,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, return 0; ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, - min, 
max); + min, max, false); if (!ret) { pstate_table->gfxclk_pstate.curr.min = min; pstate_table->gfxclk_pstate.curr.max = max; @@ -1348,7 +1349,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, /* Restore default min/max clocks and enable determinism */ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); if (!ret) { usleep_range(500, 1000); ret = smu_cmn_send_smc_msg_with_param(smu, @@ -1422,7 +1423,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1441,7 +1442,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = pstate_table->gfxclk_pstate.custom.min; max_clk = pstate_table->gfxclk_pstate.custom.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index e17466cc1952..2bfea740dace 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1608,7 +1608,8 @@ failed: int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1623,7 +1624,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1631,7 +1635,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1708,6 +1715,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1739,6 +1747,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1780,13 +1789,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, vclk_min = vclk_max = 0; dclk_min = dclk_max = 0; fclk_min = fclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + 
auto_level); if (ret) return ret; @@ -1798,7 +1809,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1810,7 +1822,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1825,7 +1838,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1840,7 +1854,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1852,7 +1867,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -2088,7 +2104,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) } int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index d53e162dcd8d..80c6b1e523aa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -107,6 +107,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -736,19 +738,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_0_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static int smu_v13_0_0_system_features_control(struct smu_context *smu, bool en) { @@ -1143,6 +1132,14 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1463,6 +1460,42 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += 
sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1560,6 +1593,16 @@ static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1853,6 +1896,48 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_0_od_restore_table_single(smu, input[0]); @@ -1975,7 +2060,8 @@ static int 
smu_v13_0_0_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2122,7 +2208,11 @@ static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) @@ -2188,6 +2278,10 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_0_set_supported_od_feature_mask(smu); @@ -2485,7 +2579,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; - u32 workload_mask, selected_workload_mask; + u32 workload_mask; smu->power_profile_mode = input[size]; @@ -2552,7 +2646,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - selected_workload_mask = workload_mask = 1 << workload_type; + workload_mask = 1 << workload_type; /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && @@ -2567,12 +2661,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask |= 1 << workload_type; } + smu->workload_mask |= workload_mask; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - workload_mask, + smu->workload_mask, NULL); - if (!ret) - smu->workload_mask = selected_workload_mask; + if (!ret) { + smu_cmn_assign_power_profile(smu); + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_FULLSCREEN3D); + smu->power_profile_mode = smu->workload_mask & (1 << workload_type) + ? 
PP_SMC_POWER_PROFILE_FULLSCREEN3D + : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + } + } return ret; } @@ -3024,7 +3128,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .i2c_init = smu_v13_0_0_i2c_control_init, .i2c_fini = smu_v13_0_0_i2c_control_fini, .is_dpm_running = smu_v13_0_0_is_dpm_running, - .dump_pptable = smu_v13_0_0_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 9c2c43bfed0b..f5db181ef489 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -193,7 +193,9 @@ static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en) return ret; } -static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -811,9 +813,10 @@ failed: } static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -950,7 +953,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1046,9 +1049,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = smu_v13_0_5_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1060,7 +1064,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1069,7 +1074,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 55ed6247eb61..fa30a9e1f27a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -102,6 +102,24 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define MCA_BANK_IPID(_ip, _hwid, _type) \ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, } +static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu) +{ + return (smu->adev->flags & AMD_IS_APU) && + smu->smc_fw_version <= 0x4556900; +} + +static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu) +{ + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + return smu->smc_fw_version >= 0x557600; + case IP_VERSION(13, 0, 14): + return smu->smc_fw_version >= 0x05550E00; + default: + return false; + } +} + struct mca_bank_ipid { enum amdgpu_mca_ip ip; uint16_t hwid; @@ -253,7 +271,7 @@ struct PPTable_t { #define SMUQ10_TO_UINT(x) ((x) >> 10) #define SMUQ10_FRAC(x) ((x) & 0x3ff) 
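As a standalone sketch (illustration only, not code from the patch): the SMUQ10_* macros treat a 32-bit value as Q10.10 fixed point, with the low 10 bits holding the fraction; SMUQ10_ROUND, defined just below, rounds to nearest by adding 1 when the fraction is at least 0x200 (i.e. 0.5).

#include <stdint.h>
#include <stdio.h>

#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x)    ((x) & 0x3ff)
#define SMUQ10_ROUND(x)   ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

int main(void)
{
	/* 0x189633 encodes 1573 + 563/1024 (~1573.55) in Q10.10 */
	uint32_t x = 0x189633;

	printf("truncated: %u\n", (unsigned int)SMUQ10_TO_UINT(x)); /* 1573 */
	printf("rounded:   %u\n", (unsigned int)SMUQ10_ROUND(x));   /* 1574 */
	return 0;
}

The related GET_METRIC_FIELD() change that follows replaces the implicit AMD_IS_APU check with an explicit flag, so callers can evaluate smu_v13_0_6_is_unified_metrics() once and reuse the result.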
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) -#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\ +#define GET_METRIC_FIELD(field, flag) ((flag) ?\ (metrics_a->field) : (metrics_x->field)) struct smu_v13_0_6_dpm_map { @@ -352,7 +370,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_6); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -583,7 +601,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - struct amdgpu_device *adev = smu->adev; + bool flag = smu_v13_0_6_is_unified_metrics(smu); int ret, i, retry = 100; uint32_t table_version; @@ -595,7 +613,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) return ret; /* Ensure that metrics have been updated */ - if (GET_METRIC_FIELD(AccumulationCounter)) + if (GET_METRIC_FIELD(AccumulationCounter, flag)) break; usleep_range(1000, 1100); @@ -612,29 +630,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) table_version; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - GET_METRIC_FIELD(SocclkFrequencyTable)[i]); + GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0]; + pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0]; pptable->Init = true; } @@ -957,6 +975,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + bool flag = smu_v13_0_6_is_unified_metrics(smu); struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -971,50 +990,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_GFXCLK: if 
(smu->smc_fw_version >= 0x552F00) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -1739,7 +1758,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, if (uclk_table->max != pstate_table->uclk_pstate.curr.max) { /* Min UCLK is not expected to be changed */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, uclk_table->max); + smu, SMU_UCLK, 0, uclk_table->max, false); if (ret) return ret; pstate_table->uclk_pstate.curr.max = uclk_table->max; @@ -1758,7 +1777,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1806,7 +1826,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, max); + smu, SMU_UCLK, 0, max, false); if (!ret) pstate_table->uclk_pstate.curr.max = max; } @@ -1946,7 +1966,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = dpm_context->dpm_tables.gfx_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1954,7 +1974,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.uclk_table.min; max_clk = dpm_context->dpm_tables.uclk_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); if (ret) return ret; pstate_table->uclk_pstate.custom.max = 0; @@ -1978,7 +1998,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = pstate_table->gfxclk_pstate.custom.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1989,7 +2009,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->uclk_pstate.curr.min; max_clk = pstate_table->uclk_pstate.custom.max; return smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); } break; default: @@ -2299,14 +2319,18 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { + bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst; struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_5 *gpu_metrics = - (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_6 *gpu_metrics = + (struct gpu_metrics_v1_6 *)smu_table->gpu_metrics_table; + bool flag = smu_v13_0_6_is_unified_metrics(smu); + int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - int ret = 0, xcc_id, inst, i, j; MetricsTableX_t *metrics_x; MetricsTableA_t *metrics_a; + struct amdgpu_xcp *xcp; u16 link_width_level; + u32 inst_mask; metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); ret = 
smu_v13_0_6_get_metrics_table(smu, metrics_x, true); @@ -2317,53 +2341,60 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics_a = (MetricsTableA_t *)metrics_x; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 6); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); gpu_metrics->curr_socket_power = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc); + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); + + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag); - /* Throttle status is not reported through metrics now */ - gpu_metrics->throttle_status = 0; + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag); + gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag); + gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag); + gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag); + gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag); /* Clock Lock Status. 
Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { /*Check smu version, PCIE link speed and width will be reported from pmfw metric @@ -2399,41 +2430,77 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics_x->PCIeNAKSentCountAcc; gpu_metrics->pcie_nak_rcvd_count_acc = metrics_x->PCIeNAKReceivedCountAcc; + if (smu_v13_0_6_is_other_end_count_available(smu)) + gpu_metrics->pcie_lc_perf_other_end_recovery = + metrics_x->PCIeOtherEndRecoveryAcc; + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag)); for (i = 0; i < NUM_XGMI_LINKS; i++) { gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]); gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]); - } + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]); + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00); + smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) && + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 6)) && + (smu->smc_fw_version >= 0x556F00); + smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) && + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 14)) && + (smu->smc_fw_version >= 0x05550B00); + + per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst; + + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * adev->jpeg.num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag) + [(inst * adev->jpeg.num_jpeg_rings) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]); + idx++; - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - inst = GET_INST(JPEG, i); - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { - gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy) - [(inst * adev->jpeg.num_jpeg_rings) + j]); } - } - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - inst = GET_INST(VCN, i); - gpu_metrics->vcn_activity[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]); + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(metrics_x->GfxBusy[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]); + idx++; + } + } } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth)); - gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate)); + gpu_metrics->xgmi_link_width = 
SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag)); + gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag)); - gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp); + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag); *table = (void *)gpu_metrics; kfree(metrics_x); @@ -2974,6 +3041,16 @@ static int mmhub_err_codes[] = { CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE, }; +static int vcn_err_codes[] = { + CODE_VIDD, CODE_VIDV, +}; +static int jpeg_err_codes[] = { + CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D, + CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D, + CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D, + CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D, +}; + static const struct mca_ras_info mca_ras_table[] = { { .blkid = AMDGPU_RAS_BLOCK__UMC, @@ -3002,6 +3079,20 @@ static const struct mca_ras_info mca_ras_table[] = { .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL, .ip = AMDGPU_MCA_IP_PCS_XGMI, .get_err_count = mca_pcs_xgmi_mca_get_err_count, + }, { + .blkid = AMDGPU_RAS_BLOCK__VCN, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = vcn_err_codes, + .err_code_count = ARRAY_SIZE(vcn_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, + }, { + .blkid = AMDGPU_RAS_BLOCK__JPEG, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = jpeg_err_codes, + .err_code_count = ARRAY_SIZE(jpeg_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, }, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b891a5e0a396..c5d3e25cc967 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -83,6 +83,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -734,19 +736,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_7_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -1132,6 +1121,14 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1452,6 +1449,42 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + 
case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1548,6 +1581,16 @@ static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1841,6 +1884,48 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case 
PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_7_od_restore_table_single(smu, input[0]); @@ -1964,7 +2049,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2106,7 +2192,11 @@ static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) @@ -2172,6 +2262,10 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_7_set_supported_od_feature_mask(smu); @@ -2499,13 +2593,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); else - smu->workload_mask = (1 << workload_type); + smu_cmn_assign_power_profile(smu); return ret; } @@ -2605,7 +2700,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, .is_dpm_running = smu_v13_0_7_is_dpm_running, - .dump_pptable = smu_v13_0_7_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 260c339f89c5..73b4506ef5a8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -220,7 +220,9 @@ static int yellow_carp_system_features_control(struct smu_context *smu, bool en) return ret; } -static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -945,9 +947,10 @@ failed: } static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -1134,7 +1137,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto 
force_level_out; break; @@ -1254,9 +1257,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1266,18 +1270,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (fclk_min && fclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_FCLK, - fclk_min, - fclk_max); + SMU_FCLK, + fclk_min, + fclk_max, + false); if (ret) return ret; } if (socclk_min && socclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SOCCLK, - socclk_min, - socclk_max); + SMU_SOCCLK, + socclk_min, + socclk_max, + false); if (ret) return ret; } @@ -1286,7 +1292,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1295,7 +1302,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 865e916fc425..ecb0164d533e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1102,7 +1102,8 @@ failed: int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1117,7 +1118,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1125,7 +1129,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1202,6 +1209,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1233,6 +1241,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1268,7 +1277,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1280,7 +1290,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1292,7 +1303,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = 
smu_v14_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1307,7 +1319,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1322,7 +1335,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1334,7 +1348,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -1492,7 +1507,8 @@ int smu_v14_0_set_single_dpm_table(struct smu_context *smu, } int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 1e16a281f2dc..59b369eff30f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -670,19 +670,6 @@ static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v14_0_2_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); -} - static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -1457,7 +1444,8 @@ static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -1807,12 +1795,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + smu->workload_mask, NULL); + if (!ret) - smu->workload_mask = 1 << workload_type; + smu_cmn_assign_power_profile(smu); return ret; } @@ -2726,7 +2713,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .i2c_init = smu_v14_0_2_i2c_control_init, .i2c_fini = smu_v14_0_2_i2c_control_fini, .is_dpm_running = smu_v14_0_2_is_dpm_running, - .dump_pptable = smu_v14_0_2_dump_pptable, .init_microcode = smu_v14_0_init_microcode, .load_microcode = smu_v14_0_load_microcode, .fini_microcode = smu_v14_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 91ad434bcdae..f1ab1a6bb467 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1078,6 +1078,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(1, 5): structure_size = sizeof(struct 
gpu_metrics_v1_5); break; + case METRICS_VERSION(1, 6): + structure_size = sizeof(struct gpu_metrics_v1_6); + break; case METRICS_VERSION(2, 0): structure_size = sizeof(struct gpu_metrics_v2_0); break; @@ -1138,6 +1141,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } +void smu_cmn_assign_power_profile(struct smu_context *smu) +{ + uint32_t index; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; +} + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) { struct pci_dev *p = NULL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 1de685defe85..8a801e389659 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +void smu_cmn_assign_power_profile(struct smu_context *smu); + /* * Helper function to make sysfs_emit_at() happy. Align buf to * the current page boundary and record the offset. diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 6f4d212607d7..c09ecf1a68a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -78,7 +78,6 @@ #define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu) #define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max) #define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level) -#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu) #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap) #define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src) #define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu) diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index ddf20708370f..c901ac00c0c3 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -6,6 +6,7 @@ config DRM_HDLCD tristate "ARM HDLCD" depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER help @@ -27,6 +28,7 @@ config DRM_MALI_DISPLAY tristate "ARM Mali Display Processor" depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig index 4acc4285a4eb..415c10a6374b 100644 --- a/drivers/gpu/drm/arm/display/Kconfig +++ b/drivers/gpu/drm/arm/display/Kconfig @@ -3,6 +3,7 @@ config DRM_KOMEDA tristate "ARM Komeda display driver" depends on DRM && OF depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 55c3773befde..6d475bb34002 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -9,7 +9,7 @@ #include <linux/of.h> #include 
<linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_fbdev_dma.h> +#include <drm/drm_client_setup.h> #include <drm/drm_module.h> #include <drm/drm_of.h> #include "komeda_dev.h" @@ -84,7 +84,7 @@ static int komeda_platform_probe(struct platform_device *pdev) } dev_set_drvdata(dev, mdrv); - drm_fbdev_dma_setup(&mdrv->kms->base, 32); + drm_client_setup(&mdrv->kms->base, NULL); return 0; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index e5eb5d672bcd..1e7b1fcb2848 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> @@ -58,6 +59,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) static const struct drm_driver komeda_kms_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &komeda_cma_fops, .name = "komeda", .desc = "Arm Komeda Display Processor driver", diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 32be9e370049..cd4389809d42 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -9,6 +9,7 @@ * ARM HDLCD Driver */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/clk.h> @@ -21,8 +22,8 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> @@ -228,6 +229,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver hdlcd_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "hdlcd", .desc = "ARM HDLCD Controller DRM", @@ -285,7 +287,7 @@ static int hdlcd_drm_bind(struct device *dev) */ if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) { hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_aperture_remove_framebuffers(&hdlcd_driver); + aperture_remove_all_conflicting_devices(hdlcd_driver.name); } drm_mode_config_reset(drm); @@ -299,7 +301,7 @@ static int hdlcd_drm_bind(struct device *dev) if (ret) goto err_register; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 6682131d2910..4cb25004b84f 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -562,6 +563,7 @@ static void malidp_debugfs_init(struct drm_minor *minor) static const struct drm_driver malidp_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, #ifdef CONFIG_DEBUG_FS .debugfs_init = malidp_debugfs_init, #endif @@ -852,7 +854,7 @@ static int malidp_bind(struct device *dev) if (ret) goto register_fail; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 
0; diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig index e5597d7c9ae1..b22c891a670b 100644 --- a/drivers/gpu/drm/armada/Kconfig +++ b/drivers/gpu/drm/armada/Kconfig @@ -2,6 +2,7 @@ config DRM_ARMADA tristate "DRM support for Marvell Armada SoCs" depends on DRM && HAVE_CLK && ARM && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION help diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index c303e8c7ff6c..3c0ff221a43b 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -16,6 +16,8 @@ struct armada_crtc; struct armada_gem_object; struct clk; struct drm_display_mode; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; static inline void armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr) @@ -74,10 +76,13 @@ struct armada_private { #define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm) #if defined(CONFIG_DRM_FBDEV_EMULATION) -void armada_fbdev_setup(struct drm_device *dev); +int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes); +#define ARMADA_FBDEV_DRIVER_OPS \ + .fbdev_probe = armada_fbdev_driver_fbdev_probe #else -static inline void armada_fbdev_setup(struct drm_device *dev) -{ } +#define ARMADA_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif int armada_overlay_plane_create(struct drm_device *, unsigned long); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index e51ecc4f7ef4..5c26f0409478 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -3,6 +3,7 @@ * Copyright (C) 2012 Russell King */ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> @@ -10,8 +11,8 @@ #include <linux/of_graph.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> @@ -39,6 +40,7 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops); static const struct drm_driver armada_drm_driver = { .gem_prime_import = armada_gem_prime_import, .dumb_create = armada_gem_dumb_create, + ARMADA_FBDEV_DRIVER_OPS, .major = 1, .minor = 0, .name = "armada-drm", @@ -91,7 +93,7 @@ static int armada_drm_bind(struct device *dev) } /* Remove early framebuffers */ - ret = drm_aperture_remove_framebuffers(&armada_drm_driver); + ret = aperture_remove_all_conflicting_devices(armada_drm_driver.name); if (ret) { dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n", __func__, ret); @@ -137,7 +139,7 @@ static int armada_drm_bind(struct device *dev) armada_drm_debugfs_init(priv->drm.primary); #endif - armada_fbdev_setup(&priv->drm); + drm_client_setup(&priv->drm, NULL); return 0; diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index d223176912b6..6ee7ce04ee71 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -39,8 +39,10 @@ static const struct fb_ops armada_fb_ops = { .fb_destroy = armada_fbdev_fb_destroy, }; -static int armada_fbdev_create(struct drm_fb_helper *fbh, - struct drm_fb_helper_surface_size *sizes) +static const struct drm_fb_helper_funcs armada_fbdev_helper_funcs; + +int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fbh->dev; 
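	/*
	 * Note on the new contract (assuming the generic fbdev client this
	 * series converts drivers to): this function is now the driver's
	 * .fbdev_probe hook, wired up through the ARMADA_FBDEV_DRIVER_OPS
	 * macro above.  It creates the GEM-backed framebuffer, fills in
	 * fbh->funcs and fbh->fb, and returns 0 on success; client
	 * registration, hotplug handling, and teardown happen generically
	 * once the driver calls drm_client_setup(), which is why the
	 * hand-rolled client boilerplate further down is deleted.
	 */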
struct drm_mode_fb_cmd2 mode; @@ -98,6 +100,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh, info->fix.smem_len = obj->obj.size; info->screen_size = obj->obj.size; info->screen_base = ptr; + fbh->funcs = &armada_fbdev_helper_funcs; fbh->fb = &dfb->fb; drm_fb_helper_fill_info(info, fbh, sizes); @@ -112,109 +115,3 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh, dfb->fb.funcs->destroy(&dfb->fb); return ret; } - -static int armada_fb_probe(struct drm_fb_helper *fbh, - struct drm_fb_helper_surface_size *sizes) -{ - int ret = 0; - - if (!fbh->fb) { - ret = armada_fbdev_create(fbh, sizes); - if (ret == 0) - ret = 1; - } - return ret; -} - -static const struct drm_fb_helper_funcs armada_fb_helper_funcs = { - .fb_probe = armada_fb_probe, -}; - -/* - * Fbdev client and struct drm_client_funcs - */ - -static void armada_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fbh = drm_fb_helper_from_client(client); - - if (fbh->info) { - drm_fb_helper_unregister_info(fbh); - } else { - drm_client_release(&fbh->client); - drm_fb_helper_unprepare(fbh); - kfree(fbh); - } -} - -static int armada_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int armada_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fbh = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fbh); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fbh); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fbh); -err_drm_err: - drm_err(dev, "armada: Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs armada_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = armada_fbdev_client_unregister, - .restore = armada_fbdev_client_restore, - .hotplug = armada_fbdev_client_hotplug, -}; - -void armada_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *fbh; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fbh = kzalloc(sizeof(*fbh), GFP_KERNEL); - if (!fbh) - return; - drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs); - - ret = drm_client_init(dev, &fbh->client, "fbdev", &armada_fbdev_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fbh->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fbh); - kfree(fbh); - return; -} diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig index 8137c39b057b..6e68f20aac21 100644 --- a/drivers/gpu/drm/aspeed/Kconfig +++ b/drivers/gpu/drm/aspeed/Kconfig @@ -4,6 +4,7 @@ config DRM_ASPEED_GFX depends on DRM && OF depends on (COMPILE_TEST || ARCH_ASPEED) depends on MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DMA_CMA if HAVE_DMA_CONTIGUOUS diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index a7a6b70220eb..109023815fa2 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -14,6 +14,7 @@ #include <linux/reset.h> 
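The same conversion repeats across these drivers: Kconfig gains "select DRM_CLIENT_SELECTION", struct drm_driver gains DRM_FBDEV_DMA_DRIVER_OPS (which supplies .fbdev_probe), and the bind/probe path calls drm_client_setup() instead of drm_fbdev_dma_setup(). Condensed, for a hypothetical driver (sketch only; the example_* names are not from the patch):

#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	DRM_FBDEV_DMA_DRIVER_OPS,	/* sets .fbdev_probe for the client */
	/* .fops, .name, ... */
};

static int example_bind(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* was: drm_fbdev_dma_setup(drm, 32); NULL lets the client pick the format */
	drm_client_setup(drm, NULL);
	return 0;
}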
#include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_device.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -247,6 +248,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "aspeed-gfx-drm", .desc = "ASPEED GFX DRM", @@ -339,7 +341,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(&priv->drm, 32); + drm_client_setup(&priv->drm, NULL); return 0; err_unload: diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig index 563fa7a3b546..da0663542e8a 100644 --- a/drivers/gpu/drm/ast/Kconfig +++ b/drivers/gpu/drm/ast/Kconfig @@ -2,6 +2,7 @@ config DRM_AST tristate "AST server chips" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER select I2C diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 00b364f9a71e..0e282b7b167c 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -149,28 +149,22 @@ int ast_dp_launch(struct ast_device *ast) return 0; } -static bool ast_dp_power_is_on(struct ast_device *ast) +static bool ast_dp_get_phy_sleep(struct ast_device *ast) { - u8 vgacre3; + u8 vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3); - vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3); - - return !(vgacre3 & AST_DP_PHY_SLEEP); + return (vgacre3 & AST_IO_VGACRE3_DP_PHY_SLEEP); } -static void ast_dp_power_on_off(struct drm_device *dev, bool on) +static void ast_dp_set_phy_sleep(struct ast_device *ast, bool sleep) { - struct ast_device *ast = to_ast_device(dev); - // Read and Turn off DP PHY sleep - u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, AST_DP_VIDEO_ENABLE); - - // Turn on DP PHY sleep - if (!on) - bE3 |= AST_DP_PHY_SLEEP; + u8 vgacre3 = 0x00; - // DP Power on/off - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3); + if (sleep) + vgacre3 |= AST_IO_VGACRE3_DP_PHY_SLEEP; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe3, (u8)~AST_IO_VGACRE3_DP_PHY_SLEEP, + vgacre3); msleep(50); } @@ -192,23 +186,39 @@ static void ast_dp_link_training(struct ast_device *ast) drm_err(dev, "Link training failed\n"); } -static void ast_dp_set_on_off(struct drm_device *dev, bool on) +static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled) { - struct ast_device *ast = to_ast_device(dev); - u8 video_on_off = on; - u32 i = 0; - - // Video On/Off - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on); - - video_on_off <<= 4; - while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, - ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) { - // wait 1 ms - mdelay(1); - if (++i > 200) - break; + u8 vgacrdf_test = 0x00; + u8 vgacrdf; + unsigned int i; + + if (enabled) + vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE; + + for (i = 0; i < 200; ++i) { + if (i) + mdelay(1); + vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf, + AST_IO_VGACRDF_DP_VIDEO_ENABLE); + if (vgacrdf == vgacrdf_test) + return true; } + + return false; +} + +static void ast_dp_set_enable(struct ast_device *ast, bool enabled) +{ + struct drm_device *dev = &ast->base; + u8 vgacre3 = 0x00; + + if (enabled) + vgacre3 |= AST_IO_VGACRE3_DP_VIDEO_ENABLE; + + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe3, (u8)~AST_IO_VGACRE3_DP_VIDEO_ENABLE, + vgacre3); + + 
drm_WARN_ON(dev, !__ast_dp_wait_enable(ast, enabled)); } static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode) @@ -317,26 +327,25 @@ static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; - struct ast_device *ast = to_ast_device(dev); + struct ast_device *ast = to_ast_device(encoder->dev); struct ast_connector *ast_connector = &ast->output.astdp.connector; if (ast_connector->physical_status == connector_status_connected) { - ast_dp_power_on_off(dev, AST_DP_POWER_ON); + ast_dp_set_phy_sleep(ast, false); ast_dp_link_training(ast); ast_wait_for_vretrace(ast); - ast_dp_set_on_off(dev, 1); + ast_dp_set_enable(ast, true); } } static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(encoder->dev); - ast_dp_set_on_off(dev, 0); - ast_dp_power_on_off(dev, AST_DP_POWER_OFF); + ast_dp_set_enable(ast, false); + ast_dp_set_phy_sleep(ast, true); } static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = { @@ -383,22 +392,21 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector bool force) { struct ast_connector *ast_connector = to_ast_connector(connector); - struct drm_device *dev = connector->dev; struct ast_device *ast = to_ast_device(connector->dev); enum drm_connector_status status = connector_status_disconnected; - bool power_is_on; + bool phy_sleep; mutex_lock(&ast->modeset_lock); - power_is_on = ast_dp_power_is_on(ast); - if (!power_is_on) - ast_dp_power_on_off(dev, true); + phy_sleep = ast_dp_get_phy_sleep(ast); + if (phy_sleep) + ast_dp_set_phy_sleep(ast, false); if (ast_astdp_is_connected(ast)) status = connector_status_connected; - if (!power_is_on && status == connector_status_disconnected) - ast_dp_power_on_off(dev, false); + if (phy_sleep && status == connector_status_disconnected) + ast_dp_set_phy_sleep(ast, true); mutex_unlock(&ast->modeset_lock); @@ -414,6 +422,10 @@ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs .detect_ctx = ast_astdp_connector_helper_detect_ctx, }; +/* + * Output + */ + static const struct drm_connector_funcs ast_astdp_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, @@ -422,34 +434,18 @@ static const struct drm_connector_funcs ast_astdp_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - int ret; - - ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} - int ast_astdp_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.astdp.encoder; - struct ast_connector *ast_connector = &ast->output.astdp.connector; - struct drm_connector *connector = &ast_connector->base; + struct drm_encoder *encoder; + struct 
ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* encoder */ + + encoder = &ast->output.astdp.encoder; ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); if (ret) @@ -458,9 +454,20 @@ int ast_astdp_output_init(struct ast_device *ast) encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = ast_astdp_connector_init(dev, connector); + /* connector */ + + ast_connector = &ast->output.astdp.connector; + connector = &ast_connector->base; + ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); if (ret) return ret; + drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index e4c636f45082..9e19d8c17730 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -21,9 +21,9 @@ static void ast_release_firmware(void *data) ast->dp501_fw = NULL; } -static int ast_load_dp501_microcode(struct drm_device *dev) +static int ast_load_dp501_microcode(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; int ret; ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev); @@ -109,10 +109,10 @@ static bool wait_fw_ready(struct ast_device *ast) } #endif -static bool ast_write_cmd(struct drm_device *dev, u8 data) +static bool ast_write_cmd(struct ast_device *ast, u8 data) { - struct ast_device *ast = to_ast_device(dev); int retry = 0; + if (wait_nack(ast)) { send_nack(ast); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data); @@ -131,10 +131,8 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data) return false; } -static bool ast_write_data(struct drm_device *dev, u8 data) +static bool ast_write_data(struct ast_device *ast, u8 data) { - struct ast_device *ast = to_ast_device(dev); - if (wait_nack(ast)) { send_nack(ast); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data); @@ -175,10 +173,10 @@ static void clear_cmd(struct ast_device *ast) } #endif -static void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) +static void ast_set_dp501_video_output(struct ast_device *ast, u8 mode) { - ast_write_cmd(dev, 0x40); - ast_write_data(dev, mode); + ast_write_cmd(ast, 0x40); + ast_write_data(ast, mode); msleep(10); } @@ -188,9 +186,8 @@ static u32 get_fw_base(struct ast_device *ast) return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff; } -bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) +bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size) { - struct ast_device *ast = to_ast_device(dev); u32 i, data; u32 boot_address; @@ -207,9 +204,8 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) return false; } -static bool ast_launch_m68k(struct drm_device *dev) +static bool ast_launch_m68k(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u32 i, data, len = 0; u32 boot_address; u8 *fw_addr = NULL; @@ -226,7 +222,7 @@ static bool ast_launch_m68k(struct drm_device *dev) len = 32*1024; } else { if (!ast->dp501_fw && - ast_load_dp501_microcode(dev) < 0) + ast_load_dp501_microcode(ast) < 0) return false; fw_addr = (u8 *)ast->dp501_fw->data; @@ -348,9 +344,8 @@ static int 
ast_dp512_read_edid_block(void *data, u8 *buf, unsigned int block, si return true; } -static bool ast_init_dvo(struct drm_device *dev) +static bool ast_init_dvo(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u8 jreg; u32 data; ast_write32(ast, 0xf004, 0x1e6e0000); @@ -421,9 +416,8 @@ static bool ast_init_dvo(struct drm_device *dev) } -static void ast_init_analog(struct drm_device *dev) +static void ast_init_analog(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u32 data; /* @@ -448,28 +442,28 @@ static void ast_init_analog(struct drm_device *dev) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x00); } -void ast_init_3rdtx(struct drm_device *dev) +void ast_init_3rdtx(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); - u8 jreg; + u8 vgacrd1; if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) { - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); - switch (jreg & 0x0e) { - case 0x04: - ast_init_dvo(dev); + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + AST_IO_VGACRD1_TX_TYPE_MASK); + switch (vgacrd1) { + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + ast_init_dvo(ast); break; - case 0x08: - ast_launch_m68k(dev); + case AST_IO_VGACRD1_TX_DP501_VBIOS: + ast_launch_m68k(ast); break; - case 0x0c: - ast_init_dvo(dev); + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + ast_init_dvo(ast); break; default: - if (ast->tx_chip_types & BIT(AST_TX_SIL164)) - ast_init_dvo(dev); + if (ast->tx_chip == AST_TX_SIL164) + ast_init_dvo(ast); else - ast_init_analog(dev); + ast_init_analog(ast); } } } @@ -485,17 +479,17 @@ static const struct drm_encoder_funcs ast_dp501_encoder_funcs = { static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(encoder->dev); - ast_set_dp501_video_output(dev, 1); + ast_set_dp501_video_output(ast, 1); } static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(encoder->dev); - ast_set_dp501_video_output(dev, 0); + ast_set_dp501_video_output(ast, 0); } static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = { @@ -567,34 +561,22 @@ static const struct drm_connector_funcs ast_dp501_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - int ret; - - ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} +/* + * Output + */ int ast_dp501_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.dp501.encoder; - struct ast_connector *ast_connector = &ast->output.dp501.connector; - struct drm_connector *connector = &ast_connector->base; + struct drm_encoder *encoder; + struct ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* encoder */ + + encoder = &ast->output.dp501.encoder; ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs, 
DRM_MODE_ENCODER_TMDS, NULL); if (ret) @@ -603,9 +585,20 @@ int ast_dp501_output_init(struct ast_device *ast) encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = ast_dp501_connector_init(dev, connector); + /* connector */ + + ast_connector = &ast->output.dp501.connector; + connector = &ast_connector->base; + ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); if (ret) return ret; + drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 3a908bb015fe..4afe4be072ef 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -26,12 +26,13 @@ * Authors: Dave Airlie <airlied@redhat.com> */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_gem_shmem_helper.h> @@ -64,7 +65,8 @@ static const struct drm_driver ast_driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - DRM_GEM_SHMEM_DRIVER_OPS + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; /* @@ -279,7 +281,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *drm; bool need_post = false; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, ast_driver.name); if (ret) return ret; @@ -360,7 +362,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - drm_fbdev_shmem_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; } @@ -396,7 +398,7 @@ static int ast_drm_thaw(struct drm_device *dev) ast_enable_vga(ast->ioregs); ast_open_key(ast->ioregs); ast_enable_mmio(dev->dev, ast->ioregs); - ast_post_gpu(dev); + ast_post_gpu(ast); return drm_mode_config_helper_resume(dev); } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 91fe07cf7b07..21ce3769bf0d 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -91,11 +91,6 @@ enum ast_tx_chip { AST_TX_ASTDP, }; -#define AST_TX_NONE_BIT BIT(AST_TX_NONE) -#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) -#define AST_TX_DP501_BIT BIT(AST_TX_DP501) -#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) - enum ast_config_mode { ast_use_p2a, ast_use_dt, @@ -187,10 +182,12 @@ struct ast_device { struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ + enum ast_tx_chip tx_chip; + struct ast_plane primary_plane; struct ast_plane cursor_plane; struct drm_crtc crtc; - struct { + union { struct { struct drm_encoder encoder; struct ast_connector connector; @@ -211,7 +208,6 @@ struct ast_device { bool support_wide_screen; - unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; @@ -407,9 +403,6 @@ int ast_mode_config_init(struct ast_device *ast); #define AST_DP501_LINKRATE 0xf014 #define AST_DP501_EDID_DATA 0xf020 -#define AST_DP_POWER_ON true -#define AST_DP_POWER_OFF false - /* * ASTDP resolution table: * EX: 
ASTDP_A_B_C: @@ -453,7 +446,7 @@ int ast_mode_config_init(struct ast_device *ast); int ast_mm_init(struct ast_device *ast); /* ast post */ -void ast_post_gpu(struct drm_device *dev); +void ast_post_gpu(struct ast_device *ast); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); void ast_patch_ahb_2500(void __iomem *regs); @@ -462,8 +455,8 @@ int ast_vga_output_init(struct ast_device *ast); int ast_sil164_output_init(struct ast_device *ast); /* ast dp501 */ -bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); -void ast_init_3rdtx(struct drm_device *dev); +bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size); +void ast_init_3rdtx(struct ast_device *ast); int ast_dp501_output_init(struct ast_device *ast); /* aspeed DP */ diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d836f2a4f9f3..bc37c65305d4 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -68,11 +68,33 @@ static void ast_detect_widescreen(struct ast_device *ast) static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) { + static const char * const info_str[] = { + "analog VGA", + "Sil164 TMDS transmitter", + "DP501 DisplayPort transmitter", + "ASPEED DisplayPort transmitter", + }; + struct drm_device *dev = &ast->base; - u8 jreg; + u8 jreg, vgacrd1; + + /* + * Several of the listed TX chips are not explicitly supported + * by the ast driver. If these exist in real-world devices, they + * are most likely reported as VGA or SIL164 outputs. We warn here + * to get bug reports for these devices. If none come in for some + * time, we can begin to fail device probing on these values. + */ + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK); + drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ITE66121_VBIOS, + "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_CH7003_VBIOS, + "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ANX9807_VBIOS, + "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip_types |= AST_TX_NONE_BIT; + ast->tx_chip = AST_TX_NONE; /* * VGACRA3 Enhanced Color Mode Register, check if DVO is already @@ -85,7 +107,7 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) if (!need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); if (jreg & 0x80) - ast->tx_chip_types = AST_TX_SIL164_BIT; + ast->tx_chip = AST_TX_SIL164; } if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) { @@ -94,49 +116,42 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) * the SOC scratch register #1 bits 11:8 (interestingly marked * as "reserved" in the spec) */ - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); + jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + AST_IO_VGACRD1_TX_TYPE_MASK); switch (jreg) { - case 0x04: - ast->tx_chip_types = AST_TX_SIL164_BIT; + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + ast->tx_chip = AST_TX_SIL164; break; - case 0x08: + case AST_IO_VGACRD1_TX_DP501_VBIOS: ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); if (ast->dp501_fw_addr) { /* backup firmware */ - if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) { + if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) { drmm_kfree(dev, ast->dp501_fw_addr); ast->dp501_fw_addr = NULL; } } fallthrough; 
- case 0x0c: - ast->tx_chip_types = AST_TX_DP501_BIT; + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + ast->tx_chip = AST_TX_DP501; } } else if (IS_AST_GEN7(ast)) { - if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) == - ASTDP_DPMCU_TX) { + if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK) == + AST_IO_VGACRD1_TX_ASTDP) { int ret = ast_dp_launch(ast); if (!ret) - ast->tx_chip_types = AST_TX_ASTDP_BIT; + ast->tx_chip = AST_TX_ASTDP; } } - /* Print stuff for diagnostic purposes */ - if (ast->tx_chip_types & AST_TX_NONE_BIT) - drm_info(dev, "Using analog VGA\n"); - if (ast->tx_chip_types & AST_TX_SIL164_BIT) - drm_info(dev, "Using Sil164 TMDS transmitter\n"); - if (ast->tx_chip_types & AST_TX_DP501_BIT) - drm_info(dev, "Using DP501 DisplayPort transmitter\n"); - if (ast->tx_chip_types & AST_TX_ASTDP_BIT) - drm_info(dev, "Using ASPEED DisplayPort transmitter\n"); + drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); } -static int ast_get_dram_info(struct drm_device *dev) +static int ast_get_dram_info(struct ast_device *ast) { + struct drm_device *dev = &ast->base; struct device_node *np = dev->dev->of_node; - struct ast_device *ast = to_ast_device(dev); uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; uint32_t denum, num, div, ref_pll, dsel; @@ -278,7 +293,7 @@ struct drm_device *ast_device_create(struct pci_dev *pdev, ast_detect_widescreen(ast); ast_detect_tx_chip(ast, need_post); - ret = ast_get_dram_info(dev); + ret = ast_get_dram_info(ast); if (ret) return ERR_PTR(ret); @@ -286,7 +301,7 @@ struct drm_device *ast_device_create(struct pci_dev *pdev, ast->mclk, ast->dram_type, ast->dram_bus_width); if (need_post) - ast_post_gpu(dev); + ast_post_gpu(ast); ret = ast_mm_init(ast); if (ret) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index ed496fb32bf3..9d5321c81e68 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1287,9 +1287,9 @@ static const struct drm_crtc_funcs ast_crtc_funcs = { .atomic_destroy_state = ast_crtc_atomic_destroy_state, }; -static int ast_crtc_init(struct drm_device *dev) +static int ast_crtc_init(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; int ret; @@ -1396,28 +1396,26 @@ int ast_mode_config_init(struct ast_device *ast) if (ret) return ret; - ast_crtc_init(dev); + ret = ast_crtc_init(ast); + if (ret) + return ret; - if (ast->tx_chip_types & AST_TX_NONE_BIT) { + switch (ast->tx_chip) { + case AST_TX_NONE: ret = ast_vga_output_init(ast); - if (ret) - return ret; - } - if (ast->tx_chip_types & AST_TX_SIL164_BIT) { + break; + case AST_TX_SIL164: ret = ast_sil164_output_init(ast); - if (ret) - return ret; - } - if (ast->tx_chip_types & AST_TX_DP501_BIT) { + break; + case AST_TX_DP501: ret = ast_dp501_output_init(ast); - if (ret) - return ret; - } - if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + break; + case AST_TX_ASTDP: ret = ast_astdp_output_init(ast); - if (ret) - return ret; + break; } + if (ret) + return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 65755798ab94..364030f97571 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -34,16 +34,14 @@ #include "ast_dram_tables.h" #include "ast_drv.h" -static void ast_post_chip_2300(struct drm_device *dev); -static void ast_post_chip_2500(struct drm_device *dev); +static void ast_post_chip_2300(struct ast_device *ast); 
+static void ast_post_chip_2500(struct ast_device *ast); static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; -static void -ast_set_def_ext_reg(struct drm_device *dev) +static void ast_set_def_ext_reg(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u8 i, index, reg; const u8 *ext_reg_info; @@ -252,9 +250,8 @@ cbr_start: -static void ast_init_dram_reg(struct drm_device *dev) +static void ast_init_dram_reg(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u8 j; u32 data, temp, i; const struct ast_dramstruct *dram_reg_info; @@ -343,26 +340,24 @@ static void ast_init_dram_reg(struct drm_device *dev) } while ((j & 0x40) == 0); } -void ast_post_gpu(struct drm_device *dev) +void ast_post_gpu(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); - - ast_set_def_ext_reg(dev); + ast_set_def_ext_reg(ast); if (IS_AST_GEN7(ast)) { - if (ast->tx_chip_types & AST_TX_ASTDP_BIT) + if (ast->tx_chip == AST_TX_ASTDP) ast_dp_launch(ast); } else if (ast->config_mode == ast_use_p2a) { if (IS_AST_GEN6(ast)) - ast_post_chip_2500(dev); + ast_post_chip_2500(ast); else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) - ast_post_chip_2300(dev); + ast_post_chip_2300(ast); else - ast_init_dram_reg(dev); + ast_init_dram_reg(ast); - ast_init_3rdtx(dev); + ast_init_3rdtx(ast); } else { - if (ast->tx_chip_types & AST_TX_SIL164_BIT) + if (ast->tx_chip == AST_TX_SIL164) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */ } } @@ -1569,9 +1564,8 @@ ddr2_init_start: } -static void ast_post_chip_2300(struct drm_device *dev) +static void ast_post_chip_2300(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); struct ast2300_dram_param param; u32 temp; u8 reg; @@ -2038,9 +2032,9 @@ void ast_patch_ahb_2500(void __iomem *regs) __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ } -void ast_post_chip_2500(struct drm_device *dev) +void ast_post_chip_2500(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; u32 temp; u8 reg; diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 040961cc1a19..2aadf07d135a 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -37,28 +37,29 @@ #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) -#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5) +#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5) +/* Display Transmitter Type */ +#define AST_IO_VGACRD1_TX_TYPE_MASK GENMASK(3, 1) +#define AST_IO_VGACRD1_NO_TX 0x00 +#define AST_IO_VGACRD1_TX_ITE66121_VBIOS 0x02 +#define AST_IO_VGACRD1_TX_SIL164_VBIOS 0x04 +#define AST_IO_VGACRD1_TX_CH7003_VBIOS 0x06 +#define AST_IO_VGACRD1_TX_DP501_VBIOS 0x08 +#define AST_IO_VGACRD1_TX_ANX9807_VBIOS 0x0a +#define AST_IO_VGACRD1_TX_FW_EMBEDDED_FW 0x0c /* special case of DP501 */ +#define AST_IO_VGACRD1_TX_ASTDP 0x0e + #define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0) #define AST_IO_VGACRDC_LINK_SUCCESS BIT(0) #define AST_IO_VGACRDF_HPD BIT(0) +#define AST_IO_VGACRDF_DP_VIDEO_ENABLE BIT(4) /* mirrors AST_IO_VGACRE3_DP_VIDEO_ENABLE */ +#define AST_IO_VGACRE3_DP_VIDEO_ENABLE BIT(0) +#define AST_IO_VGACRE3_DP_PHY_SLEEP BIT(4) #define AST_IO_VGACRE5_EDID_READ_DONE BIT(0) #define AST_IO_VGAIR1_R (0x5A) #define AST_IO_VGAIR1_VREFRESH BIT(3) -/* - * Display Transmitter Type - */ - -#define TX_TYPE_MASK GENMASK(3, 1) 
-#define NO_TX (0 << 1) -#define ITE66121_VBIOS_TX (1 << 1) -#define SI164_VBIOS_TX (2 << 1) -#define CH7003_VBIOS_TX (3 << 1) -#define DP501_VBIOS_TX (4 << 1) -#define ANX9807_VBIOS_TX (5 << 1) -#define TX_FW_EMBEDDED_FW_TX (6 << 1) -#define ASTDP_DPMCU_TX (7 << 1) #define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6) //#define AST_VRAM_INIT_BY_BMC BIT(7) @@ -68,18 +69,6 @@ * AST DisplayPort */ -/* Define for Soc scratched reg used on ASTDP */ -#define AST_DP_PHY_SLEEP BIT(4) -#define AST_DP_VIDEO_ENABLE BIT(0) - -/* - * CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE - * Precondition: A. ~AST_DP_PHY_SLEEP && - * B. DP_HPD && - * C. DP_LINK_SUCCESS - */ -#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4) - /* * ASTDP setmode registers: * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp) diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c index c231389936bd..be01254dd48a 100644 --- a/drivers/gpu/drm/ast/ast_sil164.c +++ b/drivers/gpu/drm/ast/ast_sil164.c @@ -73,52 +73,49 @@ static const struct drm_connector_funcs ast_sil164_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector) +/* + * Output + */ + +int ast_sil164_output_init(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; struct i2c_adapter *ddc; + struct drm_encoder *encoder; + struct ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* DDC */ + ddc = ast_ddc_create(ast); - if (IS_ERR(ddc)) { - ret = PTR_ERR(ddc); - drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + if (IS_ERR(ddc)) + return PTR_ERR(ddc); + + /* encoder */ + + encoder = &ast->output.sil164.encoder; + ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) return ret; - } + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* connector */ + ast_connector = &ast->output.sil164.connector; + connector = &ast_connector->base; ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, DRM_MODE_CONNECTOR_DVII, ddc); if (ret) return ret; - drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - return 0; -} - -int ast_sil164_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.sil164.encoder; - struct ast_connector *ast_connector = &ast->output.sil164.connector; - struct drm_connector *connector = &ast_connector->base; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_sil164_connector_init(dev, connector); - if (ret) - return ret; ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c index dd389a0a8f4a..abe0fff8485c 100644 --- a/drivers/gpu/drm/ast/ast_vga.c +++ b/drivers/gpu/drm/ast/ast_vga.c @@ -73,52 +73,49 @@ static const struct drm_connector_funcs ast_vga_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int 
ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector) +/* + * Output + */ + +int ast_vga_output_init(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; struct i2c_adapter *ddc; + struct drm_encoder *encoder; + struct ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* DDC */ + ddc = ast_ddc_create(ast); - if (IS_ERR(ddc)) { - ret = PTR_ERR(ddc); - drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + if (IS_ERR(ddc)) + return PTR_ERR(ddc); + + /* encoder */ + + encoder = &ast->output.vga.encoder; + ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) return ret; - } + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* connector */ + ast_connector = &ast->output.vga.connector; + connector = &ast_connector->base; ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA, ddc); if (ret) return ret; - drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - return 0; -} - -int ast_vga_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.vga.encoder; - struct ast_connector *ast_connector = &ast->output.vga.connector; - struct drm_connector *connector = &ast_connector->base; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_vga_connector_init(dev, connector); - if (ret) - return ret; ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 945f3aa7bb24..f8b9c91907d8 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -2,6 +2,7 @@ config DRM_ATMEL_HLCDC tristate "DRM Support for ATMEL HLCDC Display Controller" depends on DRM && OF && COMMON_CLK && ((MFD_ATMEL_HLCDC && ARM) || COMPILE_TEST) + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 9ce429f889ca..792dcc19e8e7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -18,8 +18,10 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -840,6 +842,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver atmel_hlcdc_dc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "atmel-hlcdc", .desc = "Atmel HLCD Controller DRM", @@ -865,7 +868,7 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(ddev, 24); + drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB888); return 0; diff --git 
a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3eb955333c80..6b4664d91faa 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -90,6 +90,17 @@ config DRM_FSL_LDB help Support for i.MX8MP DPI-to-LVDS on-SoC encoder. +config DRM_ITE_IT6263 + tristate "ITE IT6263 LVDS/HDMI bridge" + depends on OF + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_BRIDGE_CONNECTOR + select DRM_KMS_HELPER + select REGMAP_I2C + help + ITE IT6263 LVDS to HDMI bridge chip driver. + config DRM_ITE_IT6505 tristate "ITE IT6505 DisplayPort bridge" depends on OF @@ -140,6 +151,8 @@ config DRM_LONTIUM_LT9611 select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_MIPI_DSI + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER select REGMAP_I2C help Driver for Lontium LT9611 DSI to HDMI bridge @@ -368,6 +381,13 @@ config DRM_TI_DLPC3433 It supports up to 720p resolution with 60 and 120 Hz refresh rates. +config DRM_TI_TDP158 + tristate "TI TDP158 HDMI/TMDS bridge" + depends on OF + select DRM_PANEL_BRIDGE + help + Texas Instruments TDP158 HDMI/TMDS Bridge driver + config DRM_TI_TFP410 tristate "TI TFP410 DVI/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 7df87b582dca..97304b429a53 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o +obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o @@ -32,6 +33,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ obj-$(CONFIG_DRM_TI_DLPC3433) += ti-dlpc3433.o obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o +obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index a2e9bb485c36..a2675b121fe4 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2551,6 +2551,8 @@ static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev) mutex_lock(&ctx->lock); anx7625_stop_dp_work(ctx); + if (!ctx->pdata.panel_bridge) + anx7625_remove_edid(ctx); anx7625_power_standby(ctx); mutex_unlock(&ctx->lock); diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index 295e9d031e2d..015983c015e5 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -121,6 +121,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev, data->bridge.funcs = &drm_aux_bridge_funcs; data->bridge.of_node = data->dev->of_node; + /* passthrough data, allow everything */ + data->bridge.interlace_allowed = true; + data->bridge.ycbcr_420_allowed = true; + return devm_drm_bridge_add(data->dev, &data->bridge); } diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 6886db2d9e00..48f297c78ee6 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -180,6 +180,10 @@ static int 
drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev, data->bridge.ops = DRM_BRIDGE_OP_HPD; data->bridge.type = id->driver_data; + /* passthrough data, allow everything */ + data->bridge.interlace_allowed = true; + data->bridge.ycbcr_420_allowed = true; + auxiliary_set_drvdata(auxdev, data); return devm_drm_bridge_add(data->dev, &data->bridge); diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index ab8e00baf3f1..aab9ce7be94c 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -270,6 +270,10 @@ static int display_connector_probe(struct platform_device *pdev) /* All the supported connector types support interlaced modes. */ conn->bridge.interlace_allowed = true; + if (type == DRM_MODE_CONNECTOR_HDMIA || + type == DRM_MODE_CONNECTOR_DisplayPort) + conn->bridge.ycbcr_420_allowed = true; + /* Get the optional connector label. */ of_property_read_string(pdev->dev.of_node, "label", &label); diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 8dd89efa8ea7..9a480c6abb85 100644 --- a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -3,6 +3,16 @@ if ARCH_MXC || COMPILE_TEST config DRM_IMX_LDB_HELPER tristate +config DRM_IMX_LEGACY_BRIDGE + tristate + depends on DRM_IMX + help + This is a DRM bridge implementation for the DRM i.MX IPUv3 driver + that uses of_get_drm_display_mode to acquire the display mode. + + Newer designs should not use this bridge and should use a proper + panel driver instead. + config DRM_IMX8MP_DW_HDMI_BRIDGE tristate "Freescale i.MX8MP HDMI-TX bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile index edb0a7b71b30..dd5d48584806 100644 --- a/drivers/gpu/drm/bridge/imx/Makefile +++ b/drivers/gpu/drm/bridge/imx/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o +obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c new file mode 100644 index 000000000000..3ebf0b9866de --- /dev/null +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Freescale i.MX drm driver + * + * bridge driver for legacy DT bindings, utilizing the display-timings node + */ + +#include <drm/drm_bridge.h> +#include <drm/drm_modes.h> +#include <drm/drm_probe_helper.h> +#include <drm/bridge/imx.h> + +#include <video/of_display_timing.h> +#include <video/of_videomode.h> + +struct imx_legacy_bridge { + struct drm_bridge base; + + struct drm_display_mode mode; + u32 bus_flags; +}; + +#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base) + +static int imx_legacy_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + return 0; +} + +static int imx_legacy_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct imx_legacy_bridge *imx_bridge = to_imx_legacy_bridge(bridge); + int ret; + + ret = drm_connector_helper_get_modes_fixed(connector, &imx_bridge->mode); + if (ret) + return ret; + + connector->display_info.bus_flags = imx_bridge->bus_flags; + + return 0; +} + 
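A minimal caller sketch for the devm_imx_drm_legacy_bridge() helper added below, assuming a hypothetical 'encoder' and legacy panel node 'remote_np' that are not part of this patch; DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory because the attach callback above rejects everything else:

	struct drm_bridge *bridge;

	/* 'remote_np' is the legacy panel node carrying a display-timings child */
	bridge = devm_imx_drm_legacy_bridge(dev, remote_np, DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* the bridge refuses to create its own connector, so pass the flag */
	return drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);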
+static const struct drm_bridge_funcs imx_legacy_bridge_funcs = { + .attach = imx_legacy_bridge_attach, + .get_modes = imx_legacy_bridge_get_modes, +}; + +struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, + struct device_node *np, + int type) +{ + struct imx_legacy_bridge *imx_bridge; + int ret; + + imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL); + if (!imx_bridge) + return ERR_PTR(-ENOMEM); + + ret = of_get_drm_display_mode(np, + &imx_bridge->mode, + &imx_bridge->bus_flags, + OF_USE_NATIVE_MODE); + if (ret) + return ERR_PTR(ret); + + imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER; + + imx_bridge->base.funcs = &imx_legacy_bridge_funcs; + imx_bridge->base.of_node = np; + imx_bridge->base.ops = DRM_BRIDGE_OP_MODES; + imx_bridge->base.type = type; + + ret = devm_drm_bridge_add(dev, &imx_bridge->base); + if (ret) + return ERR_PTR(ret); + + return &imx_bridge->base; +} +EXPORT_SYMBOL_GPL(devm_imx_drm_legacy_bridge); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Freescale i.MX DRM bridge driver for legacy DT bindings"); diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 13bc570c5473..8fcc6d18f4ab 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -23,6 +23,7 @@ imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data, const struct drm_display_mode *mode) { struct imx8mp_hdmi *hdmi = (struct imx8mp_hdmi *)data; + long round_rate; if (mode->clock < 13500) return MODE_CLOCK_LOW; @@ -30,8 +31,14 @@ imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data, if (mode->clock > 297000) return MODE_CLOCK_HIGH; - if (clk_round_rate(hdmi->pixclk, mode->clock * 1000) != - mode->clock * 1000) + round_rate = clk_round_rate(hdmi->pixclk, mode->clock * 1000); + /* imx8mp's pixel clock generator (fsl-samsung-hdmi) cannot generate + * all possible frequencies, so allow some tolerance to support more + * modes. 
+ * Allow the 0.5% difference permitted by various standards (VESA, CEA-861); + * 0.5% = 5/1000 tolerance (mode->clock is in kHz). + */ + if (abs(round_rate - mode->clock * 1000) > mode->clock * 5) return MODE_CLOCK_RANGE; /* We don't support double-clocked and Interlaced modes */ @@ -111,12 +118,12 @@ static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) dw_hdmi_remove(hdmi->dw_hdmi); } -static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev) +static int imx8mp_dw_hdmi_pm_suspend(struct device *dev) { return 0; } -static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev) +static int imx8mp_dw_hdmi_pm_resume(struct device *dev) { struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); @@ -126,8 +133,7 @@ static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev) } static const struct dev_pm_ops imx8mp_dw_hdmi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend, - imx8mp_dw_hdmi_pm_resume) + SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend, imx8mp_dw_hdmi_pm_resume) }; static const struct of_device_id imx8mp_dw_hdmi_of_table[] = { @@ -142,7 +148,7 @@ static struct platform_driver imx8mp_dw_hdmi_platform_driver = { .driver = { .name = "imx8mp-dw-hdmi-tx", .of_match_table = imx8mp_dw_hdmi_of_table, - .pm = &imx8mp_dw_hdmi_pm_ops, + .pm = pm_ptr(&imx8mp_dw_hdmi_pm_ops), }, }; diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c index 21471a9a28b2..c879e37f5811 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c @@ -542,12 +542,12 @@ static void imx8qm_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev) +static int imx8qm_ldb_runtime_suspend(struct device *dev) { return 0; } -static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev) +static int imx8qm_ldb_runtime_resume(struct device *dev) { struct imx8qm_ldb *imx8qm_ldb = dev_get_drvdata(dev); struct ldb *ldb = &imx8qm_ldb->base; @@ -559,8 +559,7 @@ static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qm_ldb_pm_ops = { - SET_RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend, - imx8qm_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend, imx8qm_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qm_ldb_dt_ids[] = { @@ -573,7 +572,7 @@ static struct platform_driver imx8qm_ldb_driver = { .probe = imx8qm_ldb_probe, .remove_new = imx8qm_ldb_remove, .driver = { - .pm = &imx8qm_ldb_pm_ops, + .pm = pm_ptr(&imx8qm_ldb_pm_ops), .name = DRIVER_NAME, .of_match_table = imx8qm_ldb_dt_ids, }, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 7984da9c0a35..b33011f397f0 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -678,12 +678,12 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev) +static int imx8qxp_ldb_runtime_suspend(struct device *dev) { return 0; } -static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev) +static int imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); struct ldb *ldb = &imx8qxp_ldb->base; @@ -695,8 +695,7 @@ static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops 
imx8qxp_ldb_pm_ops = { - SET_RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, - imx8qxp_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { @@ -709,7 +708,7 @@ static struct platform_driver imx8qxp_ldb_driver = { .probe = imx8qxp_ldb_probe, .remove_new = imx8qxp_ldb_remove, .driver = { - .pm = &imx8qxp_ldb_pm_ops, + .pm = pm_ptr(&imx8qxp_ldb_pm_ops), .name = DRIVER_NAME, .of_match_table = imx8qxp_ldb_dt_ids, }, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c index e6dbbdc87ce2..ce43e4069e21 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c @@ -371,7 +371,7 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev) +static int imx8qxp_pc_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx8qxp_pc *pc = platform_get_drvdata(pdev); @@ -393,7 +393,7 @@ static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev) return ret; } -static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev) +static int imx8qxp_pc_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx8qxp_pc *pc = platform_get_drvdata(pdev); @@ -415,8 +415,7 @@ static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qxp_pc_pm_ops = { - SET_RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend, - imx8qxp_pc_runtime_resume, NULL) + RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend, imx8qxp_pc_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_pc_dt_ids[] = { @@ -430,7 +429,7 @@ static struct platform_driver imx8qxp_pc_bridge_driver = { .probe = imx8qxp_pc_bridge_probe, .remove_new = imx8qxp_pc_bridge_remove, .driver = { - .pm = &imx8qxp_pc_pm_ops, + .pm = pm_ptr(&imx8qxp_pc_pm_ops), .name = DRIVER_NAME, .of_match_table = imx8qxp_pc_dt_ids, }, diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c new file mode 100644 index 000000000000..cbabd4e20d3e --- /dev/null +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -0,0 +1,898 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024 NXP + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> + +/* ----------------------------------------------------------------------------- + * LVDS registers + */ + +/* LVDS software reset registers */ +#define LVDS_REG_05 0x05 +#define REG_SOFT_P_RST BIT(1) + +/* LVDS system configuration registers */ +/* 0x0b */ +#define LVDS_REG_0B 0x0b +#define REG_SSC_PCLK_RF BIT(0) +#define REG_LVDS_IN_SWAP BIT(1) + +/* LVDS test 
pattern gen control registers */ +/* 0x2c */ +#define LVDS_REG_2C 0x2c +#define REG_COL_DEP GENMASK(1, 0) +#define BIT8 FIELD_PREP(REG_COL_DEP, 1) +#define OUT_MAP BIT(4) +#define JEIDA 0 +#define REG_DESSC_ENB BIT(6) +#define DMODE BIT(7) +#define DISO BIT(7) +#define SISO 0 + +#define LVDS_REG_3C 0x3c +#define LVDS_REG_3F 0x3f +#define LVDS_REG_47 0x47 +#define LVDS_REG_48 0x48 +#define LVDS_REG_4F 0x4f +#define LVDS_REG_52 0x52 + +/* ----------------------------------------------------------------------------- + * HDMI registers are separated into three banks: + * 1) HDMI register common bank: 0x00 ~ 0x2f + */ + +/* HDMI general registers */ +#define HDMI_REG_SW_RST 0x04 +#define SOFTREF_RST BIT(5) +#define SOFTA_RST BIT(4) +#define SOFTV_RST BIT(3) +#define AUD_RST BIT(2) +#define HDCP_RST BIT(0) +#define HDMI_RST_ALL (SOFTREF_RST | SOFTA_RST | SOFTV_RST | \ + AUD_RST | HDCP_RST) + +#define HDMI_REG_SYS_STATUS 0x0e +#define HPDETECT BIT(6) +#define TXVIDSTABLE BIT(4) + +#define HDMI_REG_BANK_CTRL 0x0f +#define REG_BANK_SEL BIT(0) + +/* HDMI System DDC control registers */ +#define HDMI_REG_DDC_MASTER_CTRL 0x10 +#define MASTER_SEL_HOST BIT(0) + +#define HDMI_REG_DDC_HEADER 0x11 + +#define HDMI_REG_DDC_REQOFF 0x12 +#define HDMI_REG_DDC_REQCOUNT 0x13 +#define HDMI_REG_DDC_EDIDSEG 0x14 + +#define HDMI_REG_DDC_CMD 0x15 +#define DDC_CMD_EDID_READ 0x3 +#define DDC_CMD_FIFO_CLR 0x9 + +#define HDMI_REG_DDC_STATUS 0x16 +#define DDC_DONE BIT(7) +#define DDC_NOACK BIT(5) +#define DDC_WAITBUS BIT(4) +#define DDC_ARBILOSE BIT(3) +#define DDC_ERROR (DDC_NOACK | DDC_WAITBUS | DDC_ARBILOSE) + +#define HDMI_DDC_FIFO_BYTES 32 +#define HDMI_REG_DDC_READFIFO 0x17 +#define HDMI_REG_LVDS_PORT 0x1d /* LVDS input control I2C addr */ +#define HDMI_REG_LVDS_PORT_EN 0x1e +#define LVDS_INPUT_CTRL_I2C_ADDR 0x33 + +/* ----------------------------------------------------------------------------- + * 2) HDMI register bank0: 0x30 ~ 0xff + */ + +/* HDMI AFE registers */ +#define HDMI_REG_AFE_DRV_CTRL 0x61 +#define AFE_DRV_PWD BIT(5) +#define AFE_DRV_RST BIT(4) + +#define HDMI_REG_AFE_XP_CTRL 0x62 +#define AFE_XP_GAINBIT BIT(7) +#define AFE_XP_ER0 BIT(4) +#define AFE_XP_RESETB BIT(3) + +#define HDMI_REG_AFE_ISW_CTRL 0x63 + +#define HDMI_REG_AFE_IP_CTRL 0x64 +#define AFE_IP_GAINBIT BIT(7) +#define AFE_IP_ER0 BIT(3) +#define AFE_IP_RESETB BIT(2) + +/* HDMI input data format registers */ +#define HDMI_REG_INPUT_MODE 0x70 +#define IN_RGB 0x00 + +/* HDMI general control registers */ +#define HDMI_REG_HDMI_MODE 0xc0 +#define TX_HDMI_MODE BIT(0) + +#define HDMI_REG_GCP 0xc1 +#define AVMUTE BIT(0) +#define HDMI_COLOR_DEPTH GENMASK(6, 4) +#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4) + +#define HDMI_REG_PKT_GENERAL_CTRL 0xc6 +#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd +#define ENABLE_PKT BIT(0) +#define REPEAT_PKT BIT(1) + +/* ----------------------------------------------------------------------------- + * 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers) + */ + +/* AVI packet registers */ +#define HDMI_REG_AVI_DB1 0x158 +#define HDMI_REG_AVI_DB2 0x159 +#define HDMI_REG_AVI_DB3 0x15a +#define HDMI_REG_AVI_DB4 0x15b +#define HDMI_REG_AVI_DB5 0x15c +#define HDMI_REG_AVI_CSUM 0x15d +#define HDMI_REG_AVI_DB6 0x15e +#define HDMI_REG_AVI_DB7 0x15f +#define HDMI_REG_AVI_DB8 0x160 +#define HDMI_REG_AVI_DB9 0x161 +#define HDMI_REG_AVI_DB10 0x162 +#define HDMI_REG_AVI_DB11 0x163 +#define HDMI_REG_AVI_DB12 0x164 +#define HDMI_REG_AVI_DB13 0x165 + +#define HDMI_AVI_DB_CHUNK1_SIZE (HDMI_REG_AVI_DB5 - HDMI_REG_AVI_DB1 + 
1) +#define HDMI_AVI_DB_CHUNK2_SIZE (HDMI_REG_AVI_DB13 - HDMI_REG_AVI_DB6 + 1) + +/* IT6263 data sheet Rev0.8: LVDS RX supports input clock rate up to 150MHz. */ +#define MAX_PIXEL_CLOCK_KHZ 150000 + +/* IT6263 programming guide Ver0.90: PCLK_HIGH for TMDS clock over 80MHz. */ +#define HIGH_PIXEL_CLOCK_KHZ 80000 + +/* + * IT6263 data sheet Rev0.8: HDMI TX supports link speeds of up to 2.25Gbps + * (link clock rate of 225MHz). + */ +#define MAX_HDMI_TMDS_CHAR_RATE_HZ 225000000 + +struct it6263 { + struct device *dev; + struct i2c_client *hdmi_i2c; + struct i2c_client *lvds_i2c; + struct regmap *hdmi_regmap; + struct regmap *lvds_regmap; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + int lvds_data_mapping; + bool lvds_dual_link; + bool lvds_link12_swap; +}; + +static inline struct it6263 *bridge_to_it6263(struct drm_bridge *bridge) +{ + return container_of(bridge, struct it6263, bridge); +} + +static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case HDMI_REG_SW_RST: + case HDMI_REG_BANK_CTRL: + case HDMI_REG_DDC_MASTER_CTRL: + case HDMI_REG_DDC_HEADER: + case HDMI_REG_DDC_REQOFF: + case HDMI_REG_DDC_REQCOUNT: + case HDMI_REG_DDC_EDIDSEG: + case HDMI_REG_DDC_CMD: + case HDMI_REG_LVDS_PORT: + case HDMI_REG_LVDS_PORT_EN: + case HDMI_REG_AFE_DRV_CTRL: + case HDMI_REG_AFE_XP_CTRL: + case HDMI_REG_AFE_ISW_CTRL: + case HDMI_REG_AFE_IP_CTRL: + case HDMI_REG_INPUT_MODE: + case HDMI_REG_HDMI_MODE: + case HDMI_REG_GCP: + case HDMI_REG_PKT_GENERAL_CTRL: + case HDMI_REG_AVI_INFOFRM_CTRL: + case HDMI_REG_AVI_DB1: + case HDMI_REG_AVI_DB2: + case HDMI_REG_AVI_DB3: + case HDMI_REG_AVI_DB4: + case HDMI_REG_AVI_DB5: + case HDMI_REG_AVI_CSUM: + case HDMI_REG_AVI_DB6: + case HDMI_REG_AVI_DB7: + case HDMI_REG_AVI_DB8: + case HDMI_REG_AVI_DB9: + case HDMI_REG_AVI_DB10: + case HDMI_REG_AVI_DB11: + case HDMI_REG_AVI_DB12: + case HDMI_REG_AVI_DB13: + return true; + default: + return false; + } +} + +static bool it6263_hdmi_readable_reg(struct device *dev, unsigned int reg) +{ + if (it6263_hdmi_writeable_reg(dev, reg)) + return true; + + switch (reg) { + case HDMI_REG_SYS_STATUS: + case HDMI_REG_DDC_STATUS: + case HDMI_REG_DDC_READFIFO: + return true; + default: + return false; + } +} + +static bool it6263_hdmi_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case HDMI_REG_SW_RST: + case HDMI_REG_SYS_STATUS: + case HDMI_REG_DDC_STATUS: + case HDMI_REG_DDC_READFIFO: + return true; + default: + return false; + } +} + +static const struct regmap_range_cfg it6263_hdmi_range_cfg = { + .range_min = 0x00, + .range_max = HDMI_REG_AVI_DB13, + .selector_reg = HDMI_REG_BANK_CTRL, + .selector_mask = REG_BANK_SEL, + .selector_shift = 0, + .window_start = 0x00, + .window_len = 0x100, +}; + +static const struct regmap_config it6263_hdmi_regmap_config = { + .name = "it6263-hdmi", + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = it6263_hdmi_writeable_reg, + .readable_reg = it6263_hdmi_readable_reg, + .volatile_reg = it6263_hdmi_volatile_reg, + .max_register = HDMI_REG_AVI_DB13, + .ranges = &it6263_hdmi_range_cfg, + .num_ranges = 1, + .cache_type = REGCACHE_MAPLE, +}; + +static bool it6263_lvds_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case LVDS_REG_05: + case LVDS_REG_0B: + case LVDS_REG_2C: + case LVDS_REG_3C: + case LVDS_REG_3F: + case LVDS_REG_47: + case LVDS_REG_48: + case LVDS_REG_4F: + case LVDS_REG_52: + return true; + default: + return false; + } +} + +static bool it6263_lvds_readable_reg(struct device 
*dev, unsigned int reg) +{ + return it6263_lvds_writeable_reg(dev, reg); +} + +static bool it6263_lvds_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == LVDS_REG_05; +} + +static const struct regmap_config it6263_lvds_regmap_config = { + .name = "it6263-lvds", + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = it6263_lvds_writeable_reg, + .readable_reg = it6263_lvds_readable_reg, + .volatile_reg = it6263_lvds_volatile_reg, + .max_register = LVDS_REG_52, + .cache_type = REGCACHE_MAPLE, +}; + +static const char * const it6263_supplies[] = { + "ivdd", "ovdd", "txavcc18", "txavcc33", "pvcc1", "pvcc2", + "avcc", "anvdd", "apvdd" +}; + +static int it6263_parse_dt(struct it6263 *it) +{ + struct device *dev = it->dev; + struct device_node *port0, *port1; + int ret = 0; + + it->lvds_data_mapping = drm_of_lvds_get_data_mapping(dev->of_node); + if (it->lvds_data_mapping < 0) { + dev_err(dev, "%pOF: invalid or missing %s DT property: %d\n", + dev->of_node, "data-mapping", it->lvds_data_mapping); + return it->lvds_data_mapping; + } + + it->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0); + if (IS_ERR(it->next_bridge)) + return dev_err_probe(dev, PTR_ERR(it->next_bridge), + "failed to get next bridge\n"); + + port0 = of_graph_get_port_by_id(dev->of_node, 0); + port1 = of_graph_get_port_by_id(dev->of_node, 1); + if (port0 && port1) { + int order; + + it->lvds_dual_link = true; + order = drm_of_lvds_get_dual_link_pixel_order_sink(port0, port1); + if (order < 0) { + dev_err(dev, + "failed to get dual link pixel order: %d\n", + order); + ret = order; + } else if (order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) { + it->lvds_link12_swap = true; + } + } else if (port1) { + ret = -EINVAL; + dev_err(dev, "single input LVDS port1 is not supported\n"); + } else if (!port0) { + ret = -EINVAL; + dev_err(dev, "no input LVDS port\n"); + } + + of_node_put(port0); + of_node_put(port1); + + return ret; +} + +static inline void it6263_hw_reset(struct gpio_desc *reset_gpio) +{ + if (!reset_gpio) + return; + + gpiod_set_value_cansleep(reset_gpio, 0); + fsleep(1000); + gpiod_set_value_cansleep(reset_gpio, 1); + /* The chip maker says the low pulse should be at least 40ms. 
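 * Note that gpiod values are logical: writing 1 asserts reset, which drives
 * an active-low reset line physically low for the duration of this pulse.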
*/ + fsleep(40000); + gpiod_set_value_cansleep(reset_gpio, 0); + /* additional time to wait for the high voltage to stabilize */ + fsleep(5000); +} + +static inline int it6263_lvds_set_i2c_addr(struct it6263 *it) +{ + int ret; + + ret = regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT, + LVDS_INPUT_CTRL_I2C_ADDR << 1); + if (ret) + return ret; + + return regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT_EN, BIT(0)); +} + +static inline void it6263_lvds_reset(struct it6263 *it) +{ + /* AFE PLL reset */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), 0x0); + fsleep(1000); + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), BIT(0)); + + /* software pixel clock domain reset */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, + REG_SOFT_P_RST); + fsleep(1000); + regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, 0x0); + fsleep(10000); +} + +static inline void it6263_lvds_set_interface(struct it6263 *it) +{ + /* color depth */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8); + /* output mapping */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, JEIDA); + + if (it->lvds_dual_link) { + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO); + regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), BIT(1)); + } else { + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, SISO); + regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), 0); + } +} + +static inline void it6263_lvds_set_afe(struct it6263 *it) +{ + regmap_write(it->lvds_regmap, LVDS_REG_3C, 0xaa); + regmap_write(it->lvds_regmap, LVDS_REG_3F, 0x02); + regmap_write(it->lvds_regmap, LVDS_REG_47, 0xaa); + regmap_write(it->lvds_regmap, LVDS_REG_48, 0x02); + regmap_write(it->lvds_regmap, LVDS_REG_4F, 0x11); + + regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_SSC_PCLK_RF, + REG_SSC_PCLK_RF); + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, 0x07, 0); + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_DESSC_ENB, + REG_DESSC_ENB); +} + +static inline void it6263_lvds_sys_cfg(struct it6263 *it) +{ + regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_LVDS_IN_SWAP, + it->lvds_link12_swap ? 
REG_LVDS_IN_SWAP : 0); +} + +static inline void it6263_lvds_config(struct it6263 *it) +{ + it6263_lvds_reset(it); + it6263_lvds_set_interface(it); + it6263_lvds_set_afe(it); + it6263_lvds_sys_cfg(it); +} + +static inline void it6263_hdmi_config(struct it6263 *it) +{ + regmap_write(it->hdmi_regmap, HDMI_REG_SW_RST, HDMI_RST_ALL); + regmap_write(it->hdmi_regmap, HDMI_REG_INPUT_MODE, IN_RGB); + regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, HDMI_COLOR_DEPTH, + HDMI_COLOR_DEPTH_24); +} + +static enum drm_connector_status it6263_detect(struct it6263 *it) +{ + unsigned int val; + + regmap_read(it->hdmi_regmap, HDMI_REG_SYS_STATUS, &val); + if (val & HPDETECT) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct it6263 *it = data; + struct regmap *regmap = it->hdmi_regmap; + unsigned int start = (block % 2) * EDID_LENGTH; + unsigned int segment = block >> 1; + unsigned int count, val; + int ret; + + regmap_write(regmap, HDMI_REG_DDC_MASTER_CTRL, MASTER_SEL_HOST); + regmap_write(regmap, HDMI_REG_DDC_HEADER, DDC_ADDR << 1); + regmap_write(regmap, HDMI_REG_DDC_EDIDSEG, segment); + + while (len) { + /* clear DDC FIFO */ + regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_FIFO_CLR); + + ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, + val, val & DDC_DONE, + 2000, 10000); + if (ret) { + dev_err(it->dev, "failed to clear DDC FIFO:%d\n", ret); + return ret; + } + + count = len > HDMI_DDC_FIFO_BYTES ? HDMI_DDC_FIFO_BYTES : len; + + /* fire the read command */ + regmap_write(regmap, HDMI_REG_DDC_REQOFF, start); + regmap_write(regmap, HDMI_REG_DDC_REQCOUNT, count); + regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_EDID_READ); + + start += count; + len -= count; + + ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, val, + val & (DDC_DONE | DDC_ERROR), + 20000, 250000); + if (ret && !(val & DDC_ERROR)) { + dev_err(it->dev, "failed to read EDID:%d\n", ret); + return ret; + } + + if (val & DDC_ERROR) { + dev_err(it->dev, "DDC error\n"); + return -EIO; + } + + /* cache to buffer */ + for (; count > 0; count--) { + regmap_read(regmap, HDMI_REG_DDC_READFIFO, &val); + *(buf++) = val; + } + } + + return 0; +} + +static int it6263_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); +} + +static void +it6263_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, AVMUTE, AVMUTE); + regmap_write(it->hdmi_regmap, HDMI_REG_PKT_GENERAL_CTRL, 0); + regmap_write(it->hdmi_regmap, HDMI_REG_AFE_DRV_CTRL, + AFE_DRV_RST | AFE_DRV_PWD); +} + +static void +it6263_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct it6263 *it = bridge_to_it6263(bridge); + const struct drm_crtc_state *crtc_state; + struct regmap *regmap = it->hdmi_regmap; + const struct drm_display_mode *mode; + struct drm_connector *connector; + bool is_stable = false; + struct drm_crtc *crtc; + unsigned int val; + bool pclk_high; + int i, ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = 
drm_atomic_get_new_connector_state(state, connector)->crtc; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + mode = &crtc_state->adjusted_mode; + + regmap_write(regmap, HDMI_REG_HDMI_MODE, TX_HDMI_MODE); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); + + /* HDMI AFE setup */ + pclk_high = mode->clock > HIGH_PIXEL_CLOCK_KHZ; + regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, AFE_DRV_RST); + if (pclk_high) + regmap_write(regmap, HDMI_REG_AFE_XP_CTRL, + AFE_XP_GAINBIT | AFE_XP_RESETB); + else + regmap_write(regmap, HDMI_REG_AFE_XP_CTRL, + AFE_XP_ER0 | AFE_XP_RESETB); + regmap_write(regmap, HDMI_REG_AFE_ISW_CTRL, 0x10); + if (pclk_high) + regmap_write(regmap, HDMI_REG_AFE_IP_CTRL, + AFE_IP_GAINBIT | AFE_IP_RESETB); + else + regmap_write(regmap, HDMI_REG_AFE_IP_CTRL, + AFE_IP_ER0 | AFE_IP_RESETB); + + /* HDMI software video reset */ + regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, SOFTV_RST); + fsleep(1000); + regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, 0); + + /* reconfigure LVDS and retry several times in case the video is unstable */ + for (i = 0; i < 3; i++) { + ret = regmap_read_poll_timeout(regmap, HDMI_REG_SYS_STATUS, val, + val & TXVIDSTABLE, + 20000, 500000); + if (!ret) { + is_stable = true; + break; + } + + it6263_lvds_config(it); + } + + if (!is_stable) + dev_warn(it->dev, "failed to wait for video stable\n"); + + /* HDMI AFE reset release and power up */ + regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, 0); + + regmap_write_bits(regmap, HDMI_REG_GCP, AVMUTE, 0); + + regmap_write(regmap, HDMI_REG_PKT_GENERAL_CTRL, ENABLE_PKT | REPEAT_PKT); +} + +static enum drm_mode_status +it6263_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + unsigned long long rate; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + if (rate == 0) + return MODE_NOCLOCK; + + return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate); +} + +static int it6263_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct it6263 *it = bridge_to_it6263(bridge); + struct drm_connector *connector; + int ret; + + ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge, + flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) + return ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) + return 0; + + connector = drm_bridge_connector_init(bridge->dev, bridge->encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + dev_err(it->dev, "failed to initialize bridge connector: %d\n", + ret); + return ret; + } + + drm_connector_attach_encoder(connector, bridge->encoder); + + return 0; +} + +static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + return it6263_detect(it); +} + +static const struct drm_edid * +it6263_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + return drm_edid_read_custom(connector, it6263_read_edid, it); +} + +static u32 * +it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct it6263 *it = bridge_to_it6263(bridge); + u32 *input_fmts; + + *num_input_fmts = 0; + + if (it->lvds_data_mapping != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) + return NULL; + + input_fmts =
kmalloc(sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + *num_input_fmts = 1; + + return input_fmts; +} + +static enum drm_mode_status +it6263_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + if (mode->clock > MAX_PIXEL_CLOCK_KHZ) + return MODE_CLOCK_HIGH; + + if (tmds_rate > MAX_HDMI_TMDS_CHAR_RATE_HZ) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + if (type == HDMI_INFOFRAME_TYPE_AVI) + regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0); + else + dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + + return 0; +} + +static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct it6263 *it = bridge_to_it6263(bridge); + struct regmap *regmap = it->hdmi_regmap; + + if (type != HDMI_INFOFRAME_TYPE_AVI) { + dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + return 0; + } + + /* write the first AVI infoframe data byte chunk(DB1-DB5) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB1, + &buffer[HDMI_INFOFRAME_HEADER_SIZE], + HDMI_AVI_DB_CHUNK1_SIZE); + + /* write the second AVI infoframe data byte chunk(DB6-DB13) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB6, + &buffer[HDMI_INFOFRAME_HEADER_SIZE + + HDMI_AVI_DB_CHUNK1_SIZE], + HDMI_AVI_DB_CHUNK2_SIZE); + + /* write checksum */ + regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]); + + regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT); + + return 0; +} + +static const struct drm_bridge_funcs it6263_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = it6263_bridge_attach, + .mode_valid = it6263_bridge_mode_valid, + .atomic_disable = it6263_bridge_atomic_disable, + .atomic_enable = it6263_bridge_atomic_enable, + .atomic_check = it6263_bridge_atomic_check, + .detect = it6263_bridge_detect, + .edid_read = it6263_bridge_edid_read, + .atomic_get_input_bus_fmts = it6263_bridge_atomic_get_input_bus_fmts, + .hdmi_tmds_char_rate_valid = it6263_hdmi_tmds_char_rate_valid, + .hdmi_clear_infoframe = it6263_hdmi_clear_infoframe, + .hdmi_write_infoframe = it6263_hdmi_write_infoframe, +}; + +static int it6263_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct gpio_desc *reset_gpio; + struct it6263 *it; + int ret; + + it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL); + if (!it) + return -ENOMEM; + + it->dev = dev; + it->hdmi_i2c = client; + + it->hdmi_regmap = devm_regmap_init_i2c(client, + &it6263_hdmi_regmap_config); + if (IS_ERR(it->hdmi_regmap)) + return dev_err_probe(dev, PTR_ERR(it->hdmi_regmap), + "failed to init I2C regmap for HDMI\n"); + + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) + return dev_err_probe(dev, PTR_ERR(reset_gpio), + "failed to get reset gpio\n"); + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it6263_supplies), + it6263_supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get power supplies\n"); + + ret = it6263_parse_dt(it); + if (ret) + return ret; + + it6263_hw_reset(reset_gpio); + + ret = it6263_lvds_set_i2c_addr(it); + if (ret) + 
return dev_err_probe(dev, ret, "failed to set I2C addr\n"); + + it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter, + LVDS_INPUT_CTRL_I2C_ADDR); + if (IS_ERR(it->lvds_i2c)) + return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c), + "failed to allocate I2C device for LVDS\n"); + + it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c, + &it6263_lvds_regmap_config); + if (IS_ERR(it->lvds_regmap)) + return dev_err_probe(dev, PTR_ERR(it->lvds_regmap), + "failed to init I2C regmap for LVDS\n"); + + it6263_lvds_config(it); + it6263_hdmi_config(it); + + i2c_set_clientdata(client, it); + + it->bridge.funcs = &it6263_bridge_funcs; + it->bridge.of_node = dev->of_node; + /* IT6263 chip doesn't support HPD interrupt. */ + it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HDMI; + it->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + it->bridge.vendor = "ITE"; + it->bridge.product = "IT6263"; + + return devm_drm_bridge_add(dev, &it->bridge); +} + +static const struct of_device_id it6263_of_match[] = { + { .compatible = "ite,it6263", }, + { } +}; +MODULE_DEVICE_TABLE(of, it6263_of_match); + +static const struct i2c_device_id it6263_i2c_ids[] = { + { "it6263", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids); + +static struct i2c_driver it6263_driver = { + .probe = it6263_probe, + .driver = { + .name = "it6263", + .of_match_table = it6263_of_match, + }, + .id_table = it6263_i2c_ids, +}; +module_i2c_driver(it6263_driver); + +MODULE_DESCRIPTION("ITE Tech. Inc. IT6263 LVDS/HDMI bridge"); +MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 87b8545fccc0..008d86cc562a 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2614,9 +2614,9 @@ static int it6505_poweron(struct it6505 *it6505) /* time interval between OVDD and SYSRSTN at least be 10ms */ if (pdata->gpiod_reset) { usleep_range(10000, 20000); - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); - usleep_range(1000, 2000); gpiod_set_value_cansleep(pdata->gpiod_reset, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); usleep_range(25000, 35000); } @@ -2647,7 +2647,7 @@ static int it6505_poweroff(struct it6505 *it6505) disable_irq_nosync(it6505->irq); if (pdata->gpiod_reset) - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); if (pdata->pwr18) { err = regulator_disable(pdata->pwr18); @@ -3107,6 +3107,8 @@ static __maybe_unused int it6505_bridge_suspend(struct device *dev) { struct it6505 *it6505 = dev_get_drvdata(dev); + it6505_remove_edid(it6505); + return it6505_poweroff(it6505); } @@ -3133,7 +3135,7 @@ static int it6505_init_pdata(struct it6505 *it6505) return PTR_ERR(pdata->ovdd); } - pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pdata->gpiod_reset)) { dev_err(dev, "gpiod_reset gpio not found"); return PTR_ERR(pdata->gpiod_reset); @@ -3505,6 +3507,7 @@ static const struct of_device_id it6505_of_match[] = { { .compatible = "ite,it6505" }, { } }; +MODULE_DEVICE_TABLE(of, it6505_of_match); static struct i2c_driver it6505_i2c_driver = { .driver = { diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 925e42f46cd8..35ae3f0e8f51 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -770,8 +770,6 @@ void
it66121_bridge_mode_set(struct drm_bridge *bridge, mutex_lock(&ctx->lock); - hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe); - ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector, adjusted_mode); if (ret) { diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 73983f9b50cb..1b31fdebe164 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -23,6 +23,8 @@ #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> #define EDID_SEG_SIZE 256 #define EDID_LEN 32 @@ -333,49 +335,6 @@ end: return temp; } -static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611, - struct drm_connector *connector, - struct drm_display_mode *mode) -{ - union hdmi_infoframe infoframe; - ssize_t len; - u8 iframes = 0x0a; /* UD1 infoframe */ - u8 buf[32]; - int ret; - int i; - - ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, - connector, - mode); - if (ret < 0) - goto out; - - len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); - if (len < 0) - goto out; - - for (i = 0; i < len; i++) - regmap_write(lt9611->regmap, 0x8440 + i, buf[i]); - - ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi, - connector, - mode); - if (ret < 0) - goto out; - - len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); - if (len < 0) - goto out; - - for (i = 0; i < len; i++) - regmap_write(lt9611->regmap, 0x8474 + i, buf[i]); - - iframes |= 0x20; - -out: - regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */ -} - static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi) { if (is_hdmi) @@ -719,7 +678,7 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge, } lt9611_mipi_input_analog(lt9611); - lt9611_hdmi_set_infoframes(lt9611, connector, mode); + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi); lt9611_hdmi_tx_phy(lt9611); @@ -798,22 +757,25 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned long long rate; if (mode->hdisplay > 3840) return MODE_BAD_HVALUE; - if (mode->vdisplay > 2160) - return MODE_BAD_VVALUE; - - if (mode->hdisplay == 3840 && - mode->vdisplay == 2160 && - drm_mode_vrefresh(mode) > 30) - return MODE_CLOCK_HIGH; - if (mode->hdisplay > 2000 && !lt9611->dsi1_node) return MODE_PANEL; - else - return MODE_OK; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate); +} + +static int lt9611_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); } static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge, @@ -887,6 +849,99 @@ lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge, return input_fmts; } +/* + * Other working frames: + * - 0x01, 0x84df + * - 0x04, 0x84c0 + */ +#define LT9611_INFOFRAME_AUDIO 0x02 +#define LT9611_INFOFRAME_AVI 0x08 +#define LT9611_INFOFRAME_SPD 0x10 +#define LT9611_INFOFRAME_VENDOR 0x20 + +static int lt9611_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum 
hdmi_infoframe_type type) +{ + struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned int mask; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + mask = LT9611_INFOFRAME_AVI; + break; + + case HDMI_INFOFRAME_TYPE_SPD: + mask = LT9611_INFOFRAME_SPD; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + mask = LT9611_INFOFRAME_VENDOR; + break; + + default: + drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); + mask = 0; + break; + } + + if (mask) + regmap_update_bits(lt9611->regmap, 0x843d, mask, 0); + + return 0; +} + +static int lt9611_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned int mask, addr; + int i; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + mask = LT9611_INFOFRAME_AVI; + addr = 0x8440; + break; + + case HDMI_INFOFRAME_TYPE_SPD: + mask = LT9611_INFOFRAME_SPD; + addr = 0x8493; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + mask = LT9611_INFOFRAME_VENDOR; + addr = 0x8474; + break; + + default: + drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); + mask = 0; + break; + } + + if (mask) { + for (i = 0; i < len; i++) + regmap_write(lt9611->regmap, addr + i, buffer[i]); + + regmap_update_bits(lt9611->regmap, 0x843d, mask, mask); + } + + return 0; +} + +static enum drm_mode_status +lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + /* 297 MHz for 4k@30 mode */ + if (tmds_rate > 297000000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + static const struct drm_bridge_funcs lt9611_bridge_funcs = { .attach = lt9611_bridge_attach, .mode_valid = lt9611_bridge_mode_valid, @@ -894,6 +949,7 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .edid_read = lt9611_bridge_edid_read, .hpd_enable = lt9611_bridge_hpd_enable, + .atomic_check = lt9611_bridge_atomic_check, .atomic_pre_enable = lt9611_bridge_atomic_pre_enable, .atomic_enable = lt9611_bridge_atomic_enable, .atomic_disable = lt9611_bridge_atomic_disable, @@ -902,6 +958,10 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_get_input_bus_fmts = lt9611_atomic_get_input_bus_fmts, + + .hdmi_tmds_char_rate_valid = lt9611_hdmi_tmds_char_rate_valid, + .hdmi_write_infoframe = lt9611_hdmi_write_infoframe, + .hdmi_clear_infoframe = lt9611_hdmi_clear_infoframe, }; static int lt9611_parse_dt(struct device *dev, @@ -1116,8 +1176,11 @@ static int lt9611_probe(struct i2c_client *client) lt9611->bridge.funcs = <9611_bridge_funcs; lt9611->bridge.of_node = client->dev.of_node; lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | - DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES; + DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES | + DRM_BRIDGE_OP_HDMI; lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + lt9611->bridge.vendor = "Lontium"; + lt9611->bridge.product = "LT9611"; drm_bridge_add(<9611->bridge); diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 430f8adebf9c..4416d0be7272 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -2043,7 +2043,7 @@ void samsung_dsim_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(samsung_dsim_remove); -static int __maybe_unused samsung_dsim_suspend(struct device *dev) +static int 
samsung_dsim_suspend(struct device *dev) { struct samsung_dsim *dsi = dev_get_drvdata(dev); const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; @@ -2073,7 +2073,7 @@ static int __maybe_unused samsung_dsim_suspend(struct device *dev) return 0; } -static int __maybe_unused samsung_dsim_resume(struct device *dev) +static int samsung_dsim_resume(struct device *dev) { struct samsung_dsim *dsi = dev_get_drvdata(dev); const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; @@ -2108,7 +2108,7 @@ err_clk: } const struct dev_pm_ops samsung_dsim_pm_ops = { - SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL) + RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; @@ -2142,7 +2142,7 @@ static struct platform_driver samsung_dsim_driver = { .remove_new = samsung_dsim_remove, .driver = { .name = "samsung-dsim", - .pm = &samsung_dsim_pm_ops, + .pm = pm_ptr(&samsung_dsim_pm_ops), .of_match_table = samsung_dsim_of_match, }, }; diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 7f91b0db161e..9be9cc5b9025 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -180,6 +180,8 @@ struct sii902x { struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; bool sink_is_hdmi; + u32 bus_width; + /* * Mutex protects audio and video functions from interfering * each other, by keeping their i2c command sequences atomic. @@ -477,6 +479,8 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, u32 output_fmt, unsigned int *num_input_fmts) { + + struct sii902x *sii902x = bridge_to_sii902x(bridge); u32 *input_fmts; *num_input_fmts = 0; @@ -485,7 +489,20 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, if (!input_fmts) return NULL; - input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + switch (sii902x->bus_width) { + case 16: + input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16; + break; + case 18: + input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18; + break; + case 24: + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + break; + default: + return NULL; + } + *num_input_fmts = 1; return input_fmts; @@ -1167,6 +1184,11 @@ static int sii902x_probe(struct i2c_client *client) return PTR_ERR(sii902x->reset_gpio); } + sii902x->bus_width = 24; + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); + if (endpoint) + of_property_read_u32(endpoint, "bus-width", &sii902x->bus_width); + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); if (endpoint) { struct device_node *remote = of_graph_get_remote_port_parent(endpoint); diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 15fc182d05ef..ca416dab156d 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -46,6 +46,14 @@ config DRM_DW_HDMI_CEC Support the CE interface which is part of the Synopsys Designware HDMI block. 
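The samsung-dsim and dw-hdmi-cec hunks above follow the same modernization pattern: the __maybe_unused annotations are dropped because RUNTIME_PM_OPS()/SYSTEM_SLEEP_PM_OPS() combined with pm_ptr() let the compiler discard the callbacks via dead-code elimination when CONFIG_PM is disabled, instead of merely silencing unused-function warnings. A minimal sketch of the fully converted idiom, using hypothetical foo_* names that are not part of any patch above:

/*
 * Sketch of the RUNTIME_PM_OPS() + pm_ptr() idiom; foo_* is hypothetical.
 * With CONFIG_PM=n, pm_ptr() evaluates to NULL, so foo_pm_ops and both
 * callbacks become unreferenced and are dropped at link time without
 * needing __maybe_unused.
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks / drop power while the device is idle */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore power / ungate clocks before first use */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

static struct platform_driver foo_driver = {
	/* .probe/.remove omitted for brevity */
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),
	},
};

Note that the samsung-dsim hunk keeps its existing SET_SYSTEM_SLEEP_PM_OPS() entry and only converts the runtime ops; the sketch shows the unconditional-macro variant used by the dw-hdmi-cec hunk.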
+config DRM_DW_HDMI_QP + tristate + select DRM_DISPLAY_HDMI_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_KMS_HELPER + select REGMAP_MMIO + config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile index ce715562e9e5..9869d9651ed1 100644 --- a/drivers/gpu/drm/bridge/synopsys/Makefile +++ b/drivers/gpu/drm/bridge/synopsys/Makefile @@ -5,4 +5,6 @@ obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o +obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o + obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c index 673661160e54..d4614de1ae1e 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c @@ -312,7 +312,7 @@ static void dw_hdmi_cec_remove(struct platform_device *pdev) cec_unregister_adapter(cec->adap); } -static int __maybe_unused dw_hdmi_cec_resume(struct device *dev) +static int dw_hdmi_cec_resume(struct device *dev) { struct dw_hdmi_cec *cec = dev_get_drvdata(dev); @@ -328,7 +328,7 @@ static int __maybe_unused dw_hdmi_cec_resume(struct device *dev) return 0; } -static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev) +static int dw_hdmi_cec_suspend(struct device *dev) { struct dw_hdmi_cec *cec = dev_get_drvdata(dev); @@ -341,7 +341,7 @@ static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev) } static const struct dev_pm_ops dw_hdmi_cec_pm = { - SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume) + SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume) }; static struct platform_driver dw_hdmi_cec_driver = { @@ -349,7 +349,7 @@ static struct platform_driver dw_hdmi_cec_driver = { .remove_new = dw_hdmi_cec_remove, .driver = { .name = "dw-hdmi-cec", - .pm = &dw_hdmi_cec_pm, + .pm = pm_ptr(&dw_hdmi_cec_pm), }, }; module_platform_driver(dw_hdmi_cec_driver); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c new file mode 100644 index 000000000000..181c5164b231 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
+ * + * Author: Algea Cao <algea.cao@rock-chips.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> + */ +#include <linux/completion.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/workqueue.h> + +#include <drm/bridge/dw_hdmi_qp.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_edid.h> +#include <drm/drm_modes.h> + +#include <sound/hdmi-codec.h> + +#include "dw-hdmi-qp.h" + +#define DDC_CI_ADDR 0x37 +#define DDC_SEGMENT_ADDR 0x30 + +#define HDMI14_MAX_TMDSCLK 340000000 + +#define SCRAMB_POLL_DELAY_MS 3000 + +struct dw_hdmi_qp_i2c { + struct i2c_adapter adap; + + struct mutex lock; /* used to serialize data transfers */ + struct completion cmp; + u8 stat; + + u8 slave_reg; + bool is_regaddr; + bool is_segment; +}; + +struct dw_hdmi_qp { + struct drm_bridge bridge; + + struct device *dev; + struct dw_hdmi_qp_i2c *i2c; + + struct { + const struct dw_hdmi_qp_phy_ops *ops; + void *data; + } phy; + + struct regmap *regm; +}; + +static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val, + int offset) +{ + regmap_write(hdmi->regm, offset, val); +} + +static unsigned int dw_hdmi_qp_read(struct dw_hdmi_qp *hdmi, int offset) +{ + unsigned int val = 0; + + regmap_read(hdmi->regm, offset, &val); + + return val; +} + +static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data, + unsigned int mask, unsigned int reg) +{ + regmap_update_bits(hdmi->regm, reg, mask, data); +} + +static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi, + unsigned char *buf, unsigned int length) +{ + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + int stat; + + if (!i2c->is_regaddr) { + dev_dbg(hdmi->dev, "set read register address to 0\n"); + i2c->slave_reg = 0x00; + i2c->is_regaddr = true; + } + + while (length--) { + reinit_completion(&i2c->cmp); + + dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR, + I2CM_INTERFACE_CONTROL0); + + if (i2c->is_segment) + dw_hdmi_qp_mod(hdmi, I2CM_EXT_READ, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + else + dw_hdmi_qp_mod(hdmi, I2CM_FM_READ, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + + stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10); + if (!stat) { + dev_err(hdmi->dev, "i2c read timed out\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EAGAIN; + } + + /* Check for error condition on the bus */ + if (i2c->stat & I2CM_NACK_RCVD_IRQ) { + dev_err(hdmi->dev, "i2c read error\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EIO; + } + + *buf++ = dw_hdmi_qp_read(hdmi, I2CM_INTERFACE_RDDATA_0_3) & 0xff; + dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0); + } + + i2c->is_segment = false; + + return 0; +} + +static int dw_hdmi_qp_i2c_write(struct dw_hdmi_qp *hdmi, + unsigned char *buf, unsigned int length) +{ + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + int stat; + + if (!i2c->is_regaddr) { + /* Use the first write byte as register address */ + i2c->slave_reg = buf[0]; + length--; + buf++; + i2c->is_regaddr = true; + } + + while (length--) { + reinit_completion(&i2c->cmp); + + dw_hdmi_qp_write(hdmi, *buf++, I2CM_INTERFACE_WRDATA_0_3); + dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR, + I2CM_INTERFACE_CONTROL0); + dw_hdmi_qp_mod(hdmi, I2CM_FM_WRITE, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + + stat = 
wait_for_completion_timeout(&i2c->cmp, HZ / 10); + if (!stat) { + dev_err(hdmi->dev, "i2c write time out!\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EAGAIN; + } + + /* Check for error condition on the bus */ + if (i2c->stat & I2CM_NACK_RCVD_IRQ) { + dev_err(hdmi->dev, "i2c write nack!\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EIO; + } + + dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0); + } + + return 0; +} + +static int dw_hdmi_qp_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct dw_hdmi_qp *hdmi = i2c_get_adapdata(adap); + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + u8 addr = msgs[0].addr; + int i, ret = 0; + + if (addr == DDC_CI_ADDR) + /* + * The internal I2C controller does not support the multi-byte + * read and write operations needed for DDC/CI. + * FIXME: Blacklist the DDC/CI address until we filter out + * unsupported I2C operations. + */ + return -EOPNOTSUPP; + + for (i = 0; i < num; i++) { + if (msgs[i].len == 0) { + dev_err(hdmi->dev, + "unsupported transfer %d/%d, no data\n", + i + 1, num); + return -EOPNOTSUPP; + } + } + + guard(mutex)(&i2c->lock); + + /* Unmute DONE and ERROR interrupts */ + dw_hdmi_qp_mod(hdmi, I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N, + I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N, + MAINUNIT_1_INT_MASK_N); + + /* Set slave device address taken from the first I2C message */ + if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1) + addr = DDC_ADDR; + + dw_hdmi_qp_mod(hdmi, addr << 5, I2CM_SLVADDR, I2CM_INTERFACE_CONTROL0); + + /* Set slave device register address on transfer */ + i2c->is_regaddr = false; + + /* Set segment pointer for I2C extended read mode operation */ + i2c->is_segment = false; + + for (i = 0; i < num; i++) { + if (msgs[i].addr == DDC_SEGMENT_ADDR && msgs[i].len == 1) { + i2c->is_segment = true; + dw_hdmi_qp_mod(hdmi, DDC_SEGMENT_ADDR, I2CM_SEG_ADDR, + I2CM_INTERFACE_CONTROL1); + dw_hdmi_qp_mod(hdmi, *msgs[i].buf << 7, I2CM_SEG_PTR, + I2CM_INTERFACE_CONTROL1); + } else { + if (msgs[i].flags & I2C_M_RD) + ret = dw_hdmi_qp_i2c_read(hdmi, msgs[i].buf, + msgs[i].len); + else + ret = dw_hdmi_qp_i2c_write(hdmi, msgs[i].buf, + msgs[i].len); + } + if (ret < 0) + break; + } + + if (!ret) + ret = num; + + /* Mute DONE and ERROR interrupts */ + dw_hdmi_qp_mod(hdmi, 0, I2CM_OP_DONE_MASK_N | I2CM_NACK_RCVD_MASK_N, + MAINUNIT_1_INT_MASK_N); + + return ret; +} + +static u32 dw_hdmi_qp_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm dw_hdmi_qp_algorithm = { + .master_xfer = dw_hdmi_qp_i2c_xfer, + .functionality = dw_hdmi_qp_i2c_func, +}; + +static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi) +{ + struct dw_hdmi_qp_i2c *i2c; + struct i2c_adapter *adap; + int ret; + + i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return ERR_PTR(-ENOMEM); + + mutex_init(&i2c->lock); + init_completion(&i2c->cmp); + + adap = &i2c->adap; + adap->owner = THIS_MODULE; + adap->dev.parent = hdmi->dev; + adap->algo = &dw_hdmi_qp_algorithm; + strscpy(adap->name, "DesignWare HDMI QP", sizeof(adap->name)); + + i2c_set_adapdata(adap, hdmi); + + ret = devm_i2c_add_adapter(hdmi->dev, adap); + if (ret) { + dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); + devm_kfree(hdmi->dev, i2c); + return ERR_PTR(ret); + } + + hdmi->i2c = i2c; + dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name); + + return adap; +} + +static int dw_hdmi_qp_config_avi_infoframe(struct 
dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + u32 val, i, j; + + if (len != HDMI_INFOFRAME_SIZE(AVI)) { + dev_err(hdmi->dev, "failed to configure avi infoframe\n"); + return -EINVAL; + } + + /* + * DW HDMI QP IP uses a different byte format from standard AVI info + * frames, though generally the bits are in the correct bytes. + */ + val = buffer[1] << 8 | buffer[2] << 16; + dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + if (i * 4 + j >= 14) + break; + if (!j) + val = buffer[i * 4 + j + 3]; + val |= buffer[i * 4 + j + 3] << (8 * j); + } + + dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4); + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + + dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN); + + return 0; +} + +static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + u32 val, i; + + if (len != HDMI_INFOFRAME_SIZE(DRM)) { + dev_err(hdmi->dev, "failed to configure drm infoframe\n"); + return -EINVAL; + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); + + val = buffer[1] << 8 | buffer[2] << 16; + dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0); + + for (i = 0; i <= buffer[2]; i++) { + if (i % 4 == 0) + val = buffer[3 + i]; + val |= buffer[3 + i] << ((i % 4) * 8); + + if ((i % 4 == 3) || i == buffer[2]) + dw_hdmi_qp_write(hdmi, val, + PKT_DRMI_CONTENTS1 + ((i / 4) * 4)); + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN, + PKTSCHED_PKT_EN); + + return 0; +} + +static int dw_hdmi_qp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + int ret; + + ret = drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); + if (ret) + dev_dbg(hdmi->dev, "%s failed: %d\n", __func__, ret); + + return ret; +} + +static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + struct drm_atomic_state *state = old_state->base.state; + struct drm_connector_state *conn_state; + struct drm_connector *connector; + unsigned int op_mode; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + if (connector->display_info.is_hdmi) { + dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", + __func__, conn_state->hdmi.tmds_char_rate); + op_mode = 0; + } else { + dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__); + op_mode = OPMODE_DVI; + } + + hdmi->phy.ops->init(hdmi, hdmi->phy.data); + + dw_hdmi_qp_mod(hdmi, HDCP2_BYPASS, HDCP2_BYPASS, HDCP2LOGIC_CONFIG0); + dw_hdmi_qp_mod(hdmi, op_mode, OPMODE_DVI, LINK_CONFIG0); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); +} + +static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + hdmi->phy.ops->disable(hdmi, hdmi->phy.data); +} + +static enum drm_connector_status +dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge) +{ + struct dw_hdmi_qp *hdmi = 
bridge->driver_private; + + return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); +} + +static const struct drm_edid * +dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + const struct drm_edid *drm_edid; + + drm_edid = drm_edid_read_ddc(connector, bridge->ddc); + if (!drm_edid) + dev_dbg(hdmi->dev, "failed to get edid\n"); + + return drm_edid; +} + +static enum drm_mode_status +dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + unsigned long long rate; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + if (rate > HDMI14_MAX_TMDSCLK) { + dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock); + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_PKT_EN); + break; + + case HDMI_INFOFRAME_TYPE_DRM: + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); + break; + + default: + dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); + } + + return 0; +} + +static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + dw_hdmi_qp_bridge_clear_infoframe(bridge, type); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + return dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len); + + case HDMI_INFOFRAME_TYPE_DRM: + return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); + + default: + dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); + return 0; + } +} + +static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_check = dw_hdmi_qp_bridge_atomic_check, + .atomic_enable = dw_hdmi_qp_bridge_atomic_enable, + .atomic_disable = dw_hdmi_qp_bridge_atomic_disable, + .detect = dw_hdmi_qp_bridge_detect, + .edid_read = dw_hdmi_qp_bridge_edid_read, + .mode_valid = dw_hdmi_qp_bridge_mode_valid, + .hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe, + .hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe, +}; + +static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, MAINUNIT_1_INT_STATUS); + + i2c->stat = stat & (I2CM_OP_DONE_IRQ | I2CM_READ_REQUEST_IRQ | + I2CM_NACK_RCVD_IRQ); + + if (i2c->stat) { + dw_hdmi_qp_write(hdmi, i2c->stat, MAINUNIT_1_INT_CLEAR); + complete(&i2c->cmp); + } + + if (stat) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static const struct regmap_config dw_hdmi_qp_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = EARCRX_1_INT_FORCE, +}; + +static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) +{ + dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); + dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); + dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + + /* Software reset */ + 
dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + + dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); + + dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0); + + /* Clear DONE and ERROR interrupts */ + dw_hdmi_qp_write(hdmi, I2CM_OP_DONE_CLEAR | I2CM_NACK_RCVD_CLEAR, + MAINUNIT_1_INT_CLEAR); + + if (hdmi->phy.ops->setup_hpd) + hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); +} + +struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_qp_plat_data *plat_data) +{ + struct device *dev = &pdev->dev; + struct dw_hdmi_qp *hdmi; + void __iomem *regs; + int ret; + + if (!plat_data->phy_ops || !plat_data->phy_ops->init || + !plat_data->phy_ops->disable || !plat_data->phy_ops->read_hpd) { + dev_err(dev, "Missing platform PHY ops\n"); + return ERR_PTR(-ENODEV); + } + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return ERR_PTR(-ENOMEM); + + hdmi->dev = dev; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return ERR_CAST(regs); + + hdmi->regm = devm_regmap_init_mmio(dev, regs, &dw_hdmi_qp_regmap_config); + if (IS_ERR(hdmi->regm)) { + dev_err(dev, "Failed to configure regmap\n"); + return ERR_CAST(hdmi->regm); + } + + hdmi->phy.ops = plat_data->phy_ops; + hdmi->phy.data = plat_data->phy_data; + + dw_hdmi_qp_init_hw(hdmi); + + ret = devm_request_threaded_irq(dev, plat_data->main_irq, + dw_hdmi_qp_main_hardirq, NULL, + IRQF_SHARED, dev_name(dev), hdmi); + if (ret) + return ERR_PTR(ret); + + hdmi->bridge.driver_private = hdmi; + hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs; + hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HDMI | + DRM_BRIDGE_OP_HPD; + hdmi->bridge.of_node = pdev->dev.of_node; + hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + hdmi->bridge.vendor = "Synopsys"; + hdmi->bridge.product = "DW HDMI QP TX"; + + hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); + if (IS_ERR(hdmi->bridge.ddc)) + return ERR_CAST(hdmi->bridge.ddc); + + ret = devm_drm_bridge_add(dev, &hdmi->bridge); + if (ret) + return ERR_PTR(ret); + + ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ERR_PTR(ret); + + return hdmi; +} +EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind); + +void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi) +{ + dw_hdmi_qp_init_hw(hdmi); +} +EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume); + +MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>"); +MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>"); +MODULE_DESCRIPTION("DW HDMI QP transmitter library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h new file mode 100644 index 000000000000..2115b8ef0bd6 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Rockchip Electronics Co.Ltd + * Author: + * Algea Cao <algea.cao@rock-chips.com> + */ +#ifndef __DW_HDMI_QP_H__ +#define __DW_HDMI_QP_H__ + +#include <linux/bits.h> + +/* Main Unit Registers */ +#define CORE_ID 0x0 +#define VER_NUMBER 0x4 +#define VER_TYPE 0x8 +#define CONFIG_REG 0xc +#define CONFIG_CEC BIT(28) +#define CONFIG_AUD_UD BIT(23) +#define CORE_TIMESTAMP_HHMM 0x14 +#define CORE_TIMESTAMP_MMDD 0x18 +#define CORE_TIMESTAMP_YYYY 0x1c +/* Reset Manager Registers */ +#define GLOBAL_SWRESET_REQUEST 0x40 +#define EARCRX_CMDC_SWINIT_P BIT(27) +#define AVP_DATAPATH_PACKET_AUDIO_SWINIT_P BIT(10) +#define 
GLOBAL_SWDISABLE 0x44 +#define CEC_SWDISABLE BIT(17) +#define AVP_DATAPATH_PACKET_AUDIO_SWDISABLE BIT(10) +#define AVP_DATAPATH_VIDEO_SWDISABLE BIT(6) +#define RESET_MANAGER_CONFIG0 0x48 +#define RESET_MANAGER_STATUS0 0x50 +#define RESET_MANAGER_STATUS1 0x54 +#define RESET_MANAGER_STATUS2 0x58 +/* Timer Base Registers */ +#define TIMER_BASE_CONFIG0 0x80 +#define TIMER_BASE_STATUS0 0x84 +/* CMU Registers */ +#define CMU_CONFIG0 0xa0 +#define CMU_CONFIG1 0xa4 +#define CMU_CONFIG2 0xa8 +#define CMU_CONFIG3 0xac +#define CMU_STATUS 0xb0 +#define DISPLAY_CLK_MONITOR 0x3f +#define DISPLAY_CLK_LOCKED 0X15 +#define EARC_BPCLK_OFF BIT(9) +#define AUDCLK_OFF BIT(7) +#define LINKQPCLK_OFF BIT(5) +#define VIDQPCLK_OFF BIT(3) +#define IPI_CLK_OFF BIT(1) +#define CMU_IPI_CLK_FREQ 0xb4 +#define CMU_VIDQPCLK_FREQ 0xb8 +#define CMU_LINKQPCLK_FREQ 0xbc +#define CMU_AUDQPCLK_FREQ 0xc0 +#define CMU_EARC_BPCLK_FREQ 0xc4 +/* I2CM Registers */ +#define I2CM_SM_SCL_CONFIG0 0xe0 +#define I2CM_FM_SCL_CONFIG0 0xe4 +#define I2CM_CONFIG0 0xe8 +#define I2CM_CONTROL0 0xec +#define I2CM_STATUS0 0xf0 +#define I2CM_INTERFACE_CONTROL0 0xf4 +#define I2CM_ADDR 0xff000 +#define I2CM_SLVADDR 0xfe0 +#define I2CM_WR_MASK 0x1e +#define I2CM_EXT_READ BIT(4) +#define I2CM_SHORT_READ BIT(3) +#define I2CM_FM_READ BIT(2) +#define I2CM_FM_WRITE BIT(1) +#define I2CM_FM_EN BIT(0) +#define I2CM_INTERFACE_CONTROL1 0xf8 +#define I2CM_SEG_PTR 0x7f80 +#define I2CM_SEG_ADDR 0x7f +#define I2CM_INTERFACE_WRDATA_0_3 0xfc +#define I2CM_INTERFACE_WRDATA_4_7 0x100 +#define I2CM_INTERFACE_WRDATA_8_11 0x104 +#define I2CM_INTERFACE_WRDATA_12_15 0x108 +#define I2CM_INTERFACE_RDDATA_0_3 0x10c +#define I2CM_INTERFACE_RDDATA_4_7 0x110 +#define I2CM_INTERFACE_RDDATA_8_11 0x114 +#define I2CM_INTERFACE_RDDATA_12_15 0x118 +/* SCDC Registers */ +#define SCDC_CONFIG0 0x140 +#define SCDC_I2C_FM_EN BIT(12) +#define SCDC_UPD_FLAGS_AUTO_CLR BIT(6) +#define SCDC_UPD_FLAGS_POLL_EN BIT(4) +#define SCDC_CONTROL0 0x148 +#define SCDC_STATUS0 0x150 +#define STATUS_UPDATE BIT(0) +#define FRL_START BIT(4) +#define FLT_UPDATE BIT(5) +/* FLT Registers */ +#define FLT_CONFIG0 0x160 +#define FLT_CONFIG1 0x164 +#define FLT_CONFIG2 0x168 +#define FLT_CONTROL0 0x170 +/* Main Unit 2 Registers */ +#define MAINUNIT_STATUS0 0x180 +/* Video Interface Registers */ +#define VIDEO_INTERFACE_CONFIG0 0x800 +#define VIDEO_INTERFACE_CONFIG1 0x804 +#define VIDEO_INTERFACE_CONFIG2 0x808 +#define VIDEO_INTERFACE_CONTROL0 0x80c +#define VIDEO_INTERFACE_STATUS0 0x814 +/* Video Packing Registers */ +#define VIDEO_PACKING_CONFIG0 0x81c +/* Audio Interface Registers */ +#define AUDIO_INTERFACE_CONFIG0 0x820 +#define AUD_IF_SEL_MSK 0x3 +#define AUD_IF_SPDIF 0x2 +#define AUD_IF_I2S 0x1 +#define AUD_IF_PAI 0x0 +#define AUD_FIFO_INIT_ON_OVF_MSK BIT(2) +#define AUD_FIFO_INIT_ON_OVF_EN BIT(2) +#define I2S_LINES_EN_MSK GENMASK(7, 4) +#define I2S_LINES_EN(x) BIT((x) + 4) +#define I2S_BPCUV_RCV_MSK BIT(12) +#define I2S_BPCUV_RCV_EN BIT(12) +#define I2S_BPCUV_RCV_DIS 0 +#define SPDIF_LINES_EN GENMASK(19, 16) +#define AUD_FORMAT_MSK GENMASK(26, 24) +#define AUD_3DOBA (0x7 << 24) +#define AUD_3DASP (0x6 << 24) +#define AUD_MSOBA (0x5 << 24) +#define AUD_MSASP (0x4 << 24) +#define AUD_HBR (0x3 << 24) +#define AUD_DST (0x2 << 24) +#define AUD_OBA (0x1 << 24) +#define AUD_ASP (0x0 << 24) +#define AUDIO_INTERFACE_CONFIG1 0x824 +#define AUDIO_INTERFACE_CONTROL0 0x82c +#define AUDIO_FIFO_CLR_P BIT(0) +#define AUDIO_INTERFACE_STATUS0 0x834 +/* Frame Composer Registers */ +#define FRAME_COMPOSER_CONFIG0 0x840 +#define 
FRAME_COMPOSER_CONFIG1 0x844 +#define FRAME_COMPOSER_CONFIG2 0x848 +#define FRAME_COMPOSER_CONFIG3 0x84c +#define FRAME_COMPOSER_CONFIG4 0x850 +#define FRAME_COMPOSER_CONFIG5 0x854 +#define FRAME_COMPOSER_CONFIG6 0x858 +#define FRAME_COMPOSER_CONFIG7 0x85c +#define FRAME_COMPOSER_CONFIG8 0x860 +#define FRAME_COMPOSER_CONFIG9 0x864 +#define FRAME_COMPOSER_CONTROL0 0x86c +/* Video Monitor Registers */ +#define VIDEO_MONITOR_CONFIG0 0x880 +#define VIDEO_MONITOR_STATUS0 0x884 +#define VIDEO_MONITOR_STATUS1 0x888 +#define VIDEO_MONITOR_STATUS2 0x88c +#define VIDEO_MONITOR_STATUS3 0x890 +#define VIDEO_MONITOR_STATUS4 0x894 +#define VIDEO_MONITOR_STATUS5 0x898 +#define VIDEO_MONITOR_STATUS6 0x89c +/* HDCP2 Logic Registers */ +#define HDCP2LOGIC_CONFIG0 0x8e0 +#define HDCP2_BYPASS BIT(0) +#define HDCP2LOGIC_ESM_GPIO_IN 0x8e4 +#define HDCP2LOGIC_ESM_GPIO_OUT 0x8e8 +/* HDCP14 Registers */ +#define HDCP14_CONFIG0 0x900 +#define HDCP14_CONFIG1 0x904 +#define HDCP14_CONFIG2 0x908 +#define HDCP14_CONFIG3 0x90c +#define HDCP14_KEY_SEED 0x914 +#define HDCP14_KEY_H 0x918 +#define HDCP14_KEY_L 0x91c +#define HDCP14_KEY_STATUS 0x920 +#define HDCP14_AKSV_H 0x924 +#define HDCP14_AKSV_L 0x928 +#define HDCP14_AN_H 0x92c +#define HDCP14_AN_L 0x930 +#define HDCP14_STATUS0 0x934 +#define HDCP14_STATUS1 0x938 +/* Scrambler Registers */ +#define SCRAMB_CONFIG0 0x960 +/* Video Configuration Registers */ +#define LINK_CONFIG0 0x968 +#define OPMODE_FRL_4LANES BIT(8) +#define OPMODE_DVI BIT(4) +#define OPMODE_FRL BIT(0) +/* TMDS FIFO Registers */ +#define TMDS_FIFO_CONFIG0 0x970 +#define TMDS_FIFO_CONTROL0 0x974 +/* FRL RSFEC Registers */ +#define FRL_RSFEC_CONFIG0 0xa20 +#define FRL_RSFEC_STATUS0 0xa30 +/* FRL Packetizer Registers */ +#define FRL_PKTZ_CONFIG0 0xa40 +#define FRL_PKTZ_CONTROL0 0xa44 +#define FRL_PKTZ_CONTROL1 0xa50 +#define FRL_PKTZ_STATUS1 0xa54 +/* Packet Scheduler Registers */ +#define PKTSCHED_CONFIG0 0xa80 +#define PKTSCHED_PRQUEUE0_CONFIG0 0xa84 +#define PKTSCHED_PRQUEUE1_CONFIG0 0xa88 +#define PKTSCHED_PRQUEUE2_CONFIG0 0xa8c +#define PKTSCHED_PRQUEUE2_CONFIG1 0xa90 +#define PKTSCHED_PRQUEUE2_CONFIG2 0xa94 +#define PKTSCHED_PKT_CONFIG0 0xa98 +#define PKTSCHED_PKT_CONFIG1 0xa9c +#define PKTSCHED_DRMI_FIELDRATE BIT(13) +#define PKTSCHED_AVI_FIELDRATE BIT(12) +#define PKTSCHED_PKT_CONFIG2 0xaa0 +#define PKTSCHED_PKT_CONFIG3 0xaa4 +#define PKTSCHED_PKT_EN 0xaa8 +#define PKTSCHED_DRMI_TX_EN BIT(17) +#define PKTSCHED_AUDI_TX_EN BIT(15) +#define PKTSCHED_AVI_TX_EN BIT(13) +#define PKTSCHED_EMP_CVTEM_TX_EN BIT(10) +#define PKTSCHED_AMD_TX_EN BIT(8) +#define PKTSCHED_GCP_TX_EN BIT(3) +#define PKTSCHED_AUDS_TX_EN BIT(2) +#define PKTSCHED_ACR_TX_EN BIT(1) +#define PKTSCHED_NULL_TX_EN BIT(0) +#define PKTSCHED_PKT_CONTROL0 0xaac +#define PKTSCHED_PKT_SEND 0xab0 +#define PKTSCHED_PKT_STATUS0 0xab4 +#define PKTSCHED_PKT_STATUS1 0xab8 +#define PKT_NULL_CONTENTS0 0xb00 +#define PKT_NULL_CONTENTS1 0xb04 +#define PKT_NULL_CONTENTS2 0xb08 +#define PKT_NULL_CONTENTS3 0xb0c +#define PKT_NULL_CONTENTS4 0xb10 +#define PKT_NULL_CONTENTS5 0xb14 +#define PKT_NULL_CONTENTS6 0xb18 +#define PKT_NULL_CONTENTS7 0xb1c +#define PKT_ACP_CONTENTS0 0xb20 +#define PKT_ACP_CONTENTS1 0xb24 +#define PKT_ACP_CONTENTS2 0xb28 +#define PKT_ACP_CONTENTS3 0xb2c +#define PKT_ACP_CONTENTS4 0xb30 +#define PKT_ACP_CONTENTS5 0xb34 +#define PKT_ACP_CONTENTS6 0xb38 +#define PKT_ACP_CONTENTS7 0xb3c +#define PKT_ISRC1_CONTENTS0 0xb40 +#define PKT_ISRC1_CONTENTS1 0xb44 +#define PKT_ISRC1_CONTENTS2 0xb48 +#define PKT_ISRC1_CONTENTS3 0xb4c +#define 
PKT_ISRC1_CONTENTS4 0xb50 +#define PKT_ISRC1_CONTENTS5 0xb54 +#define PKT_ISRC1_CONTENTS6 0xb58 +#define PKT_ISRC1_CONTENTS7 0xb5c +#define PKT_ISRC2_CONTENTS0 0xb60 +#define PKT_ISRC2_CONTENTS1 0xb64 +#define PKT_ISRC2_CONTENTS2 0xb68 +#define PKT_ISRC2_CONTENTS3 0xb6c +#define PKT_ISRC2_CONTENTS4 0xb70 +#define PKT_ISRC2_CONTENTS5 0xb74 +#define PKT_ISRC2_CONTENTS6 0xb78 +#define PKT_ISRC2_CONTENTS7 0xb7c +#define PKT_GMD_CONTENTS0 0xb80 +#define PKT_GMD_CONTENTS1 0xb84 +#define PKT_GMD_CONTENTS2 0xb88 +#define PKT_GMD_CONTENTS3 0xb8c +#define PKT_GMD_CONTENTS4 0xb90 +#define PKT_GMD_CONTENTS5 0xb94 +#define PKT_GMD_CONTENTS6 0xb98 +#define PKT_GMD_CONTENTS7 0xb9c +#define PKT_AMD_CONTENTS0 0xba0 +#define PKT_AMD_CONTENTS1 0xba4 +#define PKT_AMD_CONTENTS2 0xba8 +#define PKT_AMD_CONTENTS3 0xbac +#define PKT_AMD_CONTENTS4 0xbb0 +#define PKT_AMD_CONTENTS5 0xbb4 +#define PKT_AMD_CONTENTS6 0xbb8 +#define PKT_AMD_CONTENTS7 0xbbc +#define PKT_VSI_CONTENTS0 0xbc0 +#define PKT_VSI_CONTENTS1 0xbc4 +#define PKT_VSI_CONTENTS2 0xbc8 +#define PKT_VSI_CONTENTS3 0xbcc +#define PKT_VSI_CONTENTS4 0xbd0 +#define PKT_VSI_CONTENTS5 0xbd4 +#define PKT_VSI_CONTENTS6 0xbd8 +#define PKT_VSI_CONTENTS7 0xbdc +#define PKT_AVI_CONTENTS0 0xbe0 +#define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT BIT(4) +#define HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR 0x04 +#define HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR 0x08 +#define HDMI_FC_AVICONF2_IT_CONTENT_VALID 0x80 +#define PKT_AVI_CONTENTS1 0xbe4 +#define PKT_AVI_CONTENTS2 0xbe8 +#define PKT_AVI_CONTENTS3 0xbec +#define PKT_AVI_CONTENTS4 0xbf0 +#define PKT_AVI_CONTENTS5 0xbf4 +#define PKT_AVI_CONTENTS6 0xbf8 +#define PKT_AVI_CONTENTS7 0xbfc +#define PKT_SPDI_CONTENTS0 0xc00 +#define PKT_SPDI_CONTENTS1 0xc04 +#define PKT_SPDI_CONTENTS2 0xc08 +#define PKT_SPDI_CONTENTS3 0xc0c +#define PKT_SPDI_CONTENTS4 0xc10 +#define PKT_SPDI_CONTENTS5 0xc14 +#define PKT_SPDI_CONTENTS6 0xc18 +#define PKT_SPDI_CONTENTS7 0xc1c +#define PKT_AUDI_CONTENTS0 0xc20 +#define PKT_AUDI_CONTENTS1 0xc24 +#define PKT_AUDI_CONTENTS2 0xc28 +#define PKT_AUDI_CONTENTS3 0xc2c +#define PKT_AUDI_CONTENTS4 0xc30 +#define PKT_AUDI_CONTENTS5 0xc34 +#define PKT_AUDI_CONTENTS6 0xc38 +#define PKT_AUDI_CONTENTS7 0xc3c +#define PKT_NVI_CONTENTS0 0xc40 +#define PKT_NVI_CONTENTS1 0xc44 +#define PKT_NVI_CONTENTS2 0xc48 +#define PKT_NVI_CONTENTS3 0xc4c +#define PKT_NVI_CONTENTS4 0xc50 +#define PKT_NVI_CONTENTS5 0xc54 +#define PKT_NVI_CONTENTS6 0xc58 +#define PKT_NVI_CONTENTS7 0xc5c +#define PKT_DRMI_CONTENTS0 0xc60 +#define PKT_DRMI_CONTENTS1 0xc64 +#define PKT_DRMI_CONTENTS2 0xc68 +#define PKT_DRMI_CONTENTS3 0xc6c +#define PKT_DRMI_CONTENTS4 0xc70 +#define PKT_DRMI_CONTENTS5 0xc74 +#define PKT_DRMI_CONTENTS6 0xc78 +#define PKT_DRMI_CONTENTS7 0xc7c +#define PKT_GHDMI1_CONTENTS0 0xc80 +#define PKT_GHDMI1_CONTENTS1 0xc84 +#define PKT_GHDMI1_CONTENTS2 0xc88 +#define PKT_GHDMI1_CONTENTS3 0xc8c +#define PKT_GHDMI1_CONTENTS4 0xc90 +#define PKT_GHDMI1_CONTENTS5 0xc94 +#define PKT_GHDMI1_CONTENTS6 0xc98 +#define PKT_GHDMI1_CONTENTS7 0xc9c +#define PKT_GHDMI2_CONTENTS0 0xca0 +#define PKT_GHDMI2_CONTENTS1 0xca4 +#define PKT_GHDMI2_CONTENTS2 0xca8 +#define PKT_GHDMI2_CONTENTS3 0xcac +#define PKT_GHDMI2_CONTENTS4 0xcb0 +#define PKT_GHDMI2_CONTENTS5 0xcb4 +#define PKT_GHDMI2_CONTENTS6 0xcb8 +#define PKT_GHDMI2_CONTENTS7 0xcbc +/* EMP Packetizer Registers */ +#define PKT_EMP_CONFIG0 0xce0 +#define PKT_EMP_CONTROL0 0xcec +#define PKT_EMP_CONTROL1 0xcf0 +#define PKT_EMP_CONTROL2 0xcf4 +#define PKT_EMP_VTEM_CONTENTS0 0xd00 +#define 
PKT_EMP_VTEM_CONTENTS1 0xd04 +#define PKT_EMP_VTEM_CONTENTS2 0xd08 +#define PKT_EMP_VTEM_CONTENTS3 0xd0c +#define PKT_EMP_VTEM_CONTENTS4 0xd10 +#define PKT_EMP_VTEM_CONTENTS5 0xd14 +#define PKT_EMP_VTEM_CONTENTS6 0xd18 +#define PKT_EMP_VTEM_CONTENTS7 0xd1c +#define PKT0_EMP_CVTEM_CONTENTS0 0xd20 +#define PKT0_EMP_CVTEM_CONTENTS1 0xd24 +#define PKT0_EMP_CVTEM_CONTENTS2 0xd28 +#define PKT0_EMP_CVTEM_CONTENTS3 0xd2c +#define PKT0_EMP_CVTEM_CONTENTS4 0xd30 +#define PKT0_EMP_CVTEM_CONTENTS5 0xd34 +#define PKT0_EMP_CVTEM_CONTENTS6 0xd38 +#define PKT0_EMP_CVTEM_CONTENTS7 0xd3c +#define PKT1_EMP_CVTEM_CONTENTS0 0xd40 +#define PKT1_EMP_CVTEM_CONTENTS1 0xd44 +#define PKT1_EMP_CVTEM_CONTENTS2 0xd48 +#define PKT1_EMP_CVTEM_CONTENTS3 0xd4c +#define PKT1_EMP_CVTEM_CONTENTS4 0xd50 +#define PKT1_EMP_CVTEM_CONTENTS5 0xd54 +#define PKT1_EMP_CVTEM_CONTENTS6 0xd58 +#define PKT1_EMP_CVTEM_CONTENTS7 0xd5c +#define PKT2_EMP_CVTEM_CONTENTS0 0xd60 +#define PKT2_EMP_CVTEM_CONTENTS1 0xd64 +#define PKT2_EMP_CVTEM_CONTENTS2 0xd68 +#define PKT2_EMP_CVTEM_CONTENTS3 0xd6c +#define PKT2_EMP_CVTEM_CONTENTS4 0xd70 +#define PKT2_EMP_CVTEM_CONTENTS5 0xd74 +#define PKT2_EMP_CVTEM_CONTENTS6 0xd78 +#define PKT2_EMP_CVTEM_CONTENTS7 0xd7c +#define PKT3_EMP_CVTEM_CONTENTS0 0xd80 +#define PKT3_EMP_CVTEM_CONTENTS1 0xd84 +#define PKT3_EMP_CVTEM_CONTENTS2 0xd88 +#define PKT3_EMP_CVTEM_CONTENTS3 0xd8c +#define PKT3_EMP_CVTEM_CONTENTS4 0xd90 +#define PKT3_EMP_CVTEM_CONTENTS5 0xd94 +#define PKT3_EMP_CVTEM_CONTENTS6 0xd98 +#define PKT3_EMP_CVTEM_CONTENTS7 0xd9c +#define PKT4_EMP_CVTEM_CONTENTS0 0xda0 +#define PKT4_EMP_CVTEM_CONTENTS1 0xda4 +#define PKT4_EMP_CVTEM_CONTENTS2 0xda8 +#define PKT4_EMP_CVTEM_CONTENTS3 0xdac +#define PKT4_EMP_CVTEM_CONTENTS4 0xdb0 +#define PKT4_EMP_CVTEM_CONTENTS5 0xdb4 +#define PKT4_EMP_CVTEM_CONTENTS6 0xdb8 +#define PKT4_EMP_CVTEM_CONTENTS7 0xdbc +#define PKT5_EMP_CVTEM_CONTENTS0 0xdc0 +#define PKT5_EMP_CVTEM_CONTENTS1 0xdc4 +#define PKT5_EMP_CVTEM_CONTENTS2 0xdc8 +#define PKT5_EMP_CVTEM_CONTENTS3 0xdcc +#define PKT5_EMP_CVTEM_CONTENTS4 0xdd0 +#define PKT5_EMP_CVTEM_CONTENTS5 0xdd4 +#define PKT5_EMP_CVTEM_CONTENTS6 0xdd8 +#define PKT5_EMP_CVTEM_CONTENTS7 0xddc +/* Audio Packetizer Registers */ +#define AUDPKT_CONTROL0 0xe20 +#define AUDPKT_PBIT_FORCE_EN_MASK BIT(12) +#define AUDPKT_PBIT_FORCE_EN BIT(12) +#define AUDPKT_CHSTATUS_OVR_EN_MASK BIT(0) +#define AUDPKT_CHSTATUS_OVR_EN BIT(0) +#define AUDPKT_CONTROL1 0xe24 +#define AUDPKT_ACR_CONTROL0 0xe40 +#define AUDPKT_ACR_N_VALUE 0xfffff +#define AUDPKT_ACR_CONTROL1 0xe44 +#define AUDPKT_ACR_CTS_OVR_VAL_MSK GENMASK(23, 4) +#define AUDPKT_ACR_CTS_OVR_VAL(x) ((x) << 4) +#define AUDPKT_ACR_CTS_OVR_EN_MSK BIT(1) +#define AUDPKT_ACR_CTS_OVR_EN BIT(1) +#define AUDPKT_ACR_STATUS0 0xe4c +#define AUDPKT_CHSTATUS_OVR0 0xe60 +#define AUDPKT_CHSTATUS_OVR1 0xe64 +/* IEC60958 Byte 3: Sampling frequency Bits 24 to 27 */ +#define AUDPKT_CHSTATUS_SR_MASK GENMASK(3, 0) +#define AUDPKT_CHSTATUS_SR_22050 0x4 +#define AUDPKT_CHSTATUS_SR_24000 0x6 +#define AUDPKT_CHSTATUS_SR_32000 0x3 +#define AUDPKT_CHSTATUS_SR_44100 0x0 +#define AUDPKT_CHSTATUS_SR_48000 0x2 +#define AUDPKT_CHSTATUS_SR_88200 0x8 +#define AUDPKT_CHSTATUS_SR_96000 0xa +#define AUDPKT_CHSTATUS_SR_176400 0xc +#define AUDPKT_CHSTATUS_SR_192000 0xe +#define AUDPKT_CHSTATUS_SR_768000 0x9 +#define AUDPKT_CHSTATUS_SR_NOT_INDICATED 0x1 +/* IEC60958 Byte 4: Original Sampling frequency Bits 36 to 39 */ +#define AUDPKT_CHSTATUS_0SR_MASK GENMASK(15, 12) +#define AUDPKT_CHSTATUS_OSR_8000 0x6 +#define AUDPKT_CHSTATUS_OSR_11025 0xa
+#define AUDPKT_CHSTATUS_OSR_12000 0x2 +#define AUDPKT_CHSTATUS_OSR_16000 0x8 +#define AUDPKT_CHSTATUS_OSR_22050 0xb +#define AUDPKT_CHSTATUS_OSR_24000 0x9 +#define AUDPKT_CHSTATUS_OSR_32000 0xc +#define AUDPKT_CHSTATUS_OSR_44100 0xf +#define AUDPKT_CHSTATUS_OSR_48000 0xd +#define AUDPKT_CHSTATUS_OSR_88200 0x7 +#define AUDPKT_CHSTATUS_OSR_96000 0x5 +#define AUDPKT_CHSTATUS_OSR_176400 0x3 +#define AUDPKT_CHSTATUS_OSR_192000 0x1 +#define AUDPKT_CHSTATUS_OSR_NOT_INDICATED 0x0 +#define AUDPKT_CHSTATUS_OVR2 0xe68 +#define AUDPKT_CHSTATUS_OVR3 0xe6c +#define AUDPKT_CHSTATUS_OVR4 0xe70 +#define AUDPKT_CHSTATUS_OVR5 0xe74 +#define AUDPKT_CHSTATUS_OVR6 0xe78 +#define AUDPKT_CHSTATUS_OVR7 0xe7c +#define AUDPKT_CHSTATUS_OVR8 0xe80 +#define AUDPKT_CHSTATUS_OVR9 0xe84 +#define AUDPKT_CHSTATUS_OVR10 0xe88 +#define AUDPKT_CHSTATUS_OVR11 0xe8c +#define AUDPKT_CHSTATUS_OVR12 0xe90 +#define AUDPKT_CHSTATUS_OVR13 0xe94 +#define AUDPKT_CHSTATUS_OVR14 0xe98 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC0 0xea0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC1 0xea4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC2 0xea8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC3 0xeac +#define AUDPKT_USRDATA_OVR_MSG_GENERIC4 0xeb0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC5 0xeb4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC6 0xeb8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC7 0xebc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC8 0xec0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC9 0xec4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC10 0xec8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC11 0xecc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC12 0xed0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC13 0xed4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC14 0xed8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC15 0xedc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC16 0xee0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC17 0xee4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC18 0xee8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC19 0xeec +#define AUDPKT_USRDATA_OVR_MSG_GENERIC20 0xef0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC21 0xef4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC22 0xef8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC23 0xefc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC24 0xf00 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC25 0xf04 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC26 0xf08 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC27 0xf0c +#define AUDPKT_USRDATA_OVR_MSG_GENERIC28 0xf10 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC29 0xf14 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC30 0xf18 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC31 0xf1c +#define AUDPKT_USRDATA_OVR_MSG_GENERIC32 0xf20 +#define AUDPKT_VBIT_OVR0 0xf24 +/* CEC Registers */ +#define CEC_TX_CONTROL 0x1000 +#define CEC_STATUS 0x1004 +#define CEC_CONFIG 0x1008 +#define CEC_ADDR 0x100c +#define CEC_TX_COUNT 0x1020 +#define CEC_TX_DATA3_0 0x1024 +#define CEC_TX_DATA7_4 0x1028 +#define CEC_TX_DATA11_8 0x102c +#define CEC_TX_DATA15_12 0x1030 +#define CEC_RX_COUNT_STATUS 0x1040 +#define CEC_RX_DATA3_0 0x1044 +#define CEC_RX_DATA7_4 0x1048 +#define CEC_RX_DATA11_8 0x104c +#define CEC_RX_DATA15_12 0x1050 +#define CEC_LOCK_CONTROL 0x1054 +#define CEC_RXQUAL_BITTIME_CONFIG 0x1060 +#define CEC_RX_BITTIME_CONFIG 0x1064 +#define CEC_TX_BITTIME_CONFIG 0x1068 +/* eARC RX CMDC Registers */ +#define EARCRX_CMDC_CONFIG0 0x1800 +#define EARCRX_XACTREAD_STOP_CFG BIT(26) +#define EARCRX_XACTREAD_RETRY_CFG BIT(25) +#define EARCRX_CMDC_DSCVR_EARCVALID0_TO_DISC1 BIT(24) +#define EARCRX_CMDC_XACT_RESTART_EN BIT(18) +#define EARCRX_CMDC_CONFIG1 0x1804 +#define EARCRX_CMDC_CONTROL 0x1808 +#define EARCRX_CMDC_HEARTBEAT_LOSS_EN BIT(4) +#define 
EARCRX_CMDC_DISCOVERY_EN BIT(3) +#define EARCRX_CONNECTOR_HPD BIT(1) +#define EARCRX_CMDC_WHITELIST0_CONFIG 0x180c +#define EARCRX_CMDC_WHITELIST1_CONFIG 0x1810 +#define EARCRX_CMDC_WHITELIST2_CONFIG 0x1814 +#define EARCRX_CMDC_WHITELIST3_CONFIG 0x1818 +#define EARCRX_CMDC_STATUS 0x181c +#define EARCRX_CMDC_XACT_INFO 0x1820 +#define EARCRX_CMDC_XACT_ACTION 0x1824 +#define EARCRX_CMDC_HEARTBEAT_RXSTAT_SE 0x1828 +#define EARCRX_CMDC_HEARTBEAT_STATUS 0x182c +#define EARCRX_CMDC_XACT_WR0 0x1840 +#define EARCRX_CMDC_XACT_WR1 0x1844 +#define EARCRX_CMDC_XACT_WR2 0x1848 +#define EARCRX_CMDC_XACT_WR3 0x184c +#define EARCRX_CMDC_XACT_WR4 0x1850 +#define EARCRX_CMDC_XACT_WR5 0x1854 +#define EARCRX_CMDC_XACT_WR6 0x1858 +#define EARCRX_CMDC_XACT_WR7 0x185c +#define EARCRX_CMDC_XACT_WR8 0x1860 +#define EARCRX_CMDC_XACT_WR9 0x1864 +#define EARCRX_CMDC_XACT_WR10 0x1868 +#define EARCRX_CMDC_XACT_WR11 0x186c +#define EARCRX_CMDC_XACT_WR12 0x1870 +#define EARCRX_CMDC_XACT_WR13 0x1874 +#define EARCRX_CMDC_XACT_WR14 0x1878 +#define EARCRX_CMDC_XACT_WR15 0x187c +#define EARCRX_CMDC_XACT_WR16 0x1880 +#define EARCRX_CMDC_XACT_WR17 0x1884 +#define EARCRX_CMDC_XACT_WR18 0x1888 +#define EARCRX_CMDC_XACT_WR19 0x188c +#define EARCRX_CMDC_XACT_WR20 0x1890 +#define EARCRX_CMDC_XACT_WR21 0x1894 +#define EARCRX_CMDC_XACT_WR22 0x1898 +#define EARCRX_CMDC_XACT_WR23 0x189c +#define EARCRX_CMDC_XACT_WR24 0x18a0 +#define EARCRX_CMDC_XACT_WR25 0x18a4 +#define EARCRX_CMDC_XACT_WR26 0x18a8 +#define EARCRX_CMDC_XACT_WR27 0x18ac +#define EARCRX_CMDC_XACT_WR28 0x18b0 +#define EARCRX_CMDC_XACT_WR29 0x18b4 +#define EARCRX_CMDC_XACT_WR30 0x18b8 +#define EARCRX_CMDC_XACT_WR31 0x18bc +#define EARCRX_CMDC_XACT_WR32 0x18c0 +#define EARCRX_CMDC_XACT_WR33 0x18c4 +#define EARCRX_CMDC_XACT_WR34 0x18c8 +#define EARCRX_CMDC_XACT_WR35 0x18cc +#define EARCRX_CMDC_XACT_WR36 0x18d0 +#define EARCRX_CMDC_XACT_WR37 0x18d4 +#define EARCRX_CMDC_XACT_WR38 0x18d8 +#define EARCRX_CMDC_XACT_WR39 0x18dc +#define EARCRX_CMDC_XACT_WR40 0x18e0 +#define EARCRX_CMDC_XACT_WR41 0x18e4 +#define EARCRX_CMDC_XACT_WR42 0x18e8 +#define EARCRX_CMDC_XACT_WR43 0x18ec +#define EARCRX_CMDC_XACT_WR44 0x18f0 +#define EARCRX_CMDC_XACT_WR45 0x18f4 +#define EARCRX_CMDC_XACT_WR46 0x18f8 +#define EARCRX_CMDC_XACT_WR47 0x18fc +#define EARCRX_CMDC_XACT_WR48 0x1900 +#define EARCRX_CMDC_XACT_WR49 0x1904 +#define EARCRX_CMDC_XACT_WR50 0x1908 +#define EARCRX_CMDC_XACT_WR51 0x190c +#define EARCRX_CMDC_XACT_WR52 0x1910 +#define EARCRX_CMDC_XACT_WR53 0x1914 +#define EARCRX_CMDC_XACT_WR54 0x1918 +#define EARCRX_CMDC_XACT_WR55 0x191c +#define EARCRX_CMDC_XACT_WR56 0x1920 +#define EARCRX_CMDC_XACT_WR57 0x1924 +#define EARCRX_CMDC_XACT_WR58 0x1928 +#define EARCRX_CMDC_XACT_WR59 0x192c +#define EARCRX_CMDC_XACT_WR60 0x1930 +#define EARCRX_CMDC_XACT_WR61 0x1934 +#define EARCRX_CMDC_XACT_WR62 0x1938 +#define EARCRX_CMDC_XACT_WR63 0x193c +#define EARCRX_CMDC_XACT_WR64 0x1940 +#define EARCRX_CMDC_XACT_RD0 0x1960 +#define EARCRX_CMDC_XACT_RD1 0x1964 +#define EARCRX_CMDC_XACT_RD2 0x1968 +#define EARCRX_CMDC_XACT_RD3 0x196c +#define EARCRX_CMDC_XACT_RD4 0x1970 +#define EARCRX_CMDC_XACT_RD5 0x1974 +#define EARCRX_CMDC_XACT_RD6 0x1978 +#define EARCRX_CMDC_XACT_RD7 0x197c +#define EARCRX_CMDC_XACT_RD8 0x1980 +#define EARCRX_CMDC_XACT_RD9 0x1984 +#define EARCRX_CMDC_XACT_RD10 0x1988 +#define EARCRX_CMDC_XACT_RD11 0x198c +#define EARCRX_CMDC_XACT_RD12 0x1990 +#define EARCRX_CMDC_XACT_RD13 0x1994 +#define EARCRX_CMDC_XACT_RD14 0x1998 +#define EARCRX_CMDC_XACT_RD15 0x199c +#define EARCRX_CMDC_XACT_RD16 0x19a0 
+#define EARCRX_CMDC_XACT_RD17 0x19a4 +#define EARCRX_CMDC_XACT_RD18 0x19a8 +#define EARCRX_CMDC_XACT_RD19 0x19ac +#define EARCRX_CMDC_XACT_RD20 0x19b0 +#define EARCRX_CMDC_XACT_RD21 0x19b4 +#define EARCRX_CMDC_XACT_RD22 0x19b8 +#define EARCRX_CMDC_XACT_RD23 0x19bc +#define EARCRX_CMDC_XACT_RD24 0x19c0 +#define EARCRX_CMDC_XACT_RD25 0x19c4 +#define EARCRX_CMDC_XACT_RD26 0x19c8 +#define EARCRX_CMDC_XACT_RD27 0x19cc +#define EARCRX_CMDC_XACT_RD28 0x19d0 +#define EARCRX_CMDC_XACT_RD29 0x19d4 +#define EARCRX_CMDC_XACT_RD30 0x19d8 +#define EARCRX_CMDC_XACT_RD31 0x19dc +#define EARCRX_CMDC_XACT_RD32 0x19e0 +#define EARCRX_CMDC_XACT_RD33 0x19e4 +#define EARCRX_CMDC_XACT_RD34 0x19e8 +#define EARCRX_CMDC_XACT_RD35 0x19ec +#define EARCRX_CMDC_XACT_RD36 0x19f0 +#define EARCRX_CMDC_XACT_RD37 0x19f4 +#define EARCRX_CMDC_XACT_RD38 0x19f8 +#define EARCRX_CMDC_XACT_RD39 0x19fc +#define EARCRX_CMDC_XACT_RD40 0x1a00 +#define EARCRX_CMDC_XACT_RD41 0x1a04 +#define EARCRX_CMDC_XACT_RD42 0x1a08 +#define EARCRX_CMDC_XACT_RD43 0x1a0c +#define EARCRX_CMDC_XACT_RD44 0x1a10 +#define EARCRX_CMDC_XACT_RD45 0x1a14 +#define EARCRX_CMDC_XACT_RD46 0x1a18 +#define EARCRX_CMDC_XACT_RD47 0x1a1c +#define EARCRX_CMDC_XACT_RD48 0x1a20 +#define EARCRX_CMDC_XACT_RD49 0x1a24 +#define EARCRX_CMDC_XACT_RD50 0x1a28 +#define EARCRX_CMDC_XACT_RD51 0x1a2c +#define EARCRX_CMDC_XACT_RD52 0x1a30 +#define EARCRX_CMDC_XACT_RD53 0x1a34 +#define EARCRX_CMDC_XACT_RD54 0x1a38 +#define EARCRX_CMDC_XACT_RD55 0x1a3c +#define EARCRX_CMDC_XACT_RD56 0x1a40 +#define EARCRX_CMDC_XACT_RD57 0x1a44 +#define EARCRX_CMDC_XACT_RD58 0x1a48 +#define EARCRX_CMDC_XACT_RD59 0x1a4c +#define EARCRX_CMDC_XACT_RD60 0x1a50 +#define EARCRX_CMDC_XACT_RD61 0x1a54 +#define EARCRX_CMDC_XACT_RD62 0x1a58 +#define EARCRX_CMDC_XACT_RD63 0x1a5c +#define EARCRX_CMDC_XACT_RD64 0x1a60 +#define EARCRX_CMDC_SYNC_CONFIG 0x1b00 +/* eARC RX DMAC Registers */ +#define EARCRX_DMAC_PHY_CONTROL 0x1c00 +#define EARCRX_DMAC_CONFIG 0x1c08 +#define EARCRX_DMAC_CONTROL0 0x1c0c +#define EARCRX_DMAC_AUDIO_EN BIT(1) +#define EARCRX_DMAC_EN BIT(0) +#define EARCRX_DMAC_CONTROL1 0x1c10 +#define EARCRX_DMAC_STATUS 0x1c14 +#define EARCRX_DMAC_CHSTATUS0 0x1c18 +#define EARCRX_DMAC_CHSTATUS1 0x1c1c +#define EARCRX_DMAC_CHSTATUS2 0x1c20 +#define EARCRX_DMAC_CHSTATUS3 0x1c24 +#define EARCRX_DMAC_CHSTATUS4 0x1c28 +#define EARCRX_DMAC_CHSTATUS5 0x1c2c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC0 0x1c30 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC1 0x1c34 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC2 0x1c38 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC3 0x1c3c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC4 0x1c40 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC5 0x1c44 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC6 0x1c48 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC7 0x1c4c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC8 0x1c50 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC9 0x1c54 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC10 0x1c58 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC11 0x1c5c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT0 0x1c60 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT1 0x1c64 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT2 0x1c68 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT3 0x1c6c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT4 0x1c70 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT5 0x1c74 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT6 0x1c78 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT7 0x1c7c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT8 0x1c80 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT9 0x1c84 +#define 
EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT10 0x1c88 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT11 0x1c8c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT0 0x1c90 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT1 0x1c94 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT2 0x1c98 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT3 0x1c9c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT4 0x1ca0 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT5 0x1ca4 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT6 0x1ca8 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT7 0x1cac +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT8 0x1cb0 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT9 0x1cb4 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT10 0x1cb8 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT11 0x1cbc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC0 0x1cc0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC1 0x1cc4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC2 0x1cc8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC3 0x1ccc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC4 0x1cd0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC5 0x1cd4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC6 0x1cd8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC7 0x1cdc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC8 0x1ce0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC9 0x1ce4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC10 0x1ce8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC11 0x1cec +#define EARCRX_DMAC_USRDATA_MSG_GENERIC12 0x1cf0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC13 0x1cf4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC14 0x1cf8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC15 0x1cfc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC16 0x1d00 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC17 0x1d04 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC18 0x1d08 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC19 0x1d0c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC20 0x1d10 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC21 0x1d14 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC22 0x1d18 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC23 0x1d1c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC24 0x1d20 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC25 0x1d24 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC26 0x1d28 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC27 0x1d2c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC28 0x1d30 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC29 0x1d34 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC30 0x1d38 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC31 0x1d3c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC32 0x1d40 +#define EARCRX_DMAC_CHSTATUS_STREAMER0 0x1d44 +#define EARCRX_DMAC_CHSTATUS_STREAMER1 0x1d48 +#define EARCRX_DMAC_CHSTATUS_STREAMER2 0x1d4c +#define EARCRX_DMAC_CHSTATUS_STREAMER3 0x1d50 +#define EARCRX_DMAC_CHSTATUS_STREAMER4 0x1d54 +#define EARCRX_DMAC_CHSTATUS_STREAMER5 0x1d58 +#define EARCRX_DMAC_CHSTATUS_STREAMER6 0x1d5c +#define EARCRX_DMAC_CHSTATUS_STREAMER7 0x1d60 +#define EARCRX_DMAC_CHSTATUS_STREAMER8 0x1d64 +#define EARCRX_DMAC_CHSTATUS_STREAMER9 0x1d68 +#define EARCRX_DMAC_CHSTATUS_STREAMER10 0x1d6c +#define EARCRX_DMAC_CHSTATUS_STREAMER11 0x1d70 +#define EARCRX_DMAC_CHSTATUS_STREAMER12 0x1d74 +#define EARCRX_DMAC_CHSTATUS_STREAMER13 0x1d78 +#define EARCRX_DMAC_CHSTATUS_STREAMER14 0x1d7c +#define EARCRX_DMAC_USRDATA_STREAMER0 0x1d80 +/* Main Unit Interrupt Registers */ +#define MAIN_INTVEC_INDEX 0x3000 +#define MAINUNIT_0_INT_STATUS 0x3010 +#define MAINUNIT_0_INT_MASK_N 0x3014 +#define MAINUNIT_0_INT_CLEAR 0x3018 +#define MAINUNIT_0_INT_FORCE 0x301c +#define MAINUNIT_1_INT_STATUS 0x3020 +#define FLT_EXIT_TO_LTSL_IRQ BIT(22) +#define FLT_EXIT_TO_LTS4_IRQ BIT(21) +#define FLT_EXIT_TO_LTSP_IRQ BIT(20) 
+#define SCDC_NACK_RCVD_IRQ BIT(12) +#define SCDC_RR_REPLY_STOP_IRQ BIT(11) +#define SCDC_UPD_FLAGS_CLR_IRQ BIT(10) +#define SCDC_UPD_FLAGS_CHG_IRQ BIT(9) +#define SCDC_UPD_FLAGS_RD_IRQ BIT(8) +#define I2CM_NACK_RCVD_IRQ BIT(2) +#define I2CM_READ_REQUEST_IRQ BIT(1) +#define I2CM_OP_DONE_IRQ BIT(0) +#define MAINUNIT_1_INT_MASK_N 0x3024 +#define I2CM_NACK_RCVD_MASK_N BIT(2) +#define I2CM_READ_REQUEST_MASK_N BIT(1) +#define I2CM_OP_DONE_MASK_N BIT(0) +#define MAINUNIT_1_INT_CLEAR 0x3028 +#define I2CM_NACK_RCVD_CLEAR BIT(2) +#define I2CM_READ_REQUEST_CLEAR BIT(1) +#define I2CM_OP_DONE_CLEAR BIT(0) +#define MAINUNIT_1_INT_FORCE 0x302c +/* AVPUNIT Interrupt Registers */ +#define AVP_INTVEC_INDEX 0x3800 +#define AVP_0_INT_STATUS 0x3810 +#define AVP_0_INT_MASK_N 0x3814 +#define AVP_0_INT_CLEAR 0x3818 +#define AVP_0_INT_FORCE 0x381c +#define AVP_1_INT_STATUS 0x3820 +#define AVP_1_INT_MASK_N 0x3824 +#define HDCP14_AUTH_CHG_MASK_N BIT(6) +#define AVP_1_INT_CLEAR 0x3828 +#define AVP_1_INT_FORCE 0x382c +#define AVP_2_INT_STATUS 0x3830 +#define AVP_2_INT_MASK_N 0x3834 +#define AVP_2_INT_CLEAR 0x3838 +#define AVP_2_INT_FORCE 0x383c +#define AVP_3_INT_STATUS 0x3840 +#define AVP_3_INT_MASK_N 0x3844 +#define AVP_3_INT_CLEAR 0x3848 +#define AVP_3_INT_FORCE 0x384c +#define AVP_4_INT_STATUS 0x3850 +#define AVP_4_INT_MASK_N 0x3854 +#define AVP_4_INT_CLEAR 0x3858 +#define AVP_4_INT_FORCE 0x385c +#define AVP_5_INT_STATUS 0x3860 +#define AVP_5_INT_MASK_N 0x3864 +#define AVP_5_INT_CLEAR 0x3868 +#define AVP_5_INT_FORCE 0x386c +#define AVP_6_INT_STATUS 0x3870 +#define AVP_6_INT_MASK_N 0x3874 +#define AVP_6_INT_CLEAR 0x3878 +#define AVP_6_INT_FORCE 0x387c +/* CEC Interrupt Registers */ +#define CEC_INT_STATUS 0x4000 +#define CEC_INT_MASK_N 0x4004 +#define CEC_INT_CLEAR 0x4008 +#define CEC_INT_FORCE 0x400c +/* eARC RX Interrupt Registers */ +#define EARCRX_INTVEC_INDEX 0x4800 +#define EARCRX_0_INT_STATUS 0x4810 +#define EARCRX_CMDC_DISCOVERY_TIMEOUT_IRQ BIT(9) +#define EARCRX_CMDC_DISCOVERY_DONE_IRQ BIT(8) +#define EARCRX_0_INT_MASK_N 0x4814 +#define EARCRX_0_INT_CLEAR 0x4818 +#define EARCRX_0_INT_FORCE 0x481c +#define EARCRX_1_INT_STATUS 0x4820 +#define EARCRX_1_INT_MASK_N 0x4824 +#define EARCRX_1_INT_CLEAR 0x4828 +#define EARCRX_1_INT_FORCE 0x482c + +#endif /* __DW_HDMI_QP_H__ */ diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 0031f3c54882..996733ed2c00 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3503,6 +3503,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->bridge.of_node = pdev->dev.of_node; hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + if (hdmi->version >= 0x200a) + hdmi->bridge.ycbcr_420_allowed = plat_data->ycbcr_420_allowed; + memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = dev; pdevinfo.id = PLATFORM_DEVID_AUTO; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index f3afdab55c11..7275e66faefc 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1707,13 +1707,20 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge, { struct tc_data *tc = bridge_to_tc(bridge); - drm_mode_copy(&tc->mode, mode); + drm_mode_copy(&tc->mode, adj); } static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); + int ret; + + ret = tc_get_display_props(tc); + if (ret < 0) { + dev_err(tc->dev, "failed to read display props: 
%d\n", ret); + return NULL; + } return drm_edid_read_ddc(connector, &tc->aux.ddc); } @@ -2169,19 +2176,31 @@ static const struct regmap_access_table tc_precious_table = { .n_yes_ranges = ARRAY_SIZE(tc_precious_ranges), }; -static const struct regmap_range tc_non_writeable_ranges[] = { - regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI), - regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI), - regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS), - regmap_reg_range(TC_IDREG, SYSSTAT), - regmap_reg_range(GPIOI, GPIOI), - regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ), -}; - -static const struct regmap_access_table tc_writeable_table = { - .no_ranges = tc_non_writeable_ranges, - .n_no_ranges = ARRAY_SIZE(tc_non_writeable_ranges), -}; +static bool tc_writeable_reg(struct device *dev, unsigned int reg) +{ + /* RO reg */ + switch (reg) { + case PPI_BUSYPPI: + case DSI_BUSYDSI: + case DSI_LANESTATUS0: + case DSI_LANESTATUS1: + case DSI_INTSTATUS: + case TC_IDREG: + case SYSBOOT: + case SYSSTAT: + case GPIOI: + case DP0_LTSTAT: + case DP0_SNKLTCHGREQ: + return false; + } + /* WO reg */ + switch (reg) { + case DSI_STARTDSI: + case DSI_INTCLR: + return true; + } + return tc_readable_reg(dev, reg); +} static const struct regmap_config tc_regmap_config = { .name = "tc358767", @@ -2191,9 +2210,9 @@ static const struct regmap_config tc_regmap_config = { .max_register = PLL_DBG, .cache_type = REGCACHE_MAPLE, .readable_reg = tc_readable_reg, + .writeable_reg = tc_writeable_reg, .volatile_table = &tc_volatile_table, .precious_table = &tc_precious_table, - .wr_table = &tc_writeable_table, .reg_format_endian = REGMAP_ENDIAN_BIG, .val_format_endian = REGMAP_ENDIAN_LITTLE, }; @@ -2229,11 +2248,11 @@ static irqreturn_t tc_irq_handler(int irq, void *arg) bool h = val & INT_GPIO_H(tc->hpd_pin); bool lc = val & INT_GPIO_LC(tc->hpd_pin); - dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin, - h ? "H" : "", lc ? "LC" : ""); - - if (h || lc) + if (h || lc) { + dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin, + h ? "H" : "", lc ? "LC" : ""); drm_kms_helper_hotplug_event(tc->bridge.dev); + } } regmap_write(tc->regmap, INTSTS_G, val); @@ -2298,7 +2317,8 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc) /* port@1 is the DPI input/output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge); if (ret && ret != -ENODEV) - return ret; + return dev_err_probe(dev, ret, + "Could not find DPI panel or bridge\n"); if (panel) { bridge = devm_drm_panel_bridge_add(dev, panel); @@ -2326,7 +2346,8 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc) /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL); if (ret && ret != -ENODEV) - return ret; + return dev_err_probe(dev, ret, + "Could not find eDP panel or bridge\n"); if (panel) { struct drm_bridge *panel_bridge; @@ -2551,7 +2572,7 @@ static int tc_probe(struct i2c_client *client) ret = tc_mipi_dsi_host_attach(tc); if (ret) { drm_bridge_remove(&tc->bridge); - return ret; + return dev_err_probe(dev, ret, "Failed to attach DSI host\n"); } } diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index bb1750a3dab0..2cb748bbefcd 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -461,7 +461,9 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host, ret = -EINVAL; ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0); if (ep) { - ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines); + ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines); + if (ret) + ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines); of_node_put(ep); } diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c index 6b559e071301..a0a1b5dd794e 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -94,7 +94,7 @@ static const struct regmap_access_table dlpc_volatile_table = { .n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges), }; -static struct regmap_config dlpc_regmap_config = { +static const struct regmap_config dlpc_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = WR_DSI_PORT_EN, diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 582cf4f73a74..9e31f750fd88 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1635,8 +1635,8 @@ static void ti_sn_pwm_unregister(void) } #else -static inline int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } -static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} +static inline int __maybe_unused ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } +static inline void __maybe_unused ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} static inline int ti_sn_pwm_register(void) { return 0; } static inline void ti_sn_pwm_unregister(void) {} diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c new file mode 100644 index 000000000000..3472ed5924e8 --- /dev/null +++ b/drivers/gpu/drm/bridge/ti-tdp158.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2024 Freebox SAS + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> + +struct tdp158 { + struct drm_bridge bridge; + struct drm_bridge *next; + struct gpio_desc *enable; // Operation Enable - pin 36 + struct regulator *vcc; // 3.3V + struct regulator *vdd; // 1.1V + struct device *dev; +};
+ +static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev) +{ + int err; + struct tdp158 *tdp158 = bridge->driver_private; + + err = regulator_enable(tdp158->vcc); + if (err) + dev_err(tdp158->dev, "failed to enable vcc: %d", err); + + err = regulator_enable(tdp158->vdd); + if (err) + dev_err(tdp158->dev, "failed to enable vdd: %d", err); + + gpiod_set_value_cansleep(tdp158->enable, 1); +} + +static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev) +{ + struct tdp158 *tdp158 = bridge->driver_private; + + gpiod_set_value_cansleep(tdp158->enable, 0); + regulator_disable(tdp158->vdd); + regulator_disable(tdp158->vcc); +} + +static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +{ + struct tdp158 *tdp158 = bridge->driver_private; + + return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags); +} + +static const struct drm_bridge_funcs tdp158_bridge_funcs = { + .attach = tdp158_attach, + .atomic_enable = tdp158_enable, + .atomic_disable = tdp158_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static int tdp158_probe(struct i2c_client *client) +{ + struct tdp158 *tdp158; + struct device *dev = &client->dev; + + tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL); + if (!tdp158) + return -ENOMEM; + + tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(tdp158->next)) + return dev_err_probe(dev, PTR_ERR(tdp158->next), "missing bridge"); + + tdp158->vcc = devm_regulator_get(dev, "vcc"); + if (IS_ERR(tdp158->vcc)) + return dev_err_probe(dev, PTR_ERR(tdp158->vcc), "vcc"); + + tdp158->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(tdp158->vdd)) + return dev_err_probe(dev, PTR_ERR(tdp158->vdd), "vdd"); + + tdp158->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(tdp158->enable)) + return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable"); + + tdp158->bridge.of_node = dev->of_node; + tdp158->bridge.funcs = &tdp158_bridge_funcs; + tdp158->bridge.driver_private = tdp158; + tdp158->dev = dev; + + return devm_drm_bridge_add(dev, &tdp158->bridge); +} + +static const struct of_device_id tdp158_match_table[] = { + { .compatible = "ti,tdp158" }, + { } +}; +MODULE_DEVICE_TABLE(of, tdp158_match_table); + +static struct i2c_driver tdp158_driver = { + .probe = tdp158_probe, + .driver = { + .name = "tdp158", + .of_match_table = tdp158_match_table, + }, +}; +module_i2c_driver(tdp158_driver); + +MODULE_DESCRIPTION("TI TDP158 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index 66e70ced796f..a8fca079921b 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -90,7 +90,12 @@ CONFIG_QCOM_GPI_DMA=y CONFIG_USB_ONBOARD_DEV=y CONFIG_NVMEM_QCOM_QFPROM=y CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y - +CONFIG_REGULATOR_QCOM_REFGEN=y +CONFIG_TYPEC_MUX_FSA4480=y +CONFIG_QCOM_PMIC_GLINK=y +CONFIG_UCSI_PMIC_GLINK=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y # db410c ethernet CONFIG_USB_RTL8152=y diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh index 5a3bdcffae32..139b81db6312 100644 --- a/drivers/gpu/drm/ci/build.sh +++ b/drivers/gpu/drm/ci/build.sh @@ -30,6 +30,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb" DEVICE_TREES+=" 
arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb" + DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb" elif [[ "$KERNEL_ARCH" = "arm" ]]; then GCC_ARCH="arm-linux-gnueabihf" DEBIAN_ARCH="armhf" diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index eca47d4f816f..90bde9f00cc3 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -1,14 +1,14 @@ variables: DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa - DRM_CI_COMMIT_SHA: &drm-ci-commit-sha d9849ac46623797a9f56fb9d46dc52460ac477de + DRM_CI_COMMIT_SHA: &drm-ci-commit-sha c6a9a9c3bce90923f7700219354e0b6e5a3c9ba6 UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git TARGET_BRANCH: drm-next - IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5 + IGT_VERSION: a73311079a5d8ac99eb25336a8369a2c3c6b519b DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git - DEQP_RUNNER_GIT_TAG: v0.15.0 + DEQP_RUNNER_GIT_TAG: v0.20.0 FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb @@ -153,6 +153,14 @@ stages: # Pre-merge pipeline for Marge Bot - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"' when: on_success + # Push to a branch on a fork + - &is-fork-push '$CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"' + +# Rules applied to every job in the pipeline +.common-rules: + rules: + - if: *is-fork-push + when: manual .never-post-merge-rules: rules: diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml index 2c340d063a96..8d8b9e71852e 100644 --- a/drivers/gpu/drm/ci/image-tags.yml +++ b/drivers/gpu/drm/ci/image-tags.yml @@ -1,5 +1,5 @@ variables: - CONTAINER_TAG: "2024-08-07-mesa-uprev" + CONTAINER_TAG: "2024-09-09-uprevs" DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base" DEBIAN_BASE_TAG: "${CONTAINER_TAG}" diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 09d8447840e9..f0ef60c8f56d 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -162,6 +162,22 @@ msm:sdm845: script: - ./install/bare-metal/cros-servo.sh +msm:sm8350-hdk: + extends: + - .lava-igt:arm64 + stage: msm + parallel: 4 + variables: + BOOT_METHOD: fastboot + DEVICE_TYPE: sm8350-hdk + DRIVER_NAME: msm + DTB: ${DEVICE_TYPE} + FARM: collabora + GPU_VERSION: ${DEVICE_TYPE} + KERNEL_IMAGE_NAME: "Image.gz" + KERNEL_IMAGE_TYPE: "" + RUNNER_TAG: mesa-ci-x86-64-lava-sm8350-hdk + .rockchip-device: variables: DTB: ${DEVICE_TYPE} @@ -286,6 +302,15 @@ i915:tgl: GPU_VERSION: tgl RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer +i915:jsl: + extends: + - .i915 + parallel: 4 + variables: + DEVICE_TYPE: acer-cb317-1h-c3z6-dedede + GPU_VERSION: jsl + RUNNER_TAG: mesa-ci-x86-64-lava-acer-cb317-1h-c3z6-dedede + .amdgpu: extends: - .lava-igt:x86_64 diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt index 8e2fed6d76a3..f44dbce3151a 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt @@ -2,6 +2,7 @@ amdgpu/amd_abm@abm_enabled,Fail amdgpu/amd_abm@abm_gradual,Fail amdgpu/amd_abm@backlight_monotonic_abm,Fail amdgpu/amd_abm@backlight_monotonic_basic,Fail +amdgpu/amd_abm@dpms_cycle,Fail amdgpu/amd_assr@assr-links,Fail 
amdgpu/amd_assr@assr-links-dpms,Fail amdgpu/amd_mall@static-screen,Crash @@ -14,7 +15,6 @@ amdgpu/amd_plane@mpo-scale-p010,Fail amdgpu/amd_plane@mpo-scale-rgb,Crash amdgpu/amd_plane@mpo-swizzle-toggle,Fail amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail -dumb_buffer@invalid-bpp,Fail kms_addfb_basic@bad-pitch-65536,Fail kms_addfb_basic@bo-too-small,Fail kms_addfb_basic@too-high,Fail diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt index e4faa96fa000..e70bd9d447ca 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt @@ -18,3 +18,10 @@ kms_async_flips@crc # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 kms_plane@pixel-format-source-clamping + +# Board Name: hp-11A-G6-EE-grunt +# Bug Report: https://lore.kernel.org/amd-gfx/09ee1862-3a0e-4085-ac1b-262601b1122b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_async_flips@async-flip-with-page-flip-events diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt index 9b84f68a5122..0907cb0f6d9e 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt @@ -1,3 +1,4 @@ +core_setmaster@master-drop-set-shared-fd,Fail core_setmaster@master-drop-set-user,Fail core_setmaster_vs_auth,Fail i915_module_load@load,Fail @@ -6,7 +7,6 @@ i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail -kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_fb_coherency@memset-crc,Crash kms_flip@busy-flip,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt index 581f0da4d0f2..0207c9807bee 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt @@ -46,3 +46,10 @@ i915_hangman@engine-engine-hang # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_pm_rpm@modeset-lpsp-stress + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_pm_rpm@drm-resources-equal diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt index e612281149aa..64772fedaed5 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt @@ -8,7 +8,6 @@ kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail -kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt index 4663d4d13f35..e8bddda56737 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt @@ -4,3 +4,10 @@ 
# IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 kms_fb_coherency@memset-crc + +# Board Name: asus-C523NA-A20057-coral +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_universal_plane@cursor-fb-leak diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt index 2723e2832797..f352b719cf7d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt @@ -1,5 +1,5 @@ +core_setmaster@master-drop-set-shared-fd,Fail core_setmaster@master-drop-set-user,Fail -core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -9,10 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout +i915_pm_rps@engine-order,Fail +kms_big_fb@linear-16bpp-rotate-180,Timeout kms_fb_coherency@memset-crc,Crash kms_flip@busy-flip,Timeout -kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail @@ -40,14 +40,11 @@ kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout -kms_prop_blob@invalid-set-prop,Fail kms_psr2_sf@cursor-plane-update-sf,Fail -kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout kms_psr2_sf@overlay-plane-update-continuous-sf,Fail kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail @@ -55,7 +52,6 @@ kms_psr2_sf@plane-move-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail kms_psr2_su@page_flip-NV12,Fail -kms_psr2_su@page_flip-P010,Fail kms_rotation_crc@primary-rotation-180,Timeout kms_setmode@basic,Fail kms_vblank@query-forked-hang,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt index 58a6001abb28..d8401251e5f4 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt @@ -11,3 +11,17 @@ kms_plane_alpha_blend@constant-alpha-min # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_atomic_transition@plane-all-modeset-transition-internal-panels + +# Board Name: asus-C436FA-Flip-hatch +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_plane_alpha_blend@constant-alpha-min + +# Board Name: asus-C436FA-Flip-hatch +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_async_flips@crc diff --git 
a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt index 4821c9adefd1..6eb64c672f7d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt @@ -63,3 +63,4 @@ xe_module_load@load,Fail xe_module_load@many-reload,Fail xe_module_load@reload,Fail xe_module_load@reload-no-display,Fail +core_setmaster@master-drop-set-shared-fd,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt new file mode 100644 index 000000000000..ed9f7b576843 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt @@ -0,0 +1,51 @@ +core_setmaster@master-drop-set-user,Fail +i915_module_load@load,Fail +i915_module_load@reload,Fail +i915_module_load@reload-no-display,Fail +i915_module_load@resize-bar,Fail +i915_pm_rpm@gem-execbuf-stress,Timeout +i915_pm_rpm@module-reload,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_lease@lease-uevent,Fail +kms_pm_rpm@legacy-planes,Timeout +kms_pm_rpm@legacy-planes-dpms,Timeout +kms_pm_rpm@modeset-stress-extra-wait,Timeout +kms_pm_rpm@universal-planes,Timeout +kms_pm_rpm@universal-planes-dpms,Timeout +kms_rotation_crc@multiplane-rotation,Fail +kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail +kms_rotation_crc@multiplane-rotation-cropping-top,Fail +perf@i915-ref-count,Fail +perf_pmu@busy-accuracy-50,Fail +perf_pmu@module-unload,Fail +perf_pmu@most-busy-idle-check-all,Fail +perf_pmu@rc6,Crash +sysfs_heartbeat_interval@long,Timeout +sysfs_heartbeat_interval@off,Timeout +sysfs_preempt_timeout@off,Timeout +sysfs_timeslice_duration@off,Timeout +xe_module_load@force-load,Fail +xe_module_load@load,Fail +xe_module_load@many-reload,Fail +xe_module_load@reload,Fail +xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt new file mode 100644 index 000000000000..5c3ef4486b9d --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt @@ -0,0 +1,13 @@ +# Board Name: acer-cb317-1h-c3z6-dedede +# Bug Report: 
https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +kms_flip@flip-vs-panning-interruptible + +# Board Name: acer-cb317-1h-c3z6-dedede +# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +kms_universal_plane@cursor-fb-leak diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt new file mode 100644 index 000000000000..1a3d87c0ca6e --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt @@ -0,0 +1,20 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* + +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^panfrost.* +^v3d.* +^vc4.* +^vmwgfx* + +# GEM tests takes ~1000 hours, so skip it +gem_.* + +# trap_err +i915_pm_rc6_residency.* + +# Hangs the machine and timeout occurs +i915_pm_rpm@system-hibernate* diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt index 1de04a3308c4..d4fba4f55ec1 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt @@ -17,12 +17,10 @@ perf@i915-ref-count,Fail perf_pmu@busy-accuracy-50,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash -prime_busy@after,Fail sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -testdisplay,Timeout xe_module_load@force-load,Fail xe_module_load@load,Fail xe_module_load@many-reload,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt index e728ccc62326..461ef69ef08a 100644 --- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt @@ -1,40 +1,64 @@ +api_intel_allocator@fork-simple-stress-signal,Timeout +api_intel_allocator@open-vm,Timeout api_intel_allocator@simple-allocator,Timeout +api_intel_bb@lot-of-buffers,Timeout api_intel_bb@object-reloc-keep-cache,Timeout api_intel_bb@offset-control,Timeout -core_auth@getclient-simple,Timeout -core_hotunplug@hotunbind-rebind,Timeout +api_intel_bb@render-ccs,Timeout +api_intel_bb@reset-bb,Timeout +core_auth@basic-auth,Timeout +core_hotunplug@hotrebind,Timeout +core_setmaster@master-drop-set-user,Fail debugfs_test@read_all_entries_display_on,Timeout -drm_read@invalid-buffer,Timeout -drm_read@short-buffer-nonblock,Timeout +drm_read@empty-block,Timeout +dumb_buffer@create-clear,Timeout +dumb_buffer@invalid-bpp,Timeout gen3_render_tiledx_blits,Timeout gen7_exec_parse@basic-allocation,Timeout -gen7_exec_parse@batch-without-end,Timeout gen9_exec_parse@batch-invalid-length,Timeout gen9_exec_parse@bb-secure,Timeout gen9_exec_parse@secure-batches,Timeout gen9_exec_parse@shadow-peek,Timeout gen9_exec_parse@unaligned-jump,Timeout +i915_getparams_basic@basic-subslice-total,Timeout +i915_hangman@gt-engine-hang,Timeout i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail +i915_pciid,Timeout +i915_pipe_stress@stress-xrgb8888-ytiled,Timeout +i915_pm_rpm@gem-execbuf-stress,Timeout +i915_pm_rps@engine-order,Timeout +i915_pm_rps@thresholds-idle-park,Timeout i915_query@engine-info,Timeout i915_query@query-topology-kernel-writes,Timeout i915_query@test-query-geometry-subslices,Timeout kms_lease@lease-uevent,Fail kms_rotation_crc@multiplane-rotation,Fail 
perf@i915-ref-count,Fail +perf_pmu@busy,Timeout perf_pmu@enable-race,Timeout perf_pmu@event-wait,Timeout +perf_pmu@faulting-read,Timeout perf_pmu@gt-awake,Timeout perf_pmu@interrupts,Timeout perf_pmu@module-unload,Fail +perf_pmu@most-busy-idle-check-all,Timeout perf_pmu@rc6,Crash +perf_pmu@render-node-busy-idle,Fail +perf_pmu@semaphore-wait-idle,Timeout +prime_busy@after,Timeout +prime_mmap@test_aperture_limit,Timeout prime_mmap@test_map_unmap,Timeout prime_mmap@test_refcounting,Timeout prime_self_import@basic-with_one_bo,Timeout +sriov_basic@enable-vfs-autoprobe-off,Timeout +syncobj_basic@bad-destroy,Timeout syncobj_basic@bad-flags-fd-to-handle,Timeout +syncobj_basic@create-signaled,Timeout syncobj_eventfd@invalid-bad-pad,Timeout +syncobj_eventfd@timeline-wait-before-signal,Timeout syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout syncobj_wait@invalid-signal-illegal-handle,Timeout syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt index 2adae2175501..0ce240e3aa07 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt @@ -1,5 +1,5 @@ +core_setmaster@master-drop-set-shared-fd,Fail core_setmaster@master-drop-set-user,Fail -core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -7,7 +7,8 @@ i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout +i915_pm_rps@engine-order,Fail +kms_big_fb@linear-16bpp-rotate-180,Timeout kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_dirtyfb@default-dirtyfb-ioctl,Fail kms_dirtyfb@fbc-dirtyfb-ioctl,Fail @@ -32,19 +33,17 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail -kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout kms_frontbuffer_tracking@fbc-tiling-linear,Fail +kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt,Timeout kms_lease@lease-uevent,Fail kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout -kms_prop_blob@invalid-set-prop,Fail kms_rotation_crc@primary-rotation-180,Timeout kms_vblank@query-forked-hang,Timeout perf@i915-ref-count,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt index a14349a1967f..8e0efc80d510 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail fbdev@eof,Fail fbdev@read,Fail kms_3d,Fail @@ -27,10 +22,6 @@ kms_cursor_legacy@cursor-vs-flip-atomic,Fail 
kms_cursor_legacy@cursor-vs-flip-legacy,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail -kms_flip@flip-vs-suspend,Fail -kms_flip@flip-vs-suspend-interruptible,Fail kms_lease@lease-uevent,Fail -kms_properties@get_properties-sanity-atomic,Fail -kms_properties@plane-properties-atomic,Fail -kms_properties@plane-properties-legacy,Fail kms_rmfb@close-fd,Fail +kms_flip@flip-vs-suspend-interruptible,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt index 8cb2cb67853d..845f852bb4a0 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt @@ -1,10 +1,5 @@ core_setmaster@master-drop-set-shared-fd,Fail -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail dumb_buffer@create-clear,Crash -dumb_buffer@invalid-bpp,Fail fbdev@eof,Fail fbdev@pan,Fail fbdev@read,Fail @@ -18,5 +13,4 @@ kms_color@invalid-gamma-lut-sizes,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-suspend,Fail kms_lease@lease-uevent,Fail -kms_properties@plane-properties-atomic,Fail kms_rmfb@close-fd,Fail diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt index 328967d3e23d..fc3745180683 100644 --- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt +++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt @@ -1,4 +1,3 @@ -dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_cursor_legacy@forked-bo,Fail kms_cursor_legacy@forked-move,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt index 4ac46168eff3..066d24ee3e08 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_cursor_legacy@torture-bo,Fail kms_force_connector_basic@force-edid,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt index bd0653caf7a0..2893f98a6b97 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt @@ -1,7 +1,2 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt index d42004cd6977..6dbc2080347d 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail @@ -11,35 +6,13 @@ kms_color@ctm-green-to-red,Fail kms_color@ctm-negative,Fail kms_color@ctm-red-to-blue,Fail kms_color@ctm-signed,Fail -kms_content_protection@atomic,Crash -kms_content_protection@atomic-dpms,Crash -kms_content_protection@content-type-change,Crash 
-kms_content_protection@lic-type-0,Crash -kms_content_protection@lic-type-1,Crash -kms_content_protection@srm,Crash -kms_content_protection@type1,Crash -kms_content_protection@uevent,Crash -kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-flip-vs-cursor-legacy,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-legacy,Fail kms_cursor_legacy@cursor-vs-flip-toggle,Fail kms_cursor_legacy@cursor-vs-flip-varying-size,Fail -kms_display_modes@extended-mode-basic,Fail -kms_flip@2x-flip-vs-modeset-vs-hang,Fail -kms_flip@2x-flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_lease@lease-uevent,Fail -kms_multipipe_modeset@basic-max-pipe-crc-check,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail kms_plane_alpha_blend@alpha-7efc,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail -kms_plane_lowres@tiling-none,Fail kms_rmfb@close-fd,Fail -kms_vblank@ts-continuation-dpms-rpm,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt index d42004cd6977..6dbc2080347d 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail @@ -11,35 +6,13 @@ kms_color@ctm-green-to-red,Fail kms_color@ctm-negative,Fail kms_color@ctm-red-to-blue,Fail kms_color@ctm-signed,Fail -kms_content_protection@atomic,Crash -kms_content_protection@atomic-dpms,Crash -kms_content_protection@content-type-change,Crash -kms_content_protection@lic-type-0,Crash -kms_content_protection@lic-type-1,Crash -kms_content_protection@srm,Crash -kms_content_protection@type1,Crash -kms_content_protection@uevent,Crash -kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-flip-vs-cursor-legacy,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-legacy,Fail kms_cursor_legacy@cursor-vs-flip-toggle,Fail kms_cursor_legacy@cursor-vs-flip-varying-size,Fail -kms_display_modes@extended-mode-basic,Fail -kms_flip@2x-flip-vs-modeset-vs-hang,Fail -kms_flip@2x-flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_lease@lease-uevent,Fail -kms_multipipe_modeset@basic-max-pipe-crc-check,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail kms_plane_alpha_blend@alpha-7efc,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail -kms_plane_lowres@tiling-none,Fail kms_rmfb@close-fd,Fail -kms_vblank@ts-continuation-dpms-rpm,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt index 
770a1c685fde..fa8c7e663858 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt @@ -1,8 +1,4 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail +drm_read@invalid-buffer,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt index 2aa96b1241c3..38ec0305c1f4 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt @@ -116,3 +116,17 @@ kms_cursor_legacy@flip-vs-cursor-toggle # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 msm/msm_shrink@copy-mmap-oom-8 + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_lease@page-flip-implicit-plane + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_flip@flip-vs-expired-vblank diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt index 90651048ab61..94783cafc21a 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt @@ -25,3 +25,8 @@ core_hotunplug.* # Whole machine hangs kms_cursor_crc.* + +# IGT test crash +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_content_protection@uevent diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt new file mode 100644 index 000000000000..4892c0c70a6d --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt @@ -0,0 +1,15 @@ +kms_3d,Fail +kms_cursor_legacy@forked-bo,Fail +kms_cursor_legacy@forked-move,Fail +kms_cursor_legacy@single-bo,Fail +kms_cursor_legacy@single-move,Fail +kms_cursor_legacy@torture-bo,Fail +kms_cursor_legacy@torture-move,Fail +kms_hdmi_inject@inject-4k,Fail +kms_lease@lease-uevent,Fail +kms_plane_alpha_blend@alpha-7efc,Fail +kms_plane_alpha_blend@alpha-basic,Fail +kms_plane_alpha_blend@alpha-opaque-fb,Fail +kms_plane_alpha_blend@alpha-transparent-fb,Fail +kms_plane_alpha_blend@constant-alpha-max,Fail +msm/msm_recovery@gpu-fault-parallel,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt new file mode 100644 index 000000000000..c1859d9b165f --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt @@ -0,0 +1,6 @@ +# Board Name: sm8350-hdk +# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/65 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +msm/msm_recovery@gpu-fault diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt new file mode 100644 index 000000000000..329770c520d9 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt @@ -0,0 +1,211 @@ +# Skip driver specific tests +^amdgpu.* +nouveau_.* +^panfrost.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Currently fails and causes coverage loss for other tests +# 
since core_getversion also fails. +core_hotunplug.* + +# Kernel panic +msm/msm_mapping@ring +# DEBUG - Begin test msm/msm_mapping@ring +# [ 200.874157] [IGT] msm_mapping: executing +# [ 200.880236] [IGT] msm_mapping: starting subtest ring +# [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISSION source=CP (0,0,0,1) +# [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.950227] platform 3d6a000.gmu: [drm:a6xx_hfi_send_msg.constprop.0] *ERROR* Message HFI_H2F_MSG_GX_BW_PERF_VOTE id 25 timed out waiting for response +# [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 204.213387] platform 3d6a000.gmu: GMU watchdog expired +# [ 205.909103] adreno_fault_handler: 224274 callbacks suppressed +# [ 205.909108] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.925794] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.936529] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.947263] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.957997] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.968731] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.979465] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.990199] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 206.000932] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 206.011666] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.925090] adreno_fault_handler: 224511 callbacks suppressed +# [ 210.925096] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.941781] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.952517] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.963250] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.973985] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 
dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.984719] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.995452] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.006186] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.016921] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.027655] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.937100] adreno_fault_handler: 223760 callbacks suppressed +# [ 215.937106] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.953824] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.964573] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.975321] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.986067] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.996815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.007563] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.018310] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.029057] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.039805] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.945182] adreno_fault_handler: 222822 callbacks suppressed +# [ 220.945188] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.961897] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.972645] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.983392] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.994140] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.004889] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.015636] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.026383] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.037130] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.047879] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.953179] adreno_fault_handler: 223373 callbacks suppressed +# [ 225.953184] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.969883] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.980617] *** gpu fault: ttbr0=00000001160d6000 
iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.991350] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.002084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.012818] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.023551] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.034285] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.045019] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.055753] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 228.001087] rcu: INFO: rcu_preempt detected stalls on CPUs/tasks: +# [ 228.007412] rcu: 0-....: (524 ticks this GP) idle=4ffc/1/0x4000000000000000 softirq=9367/9368 fqs=29 +# [ 228.017097] rcu: (detected by 1, t=6504 jiffies, g=29837, q=6 ncpus=8) +# [ 228.023959] Sending NMI from CPU 1 to CPUs 0: +# [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150] +# [ 228.173169] Modules linked in: +# [ 228.176361] irq event stamp: 2809595 +# [ 228.180083] hardirqs last enabled at (2809594): [<ffffd3bc52cb91ac>] exit_to_kernel_mode+0x38/0x130 +# [ 228.189547] hardirqs last disabled at (2809595): [<ffffd3bc52cb92c8>] el1_interrupt+0x24/0x64 +# [ 228.198377] softirqs last enabled at (1669060): [<ffffd3bc51936f98>] handle_softirqs+0x4a4/0x4bc +# [ 228.207565] softirqs last disabled at (1669063): [<ffffd3bc518905a4>] __do_softirq+0x14/0x20 +# [ 228.216316] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Not tainted 6.12.0-rc1-g685d530dc83a #1 +# [ 228.224966] Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +# [ 228.231730] pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +# [ 228.238948] pc : tcp_fastretrans_alert+0x0/0x884 +# [ 228.243751] lr : tcp_ack+0x9d4/0x1238 +# [ 228.247562] sp : ffff8000800036d0 +# [ 228.251011] x29: ffff8000800036d0 x28: 000000000000000c x27: 0000000000000001 +# [ 228.258421] x26: ffff704683cd8000 x25: 0000000000000403 x24: ffff70468b7e7c00 +# [ 228.265829] x23: 0000000000000000 x22: 0000000000000004 x21: 000000000000140f +# [ 228.273237] x20: 00000000f1de79f7 x19: 00000000f1de7a5f x18: 0000000000000001 +# [ 228.280644] x17: 00000000302d6762 x16: 632d6b64682d3035 x15: ffff704683c39000 +# [ 228.288051] x14: 00000000000e2000 x13: ffff704683df6000 x12: 0000000000000000 +# [ 228.295458] x11: 00000000000000a0 x10: 0000000000000000 x9 : ffffd3bc551a9a20 +# [ 228.302865] x8 : ffff800080003640 x7 : 0000000000040faa x6 : 00000000ffff9634 +# [ 228.310271] x5 : 00000000000005a8 x4 : ffff800080003788 x3 : ffff80008000377c +# [ 228.317679] x2 : 0000000000000000 x1 : 00000000f1de79f7 x0 : ffff704683cd8000 +# [ 228.325087] Call trace: +# [ 228.327640] tcp_fastretrans_alert+0x0/0x884 +# [ 228.332082] tcp_rcv_established+0x7c4/0x8bc +# [ 228.336523] tcp_v4_do_rcv+0x244/0x31c +# [ 228.340429] tcp_v4_rcv+0xcc4/0x1084 +# [ 228.344155] ip_protocol_deliver_rcu+0x64/0x218 +# [ 228.348862] ip_local_deliver_finish+0xb8/0x1ac +# [ 228.353566] ip_local_deliver+0x84/0x254 +# [ 228.357651] ip_sublist_rcv_finish+0x84/0xb8 +# [ 228.362092] ip_sublist_rcv+0x11c/0x2f0 +# [ 228.366081] ip_list_rcv+0xfc/0x190 +# [ 228.369711] __netif_receive_skb_list_core+0x174/0x208 +# [ 228.375050] netif_receive_skb_list_internal+0x204/0x3ac +# [ 228.380564] napi_complete_done+0x64/0x1d0 +# [ 228.384826] lan78xx_poll+0x71c/0x9cc +# [ 228.388638] __napi_poll.constprop.0+0x3c/0x254 +# [ 228.393341] net_rx_action+0x164/0x2d4 +# [ 228.397244] handle_softirqs+0x128/0x4bc +# [ 228.401329] __do_softirq+0x14/0x20 +# [ 228.404958] ____do_softirq+0x10/0x1c +# [ 228.408769] call_on_irq_stack+0x24/0x4c +# [ 228.412854] do_softirq_own_stack+0x1c/0x28 +# [ 228.417199] __irq_exit_rcu+0x124/0x164 +# [ 228.421188] irq_exit_rcu+0x10/0x38 +# [ 228.424819] el1_interrupt+0x38/0x64 +# [ 228.428546] el1h_64_irq_handler+0x18/0x24 +# [ 228.432807] el1h_64_irq+0x64/0x68 +# [ 228.436354] lock_acquire+0x214/0x32c +# [ 228.440166] __mutex_lock+0x98/0x3d0 +# [ 228.443893] mutex_lock_nested+0x24/0x30 +# [ 228.447978] fault_worker+0x58/0x184 +# [ 228.451704] kthread_worker_fn+0xf4/0x320 +# [ 228.455873] kthread+0x114/0x118 +# [ 228.459243] ret_from_fork+0x10/0x20 +# [ 228.462970] Kernel panic - not syncing: softlockup: hung tasks +# [ 228.469018] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Tainted: G L 6.12.0-rc1-g685d530dc83a #1 +# [ 228.479190] Tainted: [L]=SOFTLOCKUP +# [ 228.482815] Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +# [ 228.489574] Call trace: +# [ 228.492125] dump_backtrace+0x98/0xf0 +# [ 228.495931] show_stack+0x18/0x24 +# [ 228.499380] dump_stack_lvl+0x38/0xd0 +# [ 228.503189] dump_stack+0x18/0x24 +# [ 228.506639] panic+0x3bc/0x41c +# [ 228.509826] watchdog_timer_fn+0x254/0x2e4 +# [ 228.514087] __hrtimer_run_queues+0x3b0/0x40c +# [ 228.518612] hrtimer_interrupt+0xe8/0x248 +# [ 228.522777] arch_timer_handler_virt+0x2c/0x44 +# [ 228.527399] handle_percpu_devid_irq+0xa8/0x2c4 +# [ 228.532103] generic_handle_domain_irq+0x2c/0x44 +# [ 228.536902] gic_handle_irq+0x4c/0x11c +# [ 228.540802] do_interrupt_handler+0x50/0x84 +# [ 228.545146] el1_interrupt+0x34/0x64 +# [ 228.548870] el1h_64_irq_handler+0x18/0x24 +# [ 228.553128] el1h_64_irq+0x64/0x68 +# [ 228.556672] tcp_fastretrans_alert+0x0/0x884 +# [ 228.561110] tcp_rcv_established+0x7c4/0x8bc +# [ 228.565548] tcp_v4_do_rcv+0x244/0x31c +# [ 228.569449] tcp_v4_rcv+0xcc4/0x1084 +# [ 228.573171] ip_protocol_deliver_rcu+0x64/0x218 +# [ 228.577873] ip_local_deliver_finish+0xb8/0x1ac +# [ 228.582574] ip_local_deliver+0x84/0x254 +# [ 228.586655] ip_sublist_rcv_finish+0x84/0xb8 +# [ 228.591092] ip_sublist_rcv+0x11c/0x2f0 +# [ 228.595079] ip_list_rcv+0xfc/0x190 +# [ 228.598706] __netif_receive_skb_list_core+0x174/0x208 +# [ 228.604039] netif_receive_skb_list_internal+0x204/0x3ac +# [ 228.609549] napi_complete_done+0x64/0x1d0 +# [ 228.613808] lan78xx_poll+0x71c/0x9cc +# [ 228.617614] __napi_poll.constprop.0+0x3c/0x254 +# [ 228.622314] net_rx_action+0x164/0x2d4 +# [ 228.626214] handle_softirqs+0x128/0x4bc +# [ 228.630297] __do_softirq+0x14/0x20 +# [ 228.633923] ____do_softirq+0x10/0x1c +# [ 228.637729] call_on_irq_stack+0x24/0x4c +# [ 228.641811] do_softirq_own_stack+0x1c/0x28 +# [ 228.646152] __irq_exit_rcu+0x124/0x164 +# [ 228.650139] irq_exit_rcu+0x10/0x38 +# [ 228.653768] el1_interrupt+0x38/0x64 +# [ 228.657491] el1h_64_irq_handler+0x18/0x24 +# [ 228.661750] el1h_64_irq+0x64/0x68 +# [ 228.665293] lock_acquire+0x214/0x32c +# [ 228.669098] __mutex_lock+0x98/0x3d0 +# [ 228.672821] mutex_lock_nested+0x24/0x30 +# [ 228.676903] fault_worker+0x58/0x184 +# [ 228.680626] kthread_worker_fn+0xf4/0x320 +# [ 228.684790] kthread+0x114/0x118 +# [ 228.688156] ret_from_fork+0x10/0x20 +# [ 228.691882] SMP: stopping secondary CPUs +# [ 229.736843] SMP: failed to stop secondary CPUs 1,4 +# [ 229.741827] Kernel Offset: 0x53bbd1880000 from 0xffff800080000000 +# [ 229.748159] PHYS_OFFSET: 0xfff08fba80000000 +# [ 229.752499] CPU features: 0x18,00000017,00200928,4200720b +# [ 229.758095] Memory Limit: none +# [ 229.761291] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]--- diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt index fe8ce2ce33e6..abd1ccb71561 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Fail +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt index fe8ce2ce33e6..abd1ccb71561 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Fail +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt 
b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt index 4a2f4b6b14c1..8330b934602a 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Crash +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Crash diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt index fe8ce2ce33e6..abd1ccb71561 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Fail +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt deleted file mode 100644 index 5e6d48d98e4e..000000000000 --- a/drivers/gpu/drm/ci/xfails/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -git+https://gitlab.freedesktop.org/gfx-ci/ci-collate@09e7142715c16f54344ddf97013331ba063b162b -termcolor==2.3.0 - -# ci-collate dependencies -certifi==2023.7.22 -charset-normalizer==3.2.0 -idna==3.4 -pip==23.3 -python-gitlab==3.15.0 -requests==2.31.0 -requests-toolbelt==1.0.0 -ruamel.yaml==0.17.32 -ruamel.yaml.clib==0.2.7 -setuptools==70.0.0 -tenacity==8.2.3 -urllib3==2.0.7 -wheel==0.41.1 diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt index ea7b2ceb95b9..90282dfa19f4 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt @@ -1,18 +1,24 @@ -core_setmaster@master-drop-set-root,Crash core_setmaster@master-drop-set-user,Crash -core_setmaster_vs_auth,Crash -device_reset@cold-reset-bound,Crash -device_reset@reset-bound,Crash -device_reset@unbind-cold-reset-rebind,Crash -device_reset@unbind-reset-rebind,Crash dumb_buffer@create-clear,Crash -dumb_buffer@invalid-bpp,Crash fbdev@pan,Crash +kms_bw@linear-tiling-2-displays-1920x1080p,Fail kms_cursor_crc@cursor-onscreen-32x10,Crash kms_cursor_crc@cursor-onscreen-32x32,Crash +kms_cursor_crc@cursor-onscreen-64x64,Crash kms_cursor_crc@cursor-random-32x10,Crash +kms_cursor_crc@cursor-sliding-32x10,Crash kms_cursor_crc@cursor-sliding-32x32,Crash +kms_cursor_crc@cursor-sliding-64x21,Crash kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-crc-atomic,Crash +kms_flip@flip-vs-panning-vs-hang,Crash +kms_invalid_mode@int-max-clock,Crash +kms_lease@invalid-create-leases,Fail +kms_pipe_crc_basic@read-crc-frame-sequence,Crash +kms_plane@pixel-format,Crash +kms_plane@pixel-format-source-clamping,Crash kms_prop_blob@invalid-set-prop,Crash -kms_prop_blob@invalid-set-prop-any,Crash +kms_properties@get_properties-sanity-atomic,Crash +kms_properties@get_properties-sanity-non-atomic,Crash +kms_rmfb@close-fd,Crash diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt index 7ede273aab20..cd0b27d8b636 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt @@ -4,3 +4,31 @@ # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_cursor_legacy@flip-vs-cursor-atomic + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 
+# Linux Version: 6.11.0-rc2 +kms_cursor_crc@cursor-offscreen-32x10 + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_cursor_edge_walk@64x64-left-edge + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_flip@plain-flip-ts-check + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_cursor_crc@cursor-alpha-opaque diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt index 9309ff15e23a..83a38853b4af 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt @@ -1,9 +1,4 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail dumb_buffer@create-clear,Crash -dumb_buffer@invalid-bpp,Fail kms_atomic_transition@modeset-transition,Fail kms_atomic_transition@modeset-transition-fencing,Fail kms_atomic_transition@plane-toggle-modeset-transition,Fail @@ -46,7 +41,6 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail kms_flip@basic-flip-vs-wf_vblank,Fail kms_flip@blocking-wf_vblank,Fail -kms_flip@dpms-vs-vblank-race,Fail kms_flip@flip-vs-absolute-wf_vblank,Fail kms_flip@flip-vs-blocking-wf-vblank,Fail kms_flip@flip-vs-modeset-vs-hang,Fail @@ -59,7 +53,6 @@ kms_flip@plain-flip-fb-recreate,Fail kms_flip@plain-flip-fb-recreate-interruptible,Fail kms_flip@plain-flip-ts-check,Fail kms_flip@plain-flip-ts-check-interruptible,Fail -kms_flip@wf_vblank-ts-check,Fail kms_flip@wf_vblank-ts-check-interruptible,Fail kms_invalid_mode@int-max-clock,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt index d98f6a17343c..56f7d4f1ed15 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt @@ -46,3 +46,31 @@ kms_setmode@basic # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_bw@connected-linear-tiling-1-displays-2560x1440p + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_flip@wf_vblank-ts-check + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_flip@dpms-vs-vblank-race + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_bw@linear-tiling-2-displays-2160x1440p + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# 
Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_flip@flip-vs-expired-vblank diff --git a/drivers/gpu/drm/ci/xfails/update-xfails.py b/drivers/gpu/drm/ci/xfails/update-xfails.py deleted file mode 100755 index a446e98d72a1..000000000000 --- a/drivers/gpu/drm/ci/xfails/update-xfails.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -from collections import defaultdict -import difflib -import os -import re -from glcollate import Collate -from termcolor import colored -from urllib.parse import urlparse - - -def get_canonical_name(job_name): - return re.split(r" \d+/\d+", job_name)[0] - - -def get_xfails_file_path(job_name, suffix): - canonical_name = get_canonical_name(job_name) - name = canonical_name.replace(":", "-") - script_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join(script_dir, f"{name}-{suffix}.txt") - - -def get_unit_test_name_and_results(unit_test): - if "Artifact results/failures.csv not found" in unit_test or '' == unit_test: - return None, None - unit_test_name, unit_test_result = unit_test.strip().split(",") - return unit_test_name, unit_test_result - - -def read_file(file_path): - try: - with open(file_path, "r") as file: - f = file.readlines() - if len(f): - f[-1] = f[-1].strip() + "\n" - return f - except FileNotFoundError: - return [] - - -def save_file(content, file_path): - # delete file is content is empty - if not content or not any(content): - if os.path.exists(file_path): - os.remove(file_path) - return - - with open(file_path, "w") as file: - file.writelines(content) - - -def is_test_present_on_file(file_content, unit_test_name): - return any(unit_test_name in line for line in file_content) - - -def is_unit_test_present_in_other_jobs(unit_test, job_ids): - return all(unit_test in job_ids[job_id] for job_id in job_ids) - - -def remove_unit_test_if_present(lines, unit_test_name): - if not is_test_present_on_file(lines, unit_test_name): - return - lines[:] = [line for line in lines if unit_test_name not in line] - - -def add_unit_test_if_not_present(lines, unit_test_name, file_name): - # core_getversion is mandatory - if "core_getversion" in unit_test_name: - print("WARNING: core_getversion should pass, not adding it to", os.path.basename(file_name)) - elif all(unit_test_name not in line for line in lines): - lines.append(unit_test_name + "\n") - - -def update_unit_test_result_in_fails_txt(fails_txt, unit_test): - unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test) - for i, line in enumerate(fails_txt): - if unit_test_name in line: - _, current_result = get_unit_test_name_and_results(line) - fails_txt[i] = unit_test + "\n" - return - - -def add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, fails_txt_path): - unit_test_name, _ = get_unit_test_name_and_results(unit_test) - if not is_test_present_on_file(fails_txt, unit_test_name): - add_unit_test_if_not_present(fails_txt, unit_test, fails_txt_path) - # if it is present but not with the same result - elif not is_test_present_on_file(fails_txt, unit_test): - update_unit_test_result_in_fails_txt(fails_txt, unit_test) - - -def split_unit_test_from_collate(xfails): - for job_name in xfails.keys(): - for job_id in xfails[job_name].copy().keys(): - if "not found" in xfails[job_name][job_id].content_as_str: - del xfails[job_name][job_id] - continue - xfails[job_name][job_id] = xfails[job_name][job_id].content_as_str.splitlines() - - -def get_xfails_from_pipeline_url(pipeline_url): - parsed_url = 
urlparse(pipeline_url) - path_components = parsed_url.path.strip("/").split("/") - - namespace = path_components[0] - project = path_components[1] - pipeline_id = path_components[-1] - - print("Collating from:", namespace, project, pipeline_id) - xfails = ( - Collate(namespace=namespace, project=project) - .from_pipeline(pipeline_id) - .get_artifact("results/failures.csv") - ) - - split_unit_test_from_collate(xfails) - return xfails - - -def get_xfails_from_pipeline_urls(pipelines_urls): - xfails = defaultdict(dict) - - for url in pipelines_urls: - new_xfails = get_xfails_from_pipeline_url(url) - for key in new_xfails: - xfails[key].update(new_xfails[key]) - - return xfails - - -def print_diff(old_content, new_content, file_name): - diff = difflib.unified_diff(old_content, new_content, lineterm="", fromfile=file_name, tofile=file_name) - diff = [colored(line, "green") if line.startswith("+") else - colored(line, "red") if line.startswith("-") else line for line in diff] - print("\n".join(diff[:3])) - print("".join(diff[3:])) - - -def main(pipelines_urls, only_flakes): - xfails = get_xfails_from_pipeline_urls(pipelines_urls) - - for job_name in xfails.keys(): - fails_txt_path = get_xfails_file_path(job_name, "fails") - flakes_txt_path = get_xfails_file_path(job_name, "flakes") - - fails_txt = read_file(fails_txt_path) - flakes_txt = read_file(flakes_txt_path) - - fails_txt_original = fails_txt.copy() - flakes_txt_original = flakes_txt.copy() - - for job_id in xfails[job_name].keys(): - for unit_test in xfails[job_name][job_id]: - unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test) - - if not unit_test_name: - continue - - if only_flakes: - remove_unit_test_if_present(fails_txt, unit_test_name) - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # drop it from flakes if it is present to analyze it again - remove_unit_test_if_present(flakes_txt, unit_test_name) - - if unit_test_result == "UnexpectedPass": - remove_unit_test_if_present(fails_txt, unit_test_name) - # flake result - if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]): - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # flake result - if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]): - remove_unit_test_if_present(fails_txt, unit_test_name) - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # consistent result - add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, - fails_txt_path) - - fails_txt.sort() - flakes_txt.sort() - - if fails_txt != fails_txt_original: - save_file(fails_txt, fails_txt_path) - print_diff(fails_txt_original, fails_txt, os.path.basename(fails_txt_path)) - if flakes_txt != flakes_txt_original: - save_file(flakes_txt, flakes_txt_path) - print_diff(flakes_txt_original, flakes_txt, os.path.basename(flakes_txt_path)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Update xfails from a given pipeline.") - parser.add_argument("pipeline_urls", nargs="+", type=str, help="URLs to the pipelines to analyze the failures.") - parser.add_argument("--only-flakes", action="store_true", help="Treat every detected failure as a flake, edit *-flakes.txt only.") - - args = parser.parse_args() - - main(args.pipeline_urls, args.only_flakes) - print("Done.") diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt index 5408110f4c60..71c02104a683 
100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt @@ -1,24 +1,3 @@ -core_hotunplug@hotrebind,Fail -core_hotunplug@hotrebind-lateclose,Fail -core_hotunplug@hotreplug,Fail -core_hotunplug@hotreplug-lateclose,Fail -core_hotunplug@hotunbind-rebind,Fail -core_hotunplug@hotunplug-rescan,Fail -core_hotunplug@unbind-rebind,Fail -core_hotunplug@unplug-rescan,Fail -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail -kms_content_protection@atomic,Crash -kms_content_protection@atomic-dpms,Crash -kms_content_protection@content-type-change,Crash -kms_content_protection@lic-type-0,Crash -kms_content_protection@lic-type-1,Crash -kms_content_protection@srm,Crash -kms_content_protection@type1,Crash -kms_content_protection@uevent,Crash kms_cursor_crc@cursor-rapid-movement-128x128,Fail kms_cursor_crc@cursor-rapid-movement-128x42,Fail kms_cursor_crc@cursor-rapid-movement-256x256,Fail diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt index 5ccc771fbb36..b3d16e82e9a2 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt @@ -205,6 +205,59 @@ kms_cursor_edge_walk@128x128-right-edge # R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810 # R13: 0000000000000031 R14: 0000000000000031 R15: 000000000000 +kms_cursor_edge_walk@128x128-left-edge +# DEBUG - Begin test kms_cursor_edge_walk@128x128-left-edge +# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI +# CPU: 0 UID: 0 PID: 27 Comm: kworker/u8:1 Not tainted 6.11.0-rc5-g5d3429a7e9aa #1 +# Hardware name: ChromiumOS crosvm, BIOS 0 +# Workqueue: vkms_composer vkms_composer_worker [vkms] +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffa437800ebd58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa0e841904000 +# RDX: 00000000000000ff RSI: ffffa0e841905ff8 RDI: ffffa0e841902000 +# RBP: 0000000000000000 R08: ffffa0e84158a600 R09: 00000000000003ff +# R10: 0000000078b2bcd2 R11: 00000000278b2bcd R12: ffffa0e84870fc60 +# R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 +# FS: 0000000000000000(0000) GS:ffffa0e86bc00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0 +# Call Trace: +# <TASK> +# ? __die+0x1e/0x60 +# ? page_fault_oops+0x17b/0x4a0 +# ? exc_page_fault+0x6d/0x230 +# ? asm_exc_page_fault+0x26/0x30 +# ? compose_active_planes+0x344/0x4e0 [vkms] +# ? compose_active_planes+0x32f/0x4e0 [vkms] +# ? srso_return_thunk+0x5/0x5f +# vkms_composer_worker+0x205/0x240 [vkms] +# process_one_work+0x201/0x6c0 +# ? lock_is_held_type+0x9e/0x110 +# worker_thread+0x17e/0x310 +# ? __pfx_worker_thread+0x10/0x10 +# kthread+0xce/0x100 +# ? __pfx_kthread+0x10/0x10 +# ret_from_fork+0x2f/0x50 +# ? 
__pfx_kthread+0x10/0x10 +# ret_from_fork_asm+0x1a/0x30 +# </TASK> +# Modules linked in: vkms +# CR2: 0000000000000018 +# ---[ end trace 0000000000000000 ]--- +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffa437800ebd58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa0e841904000 +# RDX: 00000000000000ff RSI: ffffa0e841905ff8 RDI: ffffa0e841902000 +# RBP: 0000000000000000 R08: ffffa0e84158a600 R09: 00000000000003ff +# R10: 0000000078b2bcd2 R11: 00000000278b2bcd R12: ffffa0e84870fc60 +# R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 +# FS: 0000000000000000(0000) GS:ffffa0e86bc00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0 +# vkms_vblank_simulate: vblank timer overrun + # Skip driver specific tests ^amdgpu.* ^msm.* diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index 3b824e01c9b5..6a4e892afcf8 100644 --- a/drivers/gpu/drm/display/Kconfig +++ b/drivers/gpu/drm/display/Kconfig @@ -3,7 +3,7 @@ config DRM_DISPLAY_DP_AUX_BUS tristate depends on DRM - depends on OF || COMPILE_TEST + depends on OF config DRM_DISPLAY_HELPER tristate @@ -64,6 +64,12 @@ config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG If in doubt, say "N". +config DRM_DISPLAY_DSC_HELPER + bool + depends on DRM_DISPLAY_HELPER + help + DRM display helpers for VESA DSC (used by DSI and DisplayPort). + config DRM_DISPLAY_HDCP_HELPER bool help diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile index fbb9d2b8acd4..629c834c3192 100644 --- a/drivers/gpu/drm/display/Makefile +++ b/drivers/gpu/drm/display/Makefile @@ -8,10 +8,11 @@ drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \ drm_dp_dual_mode_helper.o \ drm_dp_helper.o \ - drm_dp_mst_topology.o \ - drm_dsc_helper.o + drm_dp_mst_topology.o drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \ drm_dp_tunnel.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \ + drm_dsc_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \ drm_hdmi_helper.o \ diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 3da5b8bf8259..320c297008aa 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -397,11 +397,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, bridge_connector->encoder = encoder; /* - * TODO: Handle doublescan_allowed, stereo_allowed and - * ycbcr_420_allowed. + * TODO: Handle doublescan_allowed and stereo_allowed. */ connector = &bridge_connector->base; connector->interlace_allowed = true; + connector->ycbcr_420_allowed = true; /* * Initialise connector status handling. 
First locate the furthest @@ -414,6 +414,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, drm_for_each_bridge_in_chain(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; + if (!bridge->ycbcr_420_allowed) + connector->ycbcr_420_allowed = false; if (bridge->ops & DRM_BRIDGE_OP_EDID) bridge_connector->bridge_edid = bridge; diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c index 14a2a8473682..c491e3203bf1 100644 --- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c @@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write); static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) { - static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] = "DP-HDMI ADAPTOR\x04"; return memcmp(hdmi_id, dp_dual_mode_hdmi_id, - sizeof(dp_dual_mode_hdmi_id)) == 0; + DP_DUAL_MODE_HDMI_ID_LEN) == 0; } static bool is_type1_adaptor(uint8_t adaptor_id) diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c deleted file mode 100644 index 5729f3bb4398..000000000000 --- a/drivers/gpu/drm/drm_aperture.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: MIT - -#include <linux/aperture.h> -#include <linux/platform_device.h> - -#include <drm/drm_aperture.h> -#include <drm/drm_drv.h> -#include <drm/drm_print.h> - -/** - * DOC: overview - * - * A graphics device might be supported by different drivers, but only one - * driver can be active at any given time. Many systems load a generic - * graphics drivers, such as EFI-GOP or VESA, early during the boot process. - * During later boot stages, they replace the generic driver with a dedicated, - * hardware-specific driver. To take over the device the dedicated driver - * first has to remove the generic driver. DRM aperture functions manage - * ownership of DRM framebuffer memory and hand-over between drivers. - * - * DRM drivers should call drm_aperture_remove_conflicting_framebuffers() - * at the top of their probe function. The function removes any generic - * driver that is currently associated with the given framebuffer memory. - * If the framebuffer is located at PCI BAR 0, the rsp code looks as in the - * example given below. - * - * .. code-block:: c - * - * static const struct drm_driver example_driver = { - * ... - * }; - * - * static int remove_conflicting_framebuffers(struct pci_dev *pdev) - * { - * resource_size_t base, size; - * int ret; - * - * base = pci_resource_start(pdev, 0); - * size = pci_resource_len(pdev, 0); - * - * return drm_aperture_remove_conflicting_framebuffers(base, size, - * &example_driver); - * } - * - * static int probe(struct pci_dev *pdev) - * { - * int ret; - * - * // Remove any generic drivers... - * ret = remove_conflicting_framebuffers(pdev); - * if (ret) - * return ret; - * - * // ... and initialize the hardware. - * ... - * - * drm_dev_register(); - * - * return 0; - * } - * - * PCI device drivers should call - * drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the - * framebuffer apertures automatically. Device drivers without knowledge of - * the framebuffer's location shall call drm_aperture_remove_framebuffers(), - * which removes all drivers for known framebuffer. 
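
A note on the is_hdmi_adaptor() hunk above: "DP-HDMI ADAPTOR\x04" is exactly DP_DUAL_MODE_HDMI_ID_LEN (16) bytes, so the old [DP_DUAL_MODE_HDMI_ID_LEN] array left no room for the string literal's implicit NUL terminator (legal C, but the kind of initialization newer compilers flag; which specific warning motivated the change is an assumption here). Sizing the array one byte larger keeps the literal NUL-terminated, and since sizeof() would now report 17, the memcmp() length is pinned to the 16 ID bytes actually read from the adaptor. A condensed restatement of the change, not new code:

	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
		"DP-HDMI ADAPTOR\x04";	/* 16 ID bytes plus the implicit NUL */

	/* Compare only the ID bytes; sizeof(dp_dual_mode_hdmi_id) is now 17
	 * and would overrun the 16-byte buffer supplied by the caller. */
	return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
		      DP_DUAL_MODE_HDMI_ID_LEN) == 0;
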
- * - * Drivers that are susceptible to being removed by other drivers, such as - * generic EFI or VESA drivers, have to register themselves as owners of their - * given framebuffer memory. Ownership of the framebuffer memory is achieved - * by calling devm_aperture_acquire_from_firmware(). On success, the driver - * is the owner of the framebuffer range. The function fails if the - * framebuffer is already owned by another driver. See below for an example. - * - * .. code-block:: c - * - * static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev) - * { - * resource_size_t base, size; - * - * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - * if (!mem) - * return -EINVAL; - * base = mem->start; - * size = resource_size(mem); - * - * return devm_acquire_aperture_from_firmware(dev, base, size); - * } - * - * static int probe(struct platform_device *pdev) - * { - * struct drm_device *dev; - * int ret; - * - * // ... Initialize the device... - * dev = devm_drm_dev_alloc(); - * ... - * - * // ... and acquire ownership of the framebuffer. - * ret = acquire_framebuffers(dev, pdev); - * if (ret) - * return ret; - * - * drm_dev_register(dev, 0); - * - * return 0; - * } - * - * The generic driver is now subject to forced removal by other drivers. This - * only works for platform drivers that support hot unplug. - * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al. - * for the registered framebuffer range, the aperture helpers call - * platform_device_unregister() and the generic driver unloads itself. It - * may not access the device's registers, framebuffer memory, ROM, etc - * afterwards. - */ - -/** - * devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer - * on behalf of a DRM driver. - * @dev: the DRM device to own the framebuffer memory - * @base: the framebuffer's byte offset in physical memory - * @size: the framebuffer size in bytes - * - * Installs the given device as the new owner of the framebuffer. The function - * expects the framebuffer to be provided by a platform device that has been - * set up by firmware. Firmware can be any generic interface, such as EFI, - * VESA, VGA, etc. If the native hardware driver takes over ownership of the - * framebuffer range, the firmware state gets lost. Aperture helpers will then - * unregister the platform device automatically. Acquired apertures are - * released automatically if the underlying device goes away. - * - * The function fails if the framebuffer range, or parts of it, is currently - * owned by another driver. To evict current owners, callers should use - * drm_aperture_remove_conflicting_framebuffers() et al. before calling this - * function. The function also fails if the given device is not a platform - * device. - * - * Returns: - * 0 on success, or a negative errno value otherwise. 
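
The drm_aperture.c removal is less drastic than the diffstat suggests: the three exported functions (bodies follow below) were thin wrappers over the generic helpers in <linux/aperture.h>, namely aperture_remove_conflicting_devices(), aperture_remove_conflicting_pci_devices() and devm_aperture_acquire_for_platform_device(), so callers presumably switch to those directly. A minimal sketch of a PCI probe path without the DRM wrapper; the driver name and function are placeholders:

	#include <linux/aperture.h>

	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		int ret;

		/* Evict generic firmware framebuffer drivers (EFI-GOP,
		 * VESA, ...) that claim any of this device's BARs. */
		ret = aperture_remove_conflicting_pci_devices(pdev, "example");
		if (ret)
			return ret;

		/* ... initialize the hardware, then drm_dev_register() ... */
		return 0;
	}
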
- */ -int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, - resource_size_t size) -{ - struct platform_device *pdev; - - if (drm_WARN_ON(dev, !dev_is_platform(dev->dev))) - return -EINVAL; - - pdev = to_platform_device(dev->dev); - - return devm_aperture_acquire_for_platform_device(pdev, base, size); -} -EXPORT_SYMBOL(devm_aperture_acquire_from_firmware); - -/** - * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range - * @base: the aperture's base address in physical memory - * @size: aperture size in bytes - * @req_driver: requesting DRM driver - * - * This function removes graphics device drivers which use the memory range described by - * @base and @size. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - const struct drm_driver *req_driver) -{ - return aperture_remove_conflicting_devices(base, size, req_driver->name); -} -EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); - -/** - * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices - * @pdev: PCI device - * @req_driver: requesting DRM driver - * - * This function removes graphics device drivers using the memory range configured - * for any of @pdev's memory bars. The function assumes that a PCI device with - * shadowed ROM drives a primary display and so kicks out vga16fb. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const struct drm_driver *req_driver) -{ - return aperture_remove_conflicting_pci_devices(pdev, req_driver->name); -} -EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 0fc99da93afe..9ea2611770f4 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1132,6 +1132,8 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); + drm_printf(p, "\tinterlace_allowed=%d\n", connector->interlace_allowed); + drm_printf(p, "\tycbcr_420_allowed=%d\n", connector->ycbcr_420_allowed); drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace)); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 43cdf39019a4..5186d2114a50 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3015,7 +3015,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, bool stall) { int i, ret; - unsigned long flags; + unsigned long flags = 0; struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; struct drm_crtc *crtc; diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index bfedcbf516db..549b28a5918c 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -10,7 +10,6 @@ #include <linux/slab.h> #include <drm/drm_client.h> -#include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> @@ -172,99 +171,6 @@ void drm_client_release(struct drm_client_dev *client) } EXPORT_SYMBOL(drm_client_release); -/** - * drm_client_dev_unregister - Unregister clients - * @dev: DRM device - * - * This function releases all clients by calling each client's - * &drm_client_funcs.unregister callback. The callback function - * is responsibe for releaseing all resources including the client - * itself. - * - * The helper drm_dev_unregister() calls this function. Drivers - * that use it don't need to call this function themselves. - */ -void drm_client_dev_unregister(struct drm_device *dev) -{ - struct drm_client_dev *client, *tmp; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { - list_del(&client->list); - if (client->funcs && client->funcs->unregister) { - client->funcs->unregister(client); - } else { - drm_client_release(client); - kfree(client); - } - } - mutex_unlock(&dev->clientlist_mutex); -} -EXPORT_SYMBOL(drm_client_dev_unregister); - -/** - * drm_client_dev_hotplug - Send hotplug event to clients - * @dev: DRM device - * - * This function calls the &drm_client_funcs.hotplug callback on the attached clients. - * - * drm_kms_helper_hotplug_event() calls this function, so drivers that use it - * don't need to call this function themselves. 
- */ -void drm_client_dev_hotplug(struct drm_device *dev) -{ - struct drm_client_dev *client; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - if (!dev->mode_config.num_connector) { - drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n"); - return; - } - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) { - if (!client->funcs || !client->funcs->hotplug) - continue; - - if (client->hotplug_failed) - continue; - - ret = client->funcs->hotplug(client); - drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); - if (ret) - client->hotplug_failed = true; - } - mutex_unlock(&dev->clientlist_mutex); -} -EXPORT_SYMBOL(drm_client_dev_hotplug); - -void drm_client_dev_restore(struct drm_device *dev) -{ - struct drm_client_dev *client; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) { - if (!client->funcs || !client->funcs->restore) - continue; - - ret = client->funcs->restore(client); - drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); - if (!ret) /* The first one to return zero gets the privilege to restore */ - break; - } - mutex_unlock(&dev->clientlist_mutex); -} - static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { if (buffer->gem) { @@ -584,30 +490,3 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re 0, 0, NULL, 0); } EXPORT_SYMBOL(drm_client_framebuffer_flush); - -#ifdef CONFIG_DEBUG_FS -static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_client_dev *client; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) - drm_printf(&p, "%s\n", client->name); - mutex_unlock(&dev->clientlist_mutex); - - return 0; -} - -static const struct drm_debugfs_info drm_client_debugfs_list[] = { - { "internal_clients", drm_client_debugfs_internal_clients, 0 }, -}; - -void drm_client_debugfs_init(struct drm_device *dev) -{ - drm_debugfs_add_files(dev, drm_client_debugfs_list, - ARRAY_SIZE(drm_client_debugfs_list)); -} -#endif diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c new file mode 100644 index 000000000000..e303de564485 --- /dev/null +++ b/drivers/gpu/drm/drm_client_event.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* + * Copyright 2018 Noralf Trønnes + */ + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> + +#include <drm/drm_client.h> +#include <drm/drm_client_event.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" + +/** + * drm_client_dev_unregister - Unregister clients + * @dev: DRM device + * + * This function releases all clients by calling each client's + * &drm_client_funcs.unregister callback. The callback function + * is responsibe for releaseing all resources including the client + * itself. + * + * The helper drm_dev_unregister() calls this function. Drivers + * that use it don't need to call this function themselves. 
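
Besides carrying over the unregister/hotplug/restore helpers verbatim from drm_client.c, the new drm_client_event.c (continued below) adds drm_client_dev_suspend() and drm_client_dev_resume(), which walk the client list and use a per-client suspended flag so no client ever sees a double suspend or resume. A hypothetical wiring from a driver's system PM path; the function names are placeholders, and passing false for holds_console_lock assumes a caller that does not hold the console lock:

	static int example_pm_suspend(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* Quiesce in-kernel clients (fbdev emulation, ...) ... */
		drm_client_dev_suspend(drm, false);
		/* ... then power down the hardware. */
		return 0;
	}

	static int example_pm_resume(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* Power up the hardware first, then wake the clients. */
		drm_client_dev_resume(drm, false);
		return 0;
	}
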
+ */ +void drm_client_dev_unregister(struct drm_device *dev) +{ + struct drm_client_dev *client, *tmp; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { + list_del(&client->list); + if (client->funcs && client->funcs->unregister) { + client->funcs->unregister(client); + } else { + drm_client_release(client); + kfree(client); + } + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_unregister); + +/** + * drm_client_dev_hotplug - Send hotplug event to clients + * @dev: DRM device + * + * This function calls the &drm_client_funcs.hotplug callback on the attached clients. + * + * drm_kms_helper_hotplug_event() calls this function, so drivers that use it + * don't need to call this function themselves. + */ +void drm_client_dev_hotplug(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + if (!dev->mode_config.num_connector) { + drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n"); + return; + } + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->hotplug) + continue; + + if (client->hotplug_failed) + continue; + + ret = client->funcs->hotplug(client); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + if (ret) + client->hotplug_failed = true; + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_hotplug); + +void drm_client_dev_restore(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->restore) + continue; + + ret = client->funcs->restore(client); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + if (!ret) /* The first one to return zero gets the privilege to restore */ + break; + } + mutex_unlock(&dev->clientlist_mutex); +} + +static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_device *dev = client->dev; + int ret = 0; + + if (drm_WARN_ON_ONCE(dev, client->suspended)) + return 0; + + if (client->funcs && client->funcs->suspend) + ret = client->funcs->suspend(client, holds_console_lock); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + + client->suspended = true; + + return ret; +} + +void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +{ + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->suspended) + drm_client_suspend(client, holds_console_lock); + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_suspend); + +static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_device *dev = client->dev; + int ret = 0; + + if (drm_WARN_ON_ONCE(dev, !client->suspended)) + return 0; + + if (client->funcs && client->funcs->resume) + ret = client->funcs->resume(client, holds_console_lock); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + + client->suspended = false; + + return ret; +} + +void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +{ + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, 
&dev->clientlist, list) { + if (client->suspended) + drm_client_resume(client, holds_console_lock); + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_resume); + +#ifdef CONFIG_DEBUG_FS +static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) +{ + struct drm_debugfs_entry *entry = m->private; + struct drm_device *dev = entry->dev; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) + drm_printf(&p, "%s\n", client->name); + mutex_unlock(&dev->clientlist_mutex); + + return 0; +} + +static const struct drm_debugfs_info drm_client_debugfs_list[] = { + { "internal_clients", drm_client_debugfs_internal_clients, 0 }, +}; + +void drm_client_debugfs_init(struct drm_device *dev) +{ + drm_debugfs_add_files(dev, drm_client_debugfs_list, + ARRAY_SIZE(drm_client_debugfs_list)); +} +#endif diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index cee5eafbfb81..251f94313717 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -145,7 +145,7 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) } static struct drm_display_mode * -drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height) +drm_connector_preferred_mode(struct drm_connector *connector, int width, int height) { struct drm_display_mode *mode; @@ -159,6 +159,12 @@ drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int return NULL; } +static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector) +{ + return list_first_entry_or_null(&connector->modes, + struct drm_display_mode, head); +} + static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector) { struct drm_cmdline_mode *cmdline_mode; @@ -331,7 +337,7 @@ static bool drm_client_target_cloned(struct drm_device *dev, if (!modes[i]) can_clone = false; } - kfree(dmt_mode); + drm_mode_destroy(dev, dmt_mode); if (can_clone) { drm_dbg_kms(dev, "can clone using 1024x768\n"); @@ -441,13 +447,11 @@ retry: drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n", connector->base.id, connector->name, connector->tile_group ? connector->tile_group->id : 0); - modes[i] = drm_connector_has_preferred_mode(connector, width, height); + modes[i] = drm_connector_preferred_mode(connector, width, height); } /* No preferred modes, pick one off the list */ - if (!modes[i] && !list_empty(&connector->modes)) { - list_for_each_entry(modes[i], &connector->modes, head) - break; - } + if (!modes[i]) + modes[i] = drm_connector_first_mode(connector); /* * In case of tiled mode if all tiles not present fallback to * first available non tiled mode. @@ -531,7 +535,7 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, my_score++; if (connector->cmdline_mode.specified) my_score++; - if (drm_connector_has_preferred_mode(connector, width, height)) + if (drm_connector_preferred_mode(connector, width, height)) my_score++; /* @@ -686,16 +690,14 @@ retry: "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n", connector->base.id, connector->name, str_yes_no(connector->has_tile)); - modes[i] = drm_connector_has_preferred_mode(connector, width, height); + modes[i] = drm_connector_preferred_mode(connector, width, height); } /* No preferred mode marked by the EDID? Are there any modes? 
*/ if (!modes[i] && !list_empty(&connector->modes)) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n", connector->base.id, connector->name); - modes[i] = list_first_entry(&connector->modes, - struct drm_display_mode, - head); + modes[i] = drm_connector_first_mode(connector); } /* last resort: use current mode */ @@ -878,7 +880,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, break; } - kfree(modeset->mode); + drm_mode_destroy(dev, modeset->mode); modeset->mode = drm_mode_duplicate(dev, mode); if (!modeset->mode) { ret = -ENOMEM; diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/drm_client_setup.c new file mode 100644 index 000000000000..c14221ca5a0d --- /dev/null +++ b/drivers/gpu/drm/drm_client_setup.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT + +#include <drm/drm_client_setup.h> +#include <drm/drm_device.h> +#include <drm/drm_fbdev_client.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_print.h> + +/** + * drm_client_setup() - Setup in-kernel DRM clients + * @dev: DRM device + * @format: Preferred pixel format for the device. Use NULL, unless + * there is clearly a driver-preferred format. + * + * This function sets up the in-kernel DRM clients. Restore, hotplug + * events and teardown are all taken care of. + * + * Drivers should call drm_client_setup() after registering the new + * DRM device with drm_dev_register(). This function is safe to call + * even when there are no connectors present. Setup will be retried + * on the next hotplug event. + * + * The clients are destroyed by drm_dev_unregister(). + */ +void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format) +{ + int ret; + + ret = drm_fbdev_client_setup(dev, format); + if (ret) + drm_warn(dev, "Failed to set up DRM client; error %d\n", ret); +} +EXPORT_SYMBOL(drm_client_setup); + +/** + * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode + * @dev: DRM device + * @fourcc: Preferred pixel format as 4CC code for the device + * + * This function sets up the in-kernel DRM clients. It is equivalent + * to drm_client_setup(), but expects a 4CC code as second argument. + */ +void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc) +{ + drm_client_setup(dev, drm_format_info(fourcc)); +} +EXPORT_SYMBOL(drm_client_setup_with_fourcc); + +/** + * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode + * @dev: DRM device + * @color_mode: Preferred color mode for the device + * + * This function sets up the in-kernel DRM clients. It is equivalent + * to drm_client_setup(), but expects a color mode as second argument. + * + * Do not use this function in new drivers. Prefer drm_client_setup() with a + * format of NULL. 
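
The kernel-doc above pins down the calling convention for the new setup entry point; a minimal probe-tail sketch under those rules (the surrounding driver code is hypothetical):

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* NULL lets the helper pick the format; per the kernel-doc this is
	 * safe with zero connectors, and setup is retried on hotplug. */
	drm_client_setup(drm, NULL);

	return 0;
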
+ */ +void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode) +{ + u32 fourcc = drm_driver_color_mode_format(dev, color_mode); + + drm_client_setup_with_fourcc(dev, fourcc); +} +EXPORT_SYMBOL(drm_client_setup_with_color_mode); + +MODULE_DESCRIPTION("In-kernel DRM clients"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 9d3e6dd68810..536409a35df4 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -32,7 +32,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_auth.h> #include <drm/drm_bridge.h> -#include <drm/drm_client.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> @@ -78,12 +77,14 @@ static int drm_clients_info(struct seq_file *m, void *data) kuid_t uid; seq_printf(m, - "%20s %5s %3s master a %5s %10s\n", + "%20s %5s %3s master a %5s %10s %*s\n", "command", "tgid", "dev", "uid", - "magic"); + "magic", + DRM_CLIENT_NAME_MAX_LEN, + "name"); /* dev->filelist is sorted youngest first, but we want to present * oldest first (i.e. kernel, servers, clients), so walk backwardss. @@ -94,19 +95,23 @@ static int drm_clients_info(struct seq_file *m, void *data) struct task_struct *task; struct pid *pid; + mutex_lock(&priv->client_name_lock); rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */ pid = rcu_dereference(priv->pid); task = pid_task(pid, PIDTYPE_TGID); uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID; - seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n", + seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n", task ? task->comm : "<unknown>", pid_vnr(pid), priv->minor->index, is_current_master ? 'y' : 'n', priv->authenticated ? 'y' : 'n', from_kuid_munged(seq_user_ns(m), uid), - priv->magic); + priv->magic, + DRM_CLIENT_NAME_MAX_LEN, + priv->client_name ? priv->client_name : "<unset>"); rcu_read_unlock(); + mutex_unlock(&priv->client_name_lock); } mutex_unlock(&dev->filelist_mutex); return 0; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ac30b0ec9d93..c2c172eb25df 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -38,7 +38,7 @@ #include <drm/drm_accel.h> #include <drm/drm_cache.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 29c53f9f449c..c9008113111b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -492,8 +492,8 @@ EXPORT_SYMBOL(drm_fb_helper_init); * @fb_helper: driver-allocated fbdev helper * * A helper to alloc fb_info and the member cmap. Called by the driver - * within the fb_probe fb_helper callback function. Drivers do not - * need to release the allocated fb_info structure themselves, this is + * within the struct &drm_driver.fbdev_probe callback function. Drivers do + * not need to release the allocated fb_info structure themselves, this is * automatically done when calling drm_fb_helper_fini(). * * RETURNS: @@ -554,7 +554,7 @@ EXPORT_SYMBOL(drm_fb_helper_release_info); /** * drm_fb_helper_unregister_info - unregister fb_info framebuffer device - * @fb_helper: driver-allocated fbdev helper, can be NULL + * @fb_helper: driver-allocated fbdev helper, must not be NULL * * A wrapper around unregister_framebuffer, to release the fb_info * framebuffer device. 
This must be called before releasing all resources for @@ -562,8 +562,12 @@ EXPORT_SYMBOL(drm_fb_helper_release_info); */ void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) { - if (fb_helper && fb_helper->info) - unregister_framebuffer(fb_helper->info); + struct fb_info *info = fb_helper->info; + struct device *dev = info->device; + + if (dev_is_pci(dev)) + vga_switcheroo_client_fb_set(to_pci_dev(dev), NULL); + unregister_framebuffer(fb_helper->info); } EXPORT_SYMBOL(drm_fb_helper_unregister_info); @@ -693,6 +697,7 @@ void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u3 } EXPORT_SYMBOL(drm_fb_helper_damage_area); +#ifdef CONFIG_FB_DEFERRED_IO /** * drm_fb_helper_deferred_io() - fbdev deferred_io callback function * @info: fb_info struct pointer @@ -736,6 +741,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli } } EXPORT_SYMBOL(drm_fb_helper_deferred_io); +#endif /** * drm_fb_helper_set_suspend - wrapper around fb_set_suspend @@ -1441,67 +1447,27 @@ unlock: EXPORT_SYMBOL(drm_fb_helper_pan_display); static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats, - size_t format_count, uint32_t bpp, uint32_t depth) + size_t format_count, unsigned int color_mode) { struct drm_device *dev = fb_helper->dev; uint32_t format; size_t i; - /* - * Do not consider YUV or other complicated formats - * for framebuffers. This means only legacy formats - * are supported (fmt->depth is a legacy field), but - * the framebuffer emulation can only deal with such - * formats, specifically RGB/BGA formats. - */ - format = drm_mode_legacy_fb_format(bpp, depth); - if (!format) - goto err; + format = drm_driver_color_mode_format(dev, color_mode); + if (!format) { + drm_info(dev, "unsupported color mode of %d\n", color_mode); + return DRM_FORMAT_INVALID; + } for (i = 0; i < format_count; ++i) { if (formats[i] == format) return format; } - -err: - /* We found nothing. 
*/ - drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth); + drm_warn(dev, "format %p4cc not supported\n", &format); return DRM_FORMAT_INVALID; } -static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper, - const uint32_t *formats, size_t format_count, - unsigned int color_mode) -{ - struct drm_device *dev = fb_helper->dev; - uint32_t bpp, depth; - - switch (color_mode) { - case 1: - case 2: - case 4: - case 8: - case 16: - case 24: - bpp = depth = color_mode; - break; - case 15: - bpp = 16; - depth = 15; - break; - case 32: - bpp = 32; - depth = 24; - break; - default: - drm_info(dev, "unsupported color mode of %d\n", color_mode); - return DRM_FORMAT_INVALID; - } - - return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth); -} - static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { @@ -1531,10 +1497,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, if (!cmdline_mode->bpp_specified) continue; - surface_format = drm_fb_helper_find_color_mode_format(fb_helper, - plane->format_types, - plane->format_count, - cmdline_mode->bpp); + surface_format = drm_fb_helper_find_format(fb_helper, + plane->format_types, + plane->format_count, + cmdline_mode->bpp); if (surface_format != DRM_FORMAT_INVALID) break; /* found supported format */ } @@ -1544,10 +1510,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, break; /* found supported format */ /* try preferred color mode */ - surface_format = drm_fb_helper_find_color_mode_format(fb_helper, - plane->format_types, - plane->format_count, - fb_helper->preferred_bpp); + surface_format = drm_fb_helper_find_format(fb_helper, + plane->format_types, + plane->format_count, + fb_helper->preferred_bpp); if (surface_format != DRM_FORMAT_INVALID) break; /* found supported format */ } @@ -1648,13 +1614,14 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, /* * Allocates the backing storage and sets up the fbdev info structure through - * the ->fb_probe callback. + * the ->fbdev_probe callback. */ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_surface_size sizes; + struct fb_info *info; int ret; ret = drm_fb_helper_find_sizes(fb_helper, &sizes); @@ -1666,15 +1633,20 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) } /* push down into drivers */ - ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); + if (dev->driver->fbdev_probe) + ret = dev->driver->fbdev_probe(fb_helper, &sizes); + else if (fb_helper->funcs) + ret = fb_helper->funcs->fb_probe(fb_helper, &sizes); if (ret < 0) return ret; strcpy(fb_helper->fb->comm, "[fbcon]"); + info = fb_helper->info; + /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */ - if (dev_is_pci(dev->dev)) - vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info); + if (dev_is_pci(info->device)) + vga_switcheroo_client_fb_set(to_pci_dev(info->device), info); return 0; } @@ -1738,7 +1710,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info, * instance and the drm framebuffer allocated in &drm_fb_helper.fb. 
* * Drivers should call this (or their equivalent setup code) from their - * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev + * &drm_driver.fbdev_probe callback after having allocated the fbdev * backing storage framebuffer. */ void drm_fb_helper_fill_info(struct fb_info *info, @@ -1894,7 +1866,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) * Note that this also registers the fbdev and so allows userspace to call into * the driver through the fbdev interfaces. * - * This function will call down into the &drm_fb_helper_funcs.fb_probe callback + * This function will call down into the &drm_driver.fbdev_probe callback * to let the driver allocate and initialize the fbdev info structure and the * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided * as a helper to setup simple default values for the fbdev info structure. diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/drm_fbdev_client.c new file mode 100644 index 000000000000..246fb63ab250 --- /dev/null +++ b/drivers/gpu/drm/drm_fbdev_client.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: MIT + +#include <drm/drm_client.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fbdev_client.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_print.h> + +/* + * struct drm_client_funcs + */ + +static void drm_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->info) { + drm_fb_helper_unregister_info(fb_helper); + } else { + drm_client_release(&fb_helper->client); + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); + } +} + +static int drm_fbdev_client_restore(struct drm_client_dev *client) +{ + drm_fb_helper_lastclose(client->dev); + + return 0; +} + +static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + ret = drm_fb_helper_init(dev, fb_helper); + if (ret) + goto err_drm_err; + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper); + if (ret) + goto err_drm_fb_helper_fini; + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(fb_helper); +err_drm_err: + drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret); + return ret; +} + +static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (holds_console_lock) + drm_fb_helper_set_suspend(fb_helper, true); + else + drm_fb_helper_set_suspend_unlocked(fb_helper, true); + + return 0; +} + +static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (holds_console_lock) + drm_fb_helper_set_suspend(fb_helper, false); + else + drm_fb_helper_set_suspend_unlocked(fb_helper, false); + + return 0; +} + +static const struct drm_client_funcs drm_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = drm_fbdev_client_unregister, + .restore = drm_fbdev_client_restore, + .hotplug = drm_fbdev_client_hotplug, + .suspend = drm_fbdev_client_suspend, + .resume = drm_fbdev_client_resume, +}; + +/** + * 
drm_fbdev_client_setup() - Setup fbdev emulation + * @dev: DRM device + * @format: Preferred pixel format for the device. DRM_FORMAT_XRGB8888 + * is used if this is NULL. + * + * This function sets up fbdev emulation. Restore, hotplug events and + * teardown are all taken care of. Drivers that do suspend/resume need + * to call drm_client_dev_suspend() and drm_client_dev_resume() by + * themselves. Simple drivers might use drm_mode_config_helper_suspend(). + * + * This function is safe to call even when there are no connectors present. + * Setup will be retried on the next hotplug event. + * + * The fbdev client is destroyed by drm_dev_unregister(). + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format) +{ + struct drm_fb_helper *fb_helper; + unsigned int color_mode; + int ret; + + /* TODO: Use format info throughout DRM */ + if (format) { + unsigned int bpp = drm_format_info_bpp(format, 0); + + switch (bpp) { + case 16: + color_mode = format->depth; // could also be 15 + break; + default: + color_mode = bpp; + } + } else { + switch (dev->mode_config.preferred_depth) { + case 0: + case 24: + color_mode = 32; + break; + default: + color_mode = dev->mode_config.preferred_depth; + } + } + + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return -ENOMEM; + drm_fb_helper_prepare(dev, fb_helper, color_mode, NULL); + + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + if (ret) { + drm_err(dev, "Failed to register client: %d\n", ret); + goto err_drm_client_init; + } + + drm_client_register(&fb_helper->client); + + return 0; + +err_drm_client_init: + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); + return ret; +} +EXPORT_SYMBOL(drm_fbdev_client_setup); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 51c2d742d199..b14b581c059d 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -2,15 +2,13 @@ #include <linux/fb.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_dma.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> -#include <drm/drm_fbdev_dma.h> - /* * struct fb_ops */ @@ -103,8 +101,35 @@ static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = { * struct drm_fb_helper */ -static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = { + .fb_dirty = drm_fbdev_dma_helper_fb_dirty, +}; + +/* + * struct drm_driver + */ + +int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct
drm_device *dev = fb_helper->dev; @@ -148,6 +173,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, goto err_drm_client_buffer_delete; } + fb_helper->funcs = &drm_fbdev_dma_helper_funcs; fb_helper->buffer = buffer; fb_helper->fb = fb; @@ -211,136 +237,4 @@ err_drm_client_buffer_delete: drm_client_framebuffer_delete(buffer); return ret; } - -static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, - struct drm_clip_rect *clip) -{ - struct drm_device *dev = helper->dev; - int ret; - - /* Call damage handlers only if necessary */ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - return ret; - } - - return 0; -} - -static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = { - .fb_probe = drm_fbdev_dma_helper_fb_probe, - .fb_dirty = drm_fbdev_dma_helper_fb_dirty, -}; - -/* - * struct drm_client_funcs - */ - -static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int drm_fbdev_dma_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs drm_fbdev_dma_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_dma_client_unregister, - .restore = drm_fbdev_dma_client_restore, - .hotplug = drm_fbdev_dma_client_hotplug, -}; - -/** - * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * 32 is used if this is zero. - * - * This function sets up fbdev emulation for GEM DMA drivers that support - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_dma_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
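With fb_probe replaced by the per-driver hook, a converted driver advertises the probe helper in its struct drm_driver rather than through &drm_fb_helper_funcs; drm_fb_helper_single_fb_probe() (see the drm_fb_helper.c hunk above) prefers that hook when present. A hedged sketch for a DMA-based driver, with a hypothetical struct name:

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>

static const struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/*
	 * Called via dev->driver->fbdev_probe instead of the old
	 * &drm_fb_helper_funcs.fb_probe hook. The probe function sets
	 * fb_helper->funcs itself, which is why drm_fbdev_client_setup()
	 * passes NULL helper funcs to drm_fb_helper_prepare().
	 */
	.fbdev_probe = drm_fbdev_dma_driver_fbdev_probe,
};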
- */ -void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} -EXPORT_SYMBOL(drm_fbdev_dma_setup); +EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c index 0c785007f11b..f824369baacd 100644 --- a/drivers/gpu/drm/drm_fbdev_shmem.c +++ b/drivers/gpu/drm/drm_fbdev_shmem.c @@ -2,15 +2,13 @@ #include <linux/fb.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_shmem.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> -#include <drm/drm_fbdev_shmem.h> - /* * struct fb_ops */ @@ -105,8 +103,35 @@ static struct page *drm_fbdev_shmem_get_page(struct fb_info *info, unsigned long * struct drm_fb_helper */ -static int drm_fbdev_shmem_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = { + .fb_dirty = drm_fbdev_shmem_helper_fb_dirty, +}; + +/* + * struct drm_driver + */ + +int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; @@ -139,6 +164,7 @@ static int drm_fbdev_shmem_helper_fb_probe(struct drm_fb_helper *fb_helper, goto err_drm_client_buffer_delete; } + fb_helper->funcs = &drm_fbdev_shmem_helper_funcs; fb_helper->buffer = buffer; fb_helper->fb = fb; @@ -182,136 +208,4 @@ err_drm_client_buffer_delete: drm_client_framebuffer_delete(buffer); return ret; } - -static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper, - struct drm_clip_rect *clip) -{ - struct drm_device *dev = helper->dev; - int ret; - - /* Call damage handlers only if necessary */ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - return ret; - } - - return 0; -} - -static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = { - .fb_probe = drm_fbdev_shmem_helper_fb_probe, - .fb_dirty = drm_fbdev_shmem_helper_fb_dirty, -}; - -/* - * struct drm_client_funcs - */ - -static void 
drm_fbdev_shmem_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int drm_fbdev_shmem_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int drm_fbdev_shmem_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "fbdev-shmem: Failed to setup emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs drm_fbdev_shmem_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_shmem_client_unregister, - .restore = drm_fbdev_shmem_client_restore, - .hotplug = drm_fbdev_shmem_client_hotplug, -}; - -/** - * drm_fbdev_shmem_setup() - Setup fbdev emulation for GEM SHMEM helpers - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * 32 is used if this is zero. - * - * This function sets up fbdev emulation for GEM DMA drivers that support - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_shmem_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
- */ -void drm_fbdev_shmem_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_shmem_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_shmem_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} -EXPORT_SYMBOL(drm_fbdev_shmem_setup); +EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c index 119ffb28aaf9..73d35d59590c 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -65,79 +65,6 @@ static const struct fb_ops drm_fbdev_ttm_fb_ops = { .fb_destroy = drm_fbdev_ttm_fb_destroy, }; -/* - * This function uses the client API to create a framebuffer backed by a dumb buffer. - */ -static int drm_fbdev_ttm_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_client_dev *client = &fb_helper->client; - struct drm_device *dev = fb_helper->dev; - struct drm_client_buffer *buffer; - struct fb_info *info; - size_t screen_size; - void *screen_buffer; - u32 format; - int ret; - - drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", - sizes->surface_width, sizes->surface_height, - sizes->surface_bpp); - - format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, - sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, - sizes->surface_height, format); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); - - fb_helper->buffer = buffer; - fb_helper->fb = buffer->fb; - - screen_size = buffer->gem->size; - screen_buffer = vzalloc(screen_size); - if (!screen_buffer) { - ret = -ENOMEM; - goto err_drm_client_framebuffer_delete; - } - - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_vfree; - } - - drm_fb_helper_fill_info(info, fb_helper, sizes); - - info->fbops = &drm_fbdev_ttm_fb_ops; - - /* screen */ - info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; - info->screen_buffer = screen_buffer; - info->fix.smem_len = screen_size; - - /* deferred I/O */ - fb_helper->fbdefio.delay = HZ / 20; - fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - - info->fbdefio = &fb_helper->fbdefio; - ret = fb_deferred_io_init(info); - if (ret) - goto err_drm_fb_helper_release_info; - - return 0; - -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); -err_vfree: - vfree(screen_buffer); -err_drm_client_framebuffer_delete: - fb_helper->fb = NULL; - fb_helper->buffer = NULL; - drm_client_framebuffer_delete(buffer); - return ret; -} - static void drm_fbdev_ttm_damage_blit_real(struct drm_fb_helper *fb_helper, struct drm_clip_rect *clip, struct iosys_map *dst) @@ -236,115 +163,81 @@ static int drm_fbdev_ttm_helper_fb_dirty(struct drm_fb_helper *helper, } static const struct drm_fb_helper_funcs drm_fbdev_ttm_helper_funcs = { - .fb_probe = drm_fbdev_ttm_helper_fb_probe, .fb_dirty = drm_fbdev_ttm_helper_fb_dirty, }; -static void drm_fbdev_ttm_client_unregister(struct 
drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int drm_fbdev_ttm_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} +/* + * struct drm_driver + */ -static int drm_fbdev_ttm_client_hotplug(struct drm_client_dev *client) +int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; + struct drm_client_dev *client = &fb_helper->client; + struct drm_device *dev = fb_helper->dev; + struct drm_client_buffer *buffer; + struct fb_info *info; + size_t screen_size; + void *screen_buffer; + u32 format; int ret; - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); + drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); + buffer = drm_client_framebuffer_create(client, sizes->surface_width, + sizes->surface_height, format); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); - return 0; + fb_helper->funcs = &drm_fbdev_ttm_helper_funcs; + fb_helper->buffer = buffer; + fb_helper->fb = buffer->fb; -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret); - return ret; -} + screen_size = buffer->gem->size; + screen_buffer = vzalloc(screen_size); + if (!screen_buffer) { + ret = -ENOMEM; + goto err_drm_client_framebuffer_delete; + } -static const struct drm_client_funcs drm_fbdev_ttm_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_ttm_client_unregister, - .restore = drm_fbdev_ttm_client_restore, - .hotplug = drm_fbdev_ttm_client_hotplug, -}; + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_vfree; + } -/** - * drm_fbdev_ttm_setup() - Setup fbdev emulation for TTM-based drivers - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * - * This function sets up fbdev emulation for TTM-based drivers that support - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_ttm_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * In order to provide fixed mmap-able memory ranges, fbdev emulation - * uses a shadow buffer in system memory. The implementation blits the shadow - * fbdev buffer onto the real buffer in regular intervals. - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
- */ -void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; + drm_fb_helper_fill_info(info, fb_helper, sizes); - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); + info->fbops = &drm_fbdev_ttm_fb_ops; - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_ttm_helper_funcs); + /* screen */ + info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; + info->screen_buffer = screen_buffer; + info->fix.smem_len = screen_size; - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_ttm_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } + /* deferred I/O */ + fb_helper->fbdefio.delay = HZ / 20; + fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - drm_client_register(&fb_helper->client); + info->fbdefio = &fb_helper->fbdefio; + ret = fb_deferred_io_init(info); + if (ret) + goto err_drm_fb_helper_release_info; - return; + return 0; -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - return; +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); +err_vfree: + vfree(screen_buffer); +err_drm_client_framebuffer_delete: + fb_helper->fb = NULL; + fb_helper->buffer = NULL; + drm_client_framebuffer_delete(buffer); + return ret; } -EXPORT_SYMBOL(drm_fbdev_ttm_setup); +EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index ad1dc638c83b..cb5f22f5bbb6 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -40,7 +40,7 @@ #include <linux/slab.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> @@ -129,7 +129,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) */ struct drm_file *drm_file_alloc(struct drm_minor *minor) { - static atomic64_t ident = ATOMIC_INIT(0); + static atomic64_t ident = ATOMIC64_INIT(0); struct drm_device *dev = minor->dev; struct drm_file *file; int ret; @@ -157,6 +157,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) spin_lock_init(&file->master_lookup_lock); mutex_init(&file->event_read_lock); + mutex_init(&file->client_name_lock); if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_open(dev, file); @@ -258,6 +259,10 @@ void drm_file_free(struct drm_file *file) WARN_ON(!list_empty(&file->event_list)); put_pid(rcu_access_pointer(file->pid)); + + mutex_destroy(&file->client_name_lock); + kfree(file->client_name); + kfree(file); } @@ -950,6 +955,11 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); } + mutex_lock(&file->client_name_lock); + if (file->client_name) + drm_printf(&p, "drm-client-name:\t%s\n", file->client_name); + mutex_unlock(&file->client_name_lock); + if (dev->driver->show_fdinfo) dev->driver->show_fdinfo(&p, file); } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 193cf8ed7912..3a94ca211f9c 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -36,7 +36,6 @@ * @depth: bit depth per pixel * * Computes a drm fourcc pixel format code for the given @bpp/@depth values. - * Useful in fbdev emulation code, since that deals in those values. 
*/ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) { @@ -140,6 +139,35 @@ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, } EXPORT_SYMBOL(drm_driver_legacy_fb_format); +/** + * drm_driver_color_mode_format - Compute DRM 4CC code from color mode + * @dev: DRM device + * @color_mode: command-line color mode + * + * Computes a DRM 4CC pixel format code for the given color mode using + * drm_driver_legacy_fb_format(). The color mode is in the format used on the + * kernel command line. It specifies the number of bits per pixel + * and color depth in a single value. + * + * Useful in fbdev emulation code, since that deals in those values. The + * helper does not consider YUV or other complicated formats. This means + * only legacy formats are supported (fmt->depth is a legacy field), but + * the framebuffer emulation can only deal with such formats, specifically + * RGB/BGR formats. + */ +uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode) +{ + switch (color_mode) { + case 15: + return drm_driver_legacy_fb_format(dev, 16, 15); + case 32: + return drm_driver_legacy_fb_format(dev, 32, 24); + default: + return drm_driver_legacy_fb_format(dev, color_mode, color_mode); + } +} +EXPORT_SYMBOL(drm_driver_color_mode_format); + /* * Internal function to query information for a given format. See * drm_format_info() for the public API. diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 888aadb6a4ac..b781601946db 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -99,6 +99,7 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y, return 0; } +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_check_src_coords); /** * drm_mode_addfb - add an FB to the graphics configuration @@ -838,6 +839,7 @@ void drm_framebuffer_free(struct kref *kref) fb->funcs->destroy(fb); } +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free); /** * drm_framebuffer_init - initialize a framebuffer @@ -868,7 +870,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; - strcpy(fb->comm, current->comm); + strscpy(fb->comm, current->comm); ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 149b8e25da5b..ee811764c3df 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -114,22 +114,32 @@ drm_gem_init(struct drm_device *dev) } /** - * drm_gem_object_init - initialize an allocated shmem-backed GEM object + * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM + * object in a given shmfs mountpoint + * * @dev: drm_device the object should be initialized for * @obj: drm_gem_object to initialize * @size: object size + * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use + * the usual tmpfs mountpoint (`shm_mnt`). * * Initialize an already allocated GEM object of the specified size with * shmfs backing store.
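Worked examples for drm_driver_color_mode_format() above. The resulting 4CC codes assume drm_driver_legacy_fb_format() applies no driver-specific quirks (a simplifying assumption; drivers may filter the legacy format):

	u32 fmt;

	fmt = drm_driver_color_mode_format(dev, 15); /* bpp 16, depth 15: DRM_FORMAT_XRGB1555 */
	fmt = drm_driver_color_mode_format(dev, 16); /* bpp 16, depth 16: DRM_FORMAT_RGB565 */
	fmt = drm_driver_color_mode_format(dev, 24); /* bpp 24, depth 24: DRM_FORMAT_RGB888 */
	fmt = drm_driver_color_mode_format(dev, 32); /* bpp 32, depth 24: DRM_FORMAT_XRGB8888 */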
*/ -int drm_gem_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size) +int drm_gem_object_init_with_mnt(struct drm_device *dev, + struct drm_gem_object *obj, size_t size, + struct vfsmount *gemfs) { struct file *filp; drm_gem_private_object_init(dev, obj, size); - filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (gemfs) + filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size, + VM_NORESERVE); + else + filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (IS_ERR(filp)) return PTR_ERR(filp); @@ -137,6 +147,22 @@ int drm_gem_object_init(struct drm_device *dev, return 0; } +EXPORT_SYMBOL(drm_gem_object_init_with_mnt); + +/** + * drm_gem_object_init - initialize an allocated shmem-backed GEM object + * @dev: drm_device the object should be initialized for + * @obj: drm_gem_object to initialize + * @size: object size + * + * Initialize an already allocated GEM object of the specified size with + * shmfs backing store. + */ +int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, + size_t size) +{ + return drm_gem_object_init_with_mnt(dev, obj, size, NULL); +} EXPORT_SYMBOL(drm_gem_object_init); /** diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 53c003983ad1..8508060a1a95 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -49,7 +49,8 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { }; static struct drm_gem_shmem_object * -__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, + struct vfsmount *gemfs) { struct drm_gem_shmem_object *shmem; struct drm_gem_object *obj; @@ -76,7 +77,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) drm_gem_private_object_init(dev, obj, size); shmem->map_wc = false; /* dma-buf mappings always use writecombine */ } else { - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); } if (ret) { drm_gem_private_object_fini(obj); @@ -123,11 +124,32 @@ err_free: */ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size) { - return __drm_gem_shmem_create(dev, size, false); + return __drm_gem_shmem_create(dev, size, false, NULL); } EXPORT_SYMBOL_GPL(drm_gem_shmem_create); /** + * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a + * given mountpoint + * @dev: DRM device + * @size: Size of the object to allocate + * @gemfs: tmpfs mount where the GEM object will be created + * + * This function creates a shmem GEM object in a given tmpfs mountpoint. + * + * Returns: + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative + * error code on failure.
+ */ +struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, + size_t size, + struct vfsmount *gemfs) +{ + return __drm_gem_shmem_create(dev, size, false, gemfs); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); + +/** * drm_gem_shmem_free - Free resources associated with a shmem GEM object * @shmem: shmem GEM object to free * @@ -765,7 +787,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, size_t size = PAGE_ALIGN(attach->dmabuf->size); struct drm_gem_shmem_object *shmem; - shmem = __drm_gem_shmem_create(dev, size, true); + shmem = __drm_gem_shmem_create(dev, size, true, NULL); if (IS_ERR(shmem)) return ERR_CAST(shmem); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 6027584406af..22b1fe9c03b8 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -16,7 +16,6 @@ #include <drm/drm_mode.h> #include <drm/drm_plane.h> #include <drm/drm_prime.h> -#include <drm/drm_simple_kms_helper.h> #include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_tt.h> @@ -687,50 +686,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); /* - * Helpers for struct drm_simple_display_pipe_funcs - */ - -/** - * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements &struct - * drm_simple_display_pipe_funcs.prepare_fb - * @pipe: a simple display pipe - * @new_state: the plane's new state - * - * During plane updates, this function pins the GEM VRAM - * objects of the plane's new framebuffer to VRAM. Call - * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. - * - * Returns: - * 0 on success, or - * a negative errno code otherwise. - */ -int drm_gem_vram_simple_display_pipe_prepare_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state) -{ - return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); -} -EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); - -/** - * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements &struct - * drm_simple_display_pipe_funcs.cleanup_fb - * @pipe: a simple display pipe - * @old_state: the plane's old state - * - * During plane updates, this function unpins the GEM VRAM - * objects of the plane's old framebuffer from VRAM. Complements - * drm_gem_vram_simple_display_pipe_prepare_fb(). 
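A hedged usage sketch for drm_gem_shmem_create_with_mnt() above. The driver-owned tmpfs mount is assumed to exist already (for example, one mounted with "huge=within_size" to get transparent huge pages); the "foo" names are hypothetical:

#include <drm/drm_gem_shmem_helper.h>

struct foo_device {
	struct drm_device drm;
	struct vfsmount *gemfs;	/* driver-private tmpfs mount, may be NULL */
};

static struct drm_gem_shmem_object *foo_bo_create(struct foo_device *foo,
						  size_t size)
{
	/* A NULL mount falls back to the default shm_mnt. */
	return drm_gem_shmem_create_with_mnt(&foo->drm, size, foo->gemfs);
}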
- */ -void drm_gem_vram_simple_display_pipe_cleanup_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) -{ - drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state); -} -EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb); - -/* * PRIME helpers */ diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 1705bfc90b1e..b2b6a8e49dda 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -48,6 +48,14 @@ struct drm_prime_file_private; struct drm_printer; struct drm_vblank_crtc; +/* drm_client_event.c */ +#if defined(CONFIG_DRM_CLIENT) +void drm_client_debugfs_init(struct drm_device *dev); +#else +static inline void drm_client_debugfs_init(struct drm_device *dev) +{ } +#endif + /* drm_file.c */ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 51f39912866f..f593dc569d31 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -540,6 +540,55 @@ int drm_version(struct drm_device *dev, void *data, return err; } +/* + * Check if the passed string contains control chars or spaces or + * anything else that would mess up formatted output. + */ +static int drm_validate_value_string(const char *value, size_t len) +{ + int i; + + for (i = 0; i < len; i++) { + if (!isascii(value[i]) || !isgraph(value[i])) + return -EINVAL; + } + return 0; +} + +static int drm_set_client_name(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_set_client_name *name = data; + size_t len = name->name_len; + void __user *user_ptr; + char *new_name; + + if (len > DRM_CLIENT_NAME_MAX_LEN) { + return -EINVAL; + } else if (len) { + user_ptr = u64_to_user_ptr(name->name); + + new_name = memdup_user_nul(user_ptr, len); + if (IS_ERR(new_name)) + return PTR_ERR(new_name); + + if (strlen(new_name) != len || + drm_validate_value_string(new_name, len) < 0) { + kfree(new_name); + return -EINVAL; + } + } else { + new_name = NULL; + } + + mutex_lock(&file_priv->client_name_lock); + kfree(file_priv->client_name); + file_priv->client_name = new_name; + mutex_unlock(&file_priv->client_name_lock); + + return 0; +} + static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) { /* ROOT_ONLY is only for CAP_SYS_ADMIN */ @@ -610,6 +659,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_NAME, drm_set_client_name, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER), diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 2bc3973d35a1..5e5c5f84daac 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1521,6 +1521,22 @@ void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx, EXPORT_SYMBOL(mipi_dsi_compression_mode_ext_multi); /** + * mipi_dsi_compression_mode_multi() - enable/disable DSC on the peripheral + * @ctx: Context for multiple DSI transactions + * @enable: Whether to enable or disable the DSC + * + * Enable or disable Display Stream Compression on the peripheral using the + * default Picture Parameter
Set and VESA DSC 1.1 algorithm. + */ +void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx, + bool enable) +{ + return mipi_dsi_compression_mode_ext_multi(ctx, enable, + MIPI_DSI_COMPRESSION_DSC, 0); +} +EXPORT_SYMBOL(mipi_dsi_compression_mode_multi); + +/** * mipi_dsi_dcs_nop_multi() - send DCS NOP packet * @ctx: Context for multiple DSI transactions * diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 5ace481c1901..ca254611b382 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -151,7 +151,7 @@ static void show_leaks(struct drm_mm *mm) { } INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, u64, __subtree_last, - START, LAST, static inline, drm_mm_interval_tree) + START, LAST, static inline __maybe_unused, drm_mm_interval_tree) struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) @@ -611,7 +611,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm, } EXPORT_SYMBOL(drm_mm_insert_node_in_range); -static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node) +static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node) { return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); } diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index df4cc0e8e263..e943205a2394 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -81,6 +81,7 @@ int drm_mode_object_add(struct drm_device *dev, { return __drm_mode_object_add(dev, obj, obj_type, true, NULL); } +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_mode_object_add); void drm_mode_object_register(struct drm_device *dev, struct drm_mode_object *obj) diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index 2c582020cb42..5565464c1734 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -21,7 +21,7 @@ */ #include <drm/drm_atomic_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> @@ -185,7 +185,7 @@ EXPORT_SYMBOL(drm_crtc_init); * Zero on success, negative error code on error. * * See also: - * drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked(). + * drm_kms_helper_poll_disable() and drm_client_dev_suspend(). */ int drm_mode_config_helper_suspend(struct drm_device *dev) { @@ -199,10 +199,11 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (dev->mode_config.poll_enabled) drm_kms_helper_poll_disable(dev); - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + drm_client_dev_suspend(dev, false); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + drm_client_dev_resume(dev, false); + /* * Don't enable polling if it was never initialized */ @@ -230,7 +231,7 @@ EXPORT_SYMBOL(drm_mode_config_helper_suspend); * Zero on success, negative error code on error. * * See also: - * drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable(). + * drm_client_dev_resume() and drm_kms_helper_poll_enable(). 
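The DRM_IOCTL_SET_CLIENT_NAME handler added in drm_ioctl.c above takes a user pointer plus a length. A hedged userspace sketch; the uapi struct drm_set_client_name (fields name_len and name, matching the handler above) is assumed to come from the updated <drm/drm.h>:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* fd is an open DRM file descriptor, e.g. for /dev/dri/renderD128. */
static int set_client_name(int fd, const char *name)
{
	struct drm_set_client_name args = {
		.name_len = strlen(name),	/* terminating NUL not counted */
		.name = (uintptr_t)name,
	};

	/*
	 * The kernel accepts at most DRM_CLIENT_NAME_MAX_LEN bytes of
	 * printable ASCII (isascii() && isgraph()); spaces and control
	 * characters are rejected with -EINVAL.
	 */
	return ioctl(fd, DRM_IOCTL_SET_CLIENT_NAME, &args);
}

Once set, the name appears in the debugfs clients list and as the "drm-client-name:" key in fdinfo, per the drm_debugfs.c and drm_file.c hunks earlier in this series.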
*/ int drm_mode_config_helper_resume(struct drm_device *dev) { @@ -247,7 +248,8 @@ DRM_ERROR("Failed to resume (%d)\n", ret); dev->mode_config.suspend_state = NULL; - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + drm_client_dev_resume(dev, false); + /* * Don't enable polling if it is not initialized */ diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 177b600895d3..5c2abc9eca9c 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -341,8 +341,23 @@ static int drm_of_lvds_get_remote_pixels_type( return pixels_type; } +static int __drm_of_lvds_get_dual_link_pixel_order(int p1_pt, int p2_pt) +{ + /* + * A valid dual-LVDS bus is found when one port is marked with + * "dual-lvds-even-pixels", and the other port is marked with + * "dual-lvds-odd-pixels", bail out if the markers are not right. + */ + if (p1_pt + p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return -EINVAL; + + return p1_pt == DRM_OF_LVDS_EVEN ? + DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : + DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; +} + /** - * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order + * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link source pixel order * @port1: First DT port node of the Dual-link LVDS source * @port2: Second DT port node of the Dual-link LVDS source * @@ -387,19 +402,58 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, if (remote_p2_pt < 0) return remote_p2_pt; - /* - * A valid dual-lVDS bus is found when one remote port is marked with - * "dual-lvds-even-pixels", and the other remote port is marked with - * "dual-lvds-odd-pixels", bail out if the markers are not right. - */ - if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return __drm_of_lvds_get_dual_link_pixel_order(remote_p1_pt, remote_p2_pt); +} +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); + +/** + * drm_of_lvds_get_dual_link_pixel_order_sink - Get LVDS dual-link sink pixel order + * @port1: First DT port node of the Dual-link LVDS sink + * @port2: Second DT port node of the Dual-link LVDS sink + * + * An LVDS dual-link connection is made of two links, with even pixels + * transiting on one link, and odd pixels on the other link. This function + * returns, for two ports of an LVDS dual-link sink, which port shall receive + * the even and odd pixels, based on the requirements of the sink. + * + * The pixel order is determined from the dual-lvds-even-pixels and + * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those + * properties are not present, or if their usage is not valid, this function + * returns -EINVAL. + * + * If either port is not connected, this function returns -EPIPE. + * + * @port1 and @port2 are typically DT sibling nodes, but may have different + * parents when, for instance, two separate LVDS decoders receive the even and + * odd pixels. + * + * Return: + * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 receives even pixels and @port2 + * receives odd pixels + * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 receives odd pixels and @port2 + * receives even pixels + * * -EINVAL - @port1 or @port2 are NULL + * * -EPIPE - when @port1 or @port2 are not connected + */ +int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2) +{ + int sink_p1_pt, sink_p2_pt; + + if (!port1 || !port2) return -EINVAL; - return remote_p1_pt == DRM_OF_LVDS_EVEN ?
- DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : - DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; + sink_p1_pt = drm_of_lvds_get_port_pixels_type(port1); + if (!sink_p1_pt) + return -EPIPE; + + sink_p2_pt = drm_of_lvds_get_port_pixels_type(port2); + if (!sink_p2_pt) + return -EPIPE; + + return __drm_of_lvds_get_dual_link_pixel_order(sink_p1_pt, sink_p2_pt); } -EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order_sink); /** * drm_of_lvds_get_data_mapping - Get LVDS data mapping @@ -410,7 +464,9 @@ EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); * Return: * * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18" * * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24" + * * MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA - data-mapping is "jeida-30" * * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24" + * * MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG - data-mapping is "vesa-30" * * -EINVAL - the "data-mapping" property is unsupported * * -ENODEV - the "data-mapping" property is missing */ @@ -427,8 +483,12 @@ int drm_of_lvds_get_data_mapping(const struct device_node *port) return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; if (!strcmp(mapping, "jeida-24")) return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + if (!strcmp(mapping, "jeida-30")) + return MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA; if (!strcmp(mapping, "vesa-24")) return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + if (!strcmp(mapping, "vesa-30")) + return MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG; return -EINVAL; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 2d84d7ea1ab7..4a73821b81f6 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -184,6 +184,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO AYANEO 2 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* AYA NEO 2021 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"), @@ -196,6 +202,18 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_PRODUCT_NAME, "AIR"), }, .driver_data = (void *)&lcd1080x1920_leftside_up, + }, { /* AYA NEO Founder */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO GEEK */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* AYA NEO NEXT */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 74412b7bf936..0a9ecc1380d2 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -209,6 +209,14 @@ static u32 convert_xrgb8888_to_argb2101010(u32 pix) return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); } +static u32 convert_xrgb8888_to_abgr2101010(u32 pix) +{ + pix = ((pix & 0x00FF0000) >> 14) | + ((pix & 0x0000FF00) << 4) | + ((pix & 0x000000FF) << 22); + return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); +} + /* * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format * @color: input color, in xrgb8888 format @@ 
-242,6 +250,8 @@ static u32 convert_from_xrgb8888(u32 color, u32 format) return convert_xrgb8888_to_xrgb2101010(color); case DRM_FORMAT_ARGB2101010: return convert_xrgb8888_to_argb2101010(color); + case DRM_FORMAT_ABGR2101010: + return convert_xrgb8888_to_abgr2101010(color); default: WARN_ONCE(1, "Can't convert to %p4cc\n", &format); return 0; diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 1ef56cb07dfb..09500cddc009 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -209,12 +209,9 @@ const FORMAT_INFOS_QR_L: [u16; 8] = [ impl Version { /// Returns the smallest QR version that can hold these segments. fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> { - for v in (1..=40).map(|k| Version(k)) { - if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() { - return Some(v); - } - } - None + (1..=40) + .map(Version) + .find(|&v| v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum()) } fn width(&self) -> u8 { @@ -242,7 +239,7 @@ impl Version { } fn alignment_pattern(&self) -> &'static [u8] { - &ALIGNMENT_PATTERNS[self.0 - 1] + ALIGNMENT_PATTERNS[self.0 - 1] } fn poly(&self) -> &'static [u8] { @@ -479,7 +476,7 @@ struct EncodedMsg<'a> { /// Data to be put in the QR code, with correct segment encoding, padding, and /// Error Code Correction. impl EncodedMsg<'_> { - fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> { + fn new<'a>(segments: &[&Segment<'_>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> { let version = Version::from_segments(segments)?; let ec_size = version.ec_size(); let g1_blocks = version.g1_blocks(); @@ -492,7 +489,7 @@ impl EncodedMsg<'_> { data.fill(0); let mut em = EncodedMsg { - data: data, + data, ec_size, g1_blocks, g2_blocks, @@ -722,7 +719,10 @@ impl QrImage<'_> { fn is_finder(&self, x: u8, y: u8) -> bool { let end = self.width - 8; - (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + #[expect(clippy::nonminimal_bool)] + { + (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + } } // Alignment pattern: 5x5 squares in a grid. @@ -979,10 +979,11 @@ pub unsafe extern "C" fn drm_panic_qr_generate( /// * `url_len`: Length of the URL. /// /// * If `url_len` > 0, remove the 2 segments header/length and also count the -/// conversion to numeric segments. +/// conversion to numeric segments. /// * If `url_len` = 0, only removes 3 bytes for 1 binary segment. #[no_mangle] pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize { + #[expect(clippy::manual_range_contains)] if version < 1 || version > 40 { return 0; } diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index 0081190201a7..08cfea04e22b 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -235,6 +235,20 @@ void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf) } EXPORT_SYMBOL(__drm_printfn_err); +void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf) +{ + unsigned int counter = ++p->line.counter; + const char *prefix = p->prefix ?: ""; + const char *pad = p->prefix ?
" " : ""; + + if (p->line.series) + drm_printf(p->arg, "%s%s%u.%u: %pV", + prefix, pad, p->line.series, counter, vaf); + else + drm_printf(p->arg, "%s%s%u: %pV", prefix, pad, counter, vaf); +} +EXPORT_SYMBOL(__drm_printfn_line); + /** * drm_puts - print a const string to a &drm_printer stream * @p: the &drm printer diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 92f21764246f..96b266b37ba4 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -33,7 +33,7 @@ #include <linux/moduleparam.h> #include <drm/drm_bridge.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index a031c335bdb9..33a3c98a962d 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -100,15 +100,9 @@ drm_writeback_fence_get_timeline_name(struct dma_fence *fence) return wb_connector->timeline_name; } -static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence) -{ - return true; -} - static const struct dma_fence_ops drm_writeback_fence_ops = { .get_driver_name = drm_writeback_fence_get_driver_name, .get_timeline_name = drm_writeback_fence_get_timeline_name, - .enable_signaling = drm_writeback_fence_enable_signaling, }; static int create_writeback_properties(struct drm_device *dev) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 384df1659be6..b13a17276d07 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -482,7 +482,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, } else { CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | - VIVS_GL_FLUSH_CACHE_COLOR); + VIVS_GL_FLUSH_CACHE_COLOR | + VIVS_GL_FLUSH_CACHE_SHADER_L1); if (has_blt) { CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c index 721d633aece9..7aa5f14d0c87 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c @@ -5,8 +5,6 @@ #include <linux/dma-mapping.h> -#include <drm/drm_mm.h> - #include "etnaviv_cmdbuf.h" #include "etnaviv_gem.h" #include "etnaviv_gpu.h" @@ -55,6 +53,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev) return suballoc; free_suballoc: + mutex_destroy(&suballoc->lock); kfree(suballoc); return ERR_PTR(ret); @@ -79,6 +78,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc) { dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr); + mutex_destroy(&suballoc->lock); kfree(suballoc); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 6500f3999c5f..9b4e2f4b1bc7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -538,6 +538,16 @@ static int etnaviv_bind(struct device *dev) priv->num_gpus = 0; priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + /* + * If the GPU is part of a system with DMA addressing limitations, + * request pages for our SHM backend buffers from the DMA32 zone to + * hopefully avoid performance killing SWIOTLB bounce buffering. 
+ */ + if (dma_addressing_limited(dev)) { + priv->shm_gfp_mask |= GFP_DMA32; + priv->shm_gfp_mask &= ~__GFP_HIGHMEM; + } + priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev); if (IS_ERR(priv->cmdbuf_suballoc)) { dev_err(drm->dev, "Failed to create cmdbuf suballocator\n"); @@ -564,6 +574,7 @@ out_unbind: out_destroy_suballoc: etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc); out_free_priv: + mutex_destroy(&priv->gem_lock); kfree(priv); out_put: drm_dev_put(drm); @@ -608,7 +619,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!of_device_is_available(core_node)) continue; - drm_of_component_match_add(&pdev->dev, &match, + drm_of_component_match_add(dev, &match, component_compare_of, core_node); } } else { @@ -631,9 +642,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) * bit to make sure we are allocating the command buffers and * TLBs in the lower 4 GiB address space. */ - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { - dev_dbg(&pdev->dev, "No suitable DMA available\n"); + if (dma_set_mask(dev, DMA_BIT_MASK(40)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + dev_dbg(dev, "No suitable DMA available\n"); return -ENODEV; } @@ -644,7 +655,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) */ first_node = etnaviv_of_first_available_node(); if (first_node) { - of_dma_configure(&pdev->dev, first_node, true); + of_dma_configure(dev, first_node, true); of_node_put(first_node); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5c0c9d4e3be1..16473c371444 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -514,6 +514,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) etnaviv_obj->ops->release(etnaviv_obj); drm_gem_object_release(obj); + mutex_destroy(&etnaviv_obj->lock); kfree(etnaviv_obj); } @@ -543,7 +544,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { .vm_ops = &vm_ops, }; -static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, +static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj) { struct etnaviv_gem_object *etnaviv_obj; @@ -570,6 +571,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, if (!etnaviv_obj) return -ENOMEM; + etnaviv_obj->size = ALIGN(size, SZ_4K); etnaviv_obj->flags = flags; etnaviv_obj->ops = ops; @@ -590,15 +592,13 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *obj = NULL; int ret; - size = PAGE_ALIGN(size); - - ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj); + ret = etnaviv_gem_new_impl(dev, size, flags, &etnaviv_gem_shmem_ops, &obj); if (ret) goto fail; lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class); - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size)); if (ret) goto fail; @@ -627,7 +627,7 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, struct drm_gem_object *obj; int ret; - ret = etnaviv_gem_new_impl(dev, flags, ops, &obj); + ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj); if (ret) return ret; @@ -686,7 +686,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) kfree(etnaviv_obj->sgt); } if (etnaviv_obj->pages) { - int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + unsigned int npages 
= etnaviv_obj->base.size >> PAGE_SHIFT; unpin_user_pages(etnaviv_obj->pages, npages); kvfree(etnaviv_obj->pages); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index a42d260cac2c..687555aae807 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -36,6 +36,11 @@ struct etnaviv_gem_object { const struct etnaviv_gem_ops *ops; struct mutex lock; + /* + * The actual size that is visible to the GPU, not necessarily + * PAGE_SIZE aligned, but should be aligned to GPU page size. + */ + u32 size; u32 flags; struct list_head gem_node; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 3524b5811682..6b98200068e4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -17,7 +17,7 @@ static struct lock_class_key etnaviv_prime_lock_class; struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - int npages = obj->size >> PAGE_SHIFT; + unsigned int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ return ERR_PTR(-EINVAL); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 3d0f8d182506..3c0a5c3e0e3d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -6,7 +6,6 @@ #include <drm/drm_file.h> #include <linux/dma-fence-array.h> #include <linux/file.h> -#include <linux/pm_runtime.h> #include <linux/dma-resv.h> #include <linux/sync_file.h> #include <linux/uaccess.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 7c7f97793ddd..c7d59c06ccd1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -574,8 +574,8 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) continue; } - /* disable debug registers, as they are not normally needed */ - control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; + /* enable debug register access */ + control &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); failed = false; @@ -839,17 +839,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (ret) goto fail; - /* - * If the GPU is part of a system with DMA addressing limitations, - * request pages for our SHM backend buffers from the DMA32 zone to - * hopefully avoid performance killing SWIOTLB bounce buffering. 
- */ - if (dma_addressing_limited(gpu->dev)) - priv->shm_gfp_mask |= GFP_DMA32; - /* Create buffer: */ - ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, - PAGE_SIZE); + ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, SZ_4K); if (ret) { dev_err(gpu->dev, "could not create command buffer\n"); goto fail; } @@ -1330,17 +1321,16 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu, { u32 val; + mutex_lock(&gpu->lock); + /* disable clock gating */ val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); - /* enable debug register */ - val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); - val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); - sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE); + + mutex_unlock(&gpu->lock); } static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, @@ -1350,23 +1340,22 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, unsigned int i; u32 val; + mutex_lock(&gpu->lock); + sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST); + /* enable clock gating */ + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); + val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); + + mutex_unlock(&gpu->lock); + for (i = 0; i < submit->nr_pmrs; i++) { const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; *pmr->bo_vma = pmr->sequence; } - - /* disable debug register */ - val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); - val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); - - /* enable clock gating */ - val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); - val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); } @@ -1862,7 +1851,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) if (!gpu) return -ENOMEM; - gpu->dev = &pdev->dev; + gpu->dev = dev; mutex_init(&gpu->lock); mutex_init(&gpu->sched_lock); @@ -1876,8 +1865,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) if (gpu->irq < 0) return gpu->irq; - err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, - dev_name(gpu->dev), gpu); + err = devm_request_irq(dev, gpu->irq, irq_handler, 0, + dev_name(dev), gpu); if (err) { dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); return err; } @@ -1914,13 +1903,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) * autosuspend delay is rather arbitrary: no measurements have * yet been performed to determine an appropriate value. 
*/ - pm_runtime_use_autosuspend(gpu->dev); - pm_runtime_set_autosuspend_delay(gpu->dev, 200); - pm_runtime_enable(gpu->dev); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 200); + pm_runtime_enable(dev); - err = component_add(&pdev->dev, &gpu_ops); + err = component_add(dev, &gpu_ops); if (err < 0) { - dev_err(&pdev->dev, "failed to register component: %d\n", err); + dev_err(dev, "failed to register component: %d\n", err); return err; } @@ -1929,8 +1918,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) static void etnaviv_gpu_platform_remove(struct platform_device *pdev) { + struct etnaviv_gpu *gpu = dev_get_drvdata(&pdev->dev); + component_del(&pdev->dev, &gpu_ops); pm_runtime_disable(&pdev->dev); + + mutex_destroy(&gpu->lock); + mutex_destroy(&gpu->sched_lock); } static int etnaviv_gpu_rpm_suspend(struct device *dev) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 31322195b9e4..4d8a7d48ade3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -144,6 +144,7 @@ struct etnaviv_gpu { /* hang detection */ u32 hangcheck_dma_addr; + u32 hangcheck_primid; u32 hangcheck_fence; void __iomem *mmio; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 1661d589bf3e..7e065b3723cf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context, return ret; } -static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, +static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, + u32 iova, unsigned int va_len, struct sg_table *sgt, int prot) -{ struct scatterlist *sg; +{ + struct scatterlist *sg; unsigned int da = iova; unsigned int i; int ret; @@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, for_each_sgtable_dma_sg(sgt, sg, i) { phys_addr_t pa = sg_dma_address(sg) - sg->offset; - size_t bytes = sg_dma_len(sg) + sg->offset; + unsigned int da_len = sg_dma_len(sg) + sg->offset; + unsigned int bytes = min_t(unsigned int, da_len, va_len); - VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); + VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes); ret = etnaviv_context_map(context, da, pa, bytes, prot); if (ret) goto fail; + va_len -= bytes; da += bytes; } @@ -104,21 +108,7 @@ fail: static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, struct sg_table *sgt, unsigned len) { - struct scatterlist *sg; - unsigned int da = iova; - int i; - - for_each_sgtable_dma_sg(sgt, sg, i) { - size_t bytes = sg_dma_len(sg) + sg->offset; - - etnaviv_context_unmap(context, da, bytes); - - VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); - - BUG_ON(!PAGE_ALIGNED(bytes)); - - da += bytes; - } + etnaviv_context_unmap(context, iova, len); context->flush_seq++; } @@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, lockdep_assert_held(&context->lock); etnaviv_iommu_unmap(context, mapping->vram_node.start, - etnaviv_obj->sgt, etnaviv_obj->base.size); + etnaviv_obj->sgt, etnaviv_obj->size); drm_mm_remove_node(&mapping->vram_node); } @@ -305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, node = &mapping->vram_node; if (va) - ret = etnaviv_iommu_insert_exact(context, node, - etnaviv_obj->base.size, va); + ret = etnaviv_iommu_insert_exact(context, node, 
etnaviv_obj->size, va); else - ret = etnaviv_iommu_find_iova(context, node, - etnaviv_obj->base.size); + ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size); if (ret < 0) goto unlock; mapping->iova = node->start; - ret = etnaviv_iommu_map(context, node->start, sgt, + ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt, ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE); if (ret < 0) { @@ -358,7 +346,7 @@ static void etnaviv_iommu_context_free(struct kref *kref) container_of(kref, struct etnaviv_iommu_context, refcount); etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping); - + mutex_destroy(&context->lock); context->global->ops->free(context); } void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index c01a147f0dfd..7f8ac0178547 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -61,7 +61,6 @@ struct etnaviv_iommu_global { /* P(age) T(able) A(rray) */ u64 *pta_cpu; dma_addr_t pta_dma; - struct spinlock pta_lock; DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES); } v2; }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index dc9dea664a28..d53a5c293373 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -62,6 +62,8 @@ static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu, u32 value = 0; unsigned i; + lockdep_assert_held(&gpu->lock); + for (i = 0; i < gpu->identity.pixel_pipes; i++) { pipe_select(gpu, clock, i); value += perf_reg_read(gpu, domain, signal); @@ -81,6 +83,8 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu, u32 value = 0; unsigned i; + lockdep_assert_held(&gpu->lock); + for (i = 0; i < gpu->identity.pixel_pipes; i++) { pipe_select(gpu, clock, i); value += gpu_read(gpu, signal->data); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index ab9ca4824b62..5b67eda122db 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -11,6 +11,7 @@ #include "etnaviv_gpu.h" #include "etnaviv_sched.h" #include "state.xml.h" +#include "state_hi.xml.h" static int etnaviv_job_hang_limit = 0; module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); @@ -35,7 +36,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; - u32 dma_addr; + u32 dma_addr, primid = 0; int change; /* @@ -52,10 +53,22 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job */ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); change = dma_addr - gpu->hangcheck_dma_addr; + if (submit->exec_state == ETNA_PIPE_3D) { + /* guard against concurrent usage from perfmon_sample */ + mutex_lock(&gpu->lock); + gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0, + VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM << + VIVS_MC_PROFILE_CONFIG0_FE__SHIFT); + primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ); + mutex_unlock(&gpu->lock); + } if (gpu->state == ETNA_GPU_STATE_RUNNING && (gpu->completed_fence != gpu->hangcheck_fence || - change < 0 || change > 16)) { + change < 0 || change > 16 || + (submit->exec_state == ETNA_PIPE_3D && + gpu->hangcheck_primid != primid))) { gpu->hangcheck_dma_addr = dma_addr; + gpu->hangcheck_primid = primid; gpu->hangcheck_fence = gpu->completed_fence; goto out_no_timeout; } @@ -72,7 +85,7 
@@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job drm_sched_resubmit_jobs(&gpu->sched); - drm_sched_start(&gpu->sched); + drm_sched_start(&gpu->sched, 0); return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 829bc528e618..f7bc5f6e20ff 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state.xml ( 29355 bytes, from 2024-01-19 10:18:54) -- common.xml ( 35664 bytes, from 2023-12-06 10:55:32) -- common_3d.xml ( 15069 bytes, from 2023-11-22 10:05:24) -- state_hi.xml ( 35854 bytes, from 2023-12-11 15:50:17) -- copyright.xml ( 1597 bytes, from 2016-11-10 13:58:32) -- state_2d.xml ( 52271 bytes, from 2023-06-02 12:35:03) -- state_3d.xml ( 89522 bytes, from 2024-01-19 10:18:54) -- state_blt.xml ( 14592 bytes, from 2023-11-22 10:05:09) -- state_vg.xml ( 5975 bytes, from 2016-11-10 13:58:32) - -Copyright (C) 2012-2023 by the following authors: +- state.xml ( 30729 bytes, from 2024-06-21 11:31:54) +- common.xml ( 35664 bytes, from 2023-12-13 09:33:18) +- common_3d.xml ( 15069 bytes, from 2023-12-13 09:33:18) +- state_hi.xml ( 35909 bytes, from 2024-06-21 11:31:54) +- copyright.xml ( 1597 bytes, from 2020-10-28 12:56:03) +- state_2d.xml ( 52271 bytes, from 2023-05-30 20:50:02) +- state_3d.xml ( 89626 bytes, from 2024-06-21 11:32:57) +- state_blt.xml ( 14592 bytes, from 2023-12-13 09:33:18) +- state_vg.xml ( 5975 bytes, from 2020-10-28 12:56:03) + +Copyright (C) 2012-2024 by the following authors: - Wladimir J. van der Laan <laanwj@gmail.com> - Christian Gmeiner <christian.gmeiner@gmail.com> - Lucas Stach <l.stach@pengutronix.de> @@ -467,6 +467,7 @@ DEALINGS IN THE SOFTWARE. 
#define VIVS_MC_PROFILE_CONFIG0 0x00000470 #define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff #define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM 0x00000009 #define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT 0x0000000a #define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT 0x0000000b #define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT 0x0000000c diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 733b109a5095..0d13828e7d9e 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -4,6 +4,7 @@ config DRM_EXYNOS depends on OF && DRM && COMMON_CLK depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on MMU + select DRM_CLIENT_SELECTION select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP select DRM_KMS_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 0d185c0564b9..c65364087fac 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,6 +37,24 @@ #define WINDOWS_NR 2 +struct decon_data { + unsigned int vidw_buf_start_base; + unsigned int shadowcon_win_protect_shift; + unsigned int wincon_burstlen_shift; +}; + +static struct decon_data exynos7_decon_data = { + .vidw_buf_start_base = 0x80, + .shadowcon_win_protect_shift = 10, + .wincon_burstlen_shift = 11, +}; + +static struct decon_data exynos7870_decon_data = { + .vidw_buf_start_base = 0x880, + .shadowcon_win_protect_shift = 8, + .wincon_burstlen_shift = 10, +}; + struct decon_context { struct device *dev; struct drm_device *drm_dev; @@ -55,11 +73,19 @@ struct decon_context { wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; + const struct decon_data *data; struct drm_encoder *encoder; }; static const struct of_device_id decon_driver_dt_match[] = { - {.compatible = "samsung,exynos7-decon"}, + { + .compatible = "samsung,exynos7-decon", + .data = &exynos7_decon_data, + }, + { + .compatible = "samsung,exynos7870-decon", + .data = &exynos7870_decon_data, + }, {}, }; MODULE_DEVICE_TABLE(of, decon_driver_dt_match); @@ -81,10 +107,31 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { DRM_PLANE_TYPE_CURSOR, }; -static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) +/** + * decon_shadow_protect_win() - disable updating values from shadow registers at vsync + * + * @ctx: display and enhancement controller context + * @win: window to protect registers for + * @protect: 1 to protect (disable updates) + */ +static void decon_shadow_protect_win(struct decon_context *ctx, + unsigned int win, bool protect) { - struct decon_context *ctx = crtc->ctx; + u32 bits, val; + unsigned int shift = ctx->data->shadowcon_win_protect_shift; + + bits = SHADOWCON_WINx_PROTECT(shift, win); + + val = readl(ctx->regs + SHADOWCON); + if (protect) + val |= bits; + else + val &= ~bits; + writel(val, ctx->regs + SHADOWCON); +} +static void decon_wait_for_vblank(struct decon_context *ctx) +{ if (ctx->suspended) return; @@ -100,25 +147,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n"); } -static void decon_clear_channels(struct exynos_drm_crtc *crtc) +static void decon_clear_channels(struct decon_context *ctx) { - struct decon_context *ctx = crtc->ctx; unsigned int win, ch_enabled = 0; + u32 val; /* Check if any channel is enabled. 
*/ for (win = 0; win < WINDOWS_NR; win++) { - u32 val = readl(ctx->regs + WINCON(win)); + val = readl(ctx->regs + WINCON(win)); if (val & WINCONx_ENWIN) { + decon_shadow_protect_win(ctx, win, true); + val &= ~WINCONx_ENWIN; writel(val, ctx->regs + WINCON(win)); ch_enabled = 1; + + decon_shadow_protect_win(ctx, win, false); } } + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); + /* Wait for vsync, as disable channel takes effect at next vsync */ if (ch_enabled) - decon_wait_for_vblank(ctx->crtc); + decon_wait_for_vblank(ctx); } static int decon_ctx_initialize(struct decon_context *ctx, @@ -126,7 +181,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, { ctx->drm_dev = drm_dev; - decon_clear_channels(ctx->crtc); + decon_clear_channels(ctx); return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } @@ -140,7 +195,7 @@ static void decon_ctx_remove(struct decon_context *ctx) static u32 decon_calc_clkdiv(struct decon_context *ctx, const struct drm_display_mode *mode) { - unsigned long ideal_clk = mode->clock; + unsigned long ideal_clk = mode->clock * 1000; u32 clkdiv; /* Find the clock divider value that gets us closest to ideal_clk */ @@ -263,6 +318,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, { unsigned long val; int padding; + unsigned int shift = ctx->data->wincon_burstlen_shift; val = readl(ctx->regs + WINCON(win)); val &= ~WINCONx_BPPMODE_MASK; @@ -270,44 +326,44 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, switch (fb->format->format) { case DRM_FORMAT_RGB565: val |= WINCONx_BPPMODE_16BPP_565; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_XRGB8888: val |= WINCONx_BPPMODE_24BPP_xRGB; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_XBGR8888: val |= WINCONx_BPPMODE_24BPP_xBGR; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_RGBX8888: val |= WINCONx_BPPMODE_24BPP_RGBx; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_BGRX8888: val |= WINCONx_BPPMODE_24BPP_BGRx; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_ARGB8888: val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_ABGR8888: val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_RGBA8888: val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_BGRA8888: default: val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; } @@ -323,8 +379,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width; if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { - val &= ~WINCONx_BURSTLEN_MASK; - val |= WINCONx_BURSTLEN_8WORD; + val &= ~WINCONx_BURSTLEN_MASK(shift); + val |= WINCONx_BURSTLEN_8WORD(shift); } writel(val, ctx->regs + WINCON(win)); @@ -343,28 +399,6 @@ static void decon_win_set_colkey(struct decon_context 
*ctx, unsigned int win) writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); } -/** - * decon_shadow_protect_win() - disable updating values from shadow registers at vsync - * - * @ctx: display and enhancement controller context - * @win: window to protect registers for - * @protect: 1 to protect (disable updates) - */ -static void decon_shadow_protect_win(struct decon_context *ctx, - unsigned int win, bool protect) -{ - u32 bits, val; - - bits = SHADOWCON_WINx_PROTECT(win); - - val = readl(ctx->regs + SHADOWCON); - if (protect) - val |= bits; - else - val &= ~bits; - writel(val, ctx->regs + SHADOWCON); -} - static void decon_atomic_begin(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -391,6 +425,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, unsigned int win = plane->index; unsigned int cpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; + unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base; if (ctx->suspended) return; @@ -407,7 +442,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, /* buffer start address */ val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0); - writel(val, ctx->regs + VIDW_BUF_START(win)); + writel(val, ctx->regs + VIDW_BUF_START(vidw_addr0_base, win)); padding = (pitch / cpp) - fb->width; @@ -689,6 +724,7 @@ static int decon_probe(struct platform_device *pdev) ctx->dev = dev; ctx->suspended = true; + ctx->data = of_device_get_match_data(dev); i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); if (i80_if_timings) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 0ed4f2b8595a..1815374c38df 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -19,9 +19,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, enum exynos_drm_output_type out_type, const struct exynos_drm_crtc_ops *ops, void *context); -void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc); -void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, - struct exynos_drm_plane *exynos_plane); /* This function gets crtc device matched with out_type. 
*/ struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 7c59e1164a48..2a466d8179f4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> @@ -111,6 +112,7 @@ static const struct drm_driver exynos_drm_driver = { .dumb_create = exynos_drm_gem_dumb_create, .gem_prime_import = exynos_drm_gem_prime_import, .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, + EXYNOS_DRM_FBDEV_DRIVER_OPS, .ioctls = exynos_ioctls, .num_ioctls = ARRAY_SIZE(exynos_ioctls), .fops = &exynos_drm_driver_fops, @@ -288,7 +290,7 @@ static int exynos_drm_bind(struct device *dev) if (ret < 0) goto err_cleanup_poll; - exynos_drm_fbdev_setup(drm); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index a379c8ca435a..9526a25e90ac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -23,7 +23,6 @@ #include "exynos_drm_fbdev.h" #define MAX_CONNECTOR 4 -#define PREFERRED_BPP 32 static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { @@ -87,8 +86,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, return 0; } -static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static const struct drm_fb_helper_funcs exynos_drm_fbdev_helper_funcs = { +}; + +int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct exynos_drm_gem *exynos_gem; struct drm_device *dev = helper->dev; @@ -120,6 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, ret = PTR_ERR(helper->fb); goto err_destroy_gem; } + helper->funcs = &exynos_drm_fbdev_helper_funcs; ret = exynos_drm_fbdev_update(helper, sizes, exynos_gem); if (ret < 0) @@ -134,93 +137,3 @@ err_destroy_gem: exynos_drm_gem_destroy(exynos_gem); return ret; } - -static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { - .fb_probe = exynos_drm_fbdev_create, -}; - -/* - * struct drm_client - */ - -static void exynos_drm_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int exynos_drm_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int exynos_drm_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, 
"Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs exynos_drm_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = exynos_drm_fbdev_client_unregister, - .restore = exynos_drm_fbdev_client_restore, - .hotplug = exynos_drm_fbdev_client_hotplug, -}; - -void exynos_drm_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &exynos_drm_fbdev_client_funcs); - if (ret) - goto err_drm_client_init; - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h index 1e1dea627cd9..02a9201abea3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h @@ -11,12 +11,17 @@ #ifndef _EXYNOS_DRM_FBDEV_H_ #define _EXYNOS_DRM_FBDEV_H_ -#ifdef CONFIG_DRM_FBDEV_EMULATION -void exynos_drm_fbdev_setup(struct drm_device *dev); +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + +#if defined(CONFIG_DRM_FBDEV_EMULATION) +int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes); +#define EXYNOS_DRM_FBDEV_DRIVER_OPS \ + .fbdev_probe = exynos_drm_fbdev_driver_fbdev_probe #else -static inline void exynos_drm_fbdev_setup(struct drm_device *dev) -{ -} +#define EXYNOS_DRM_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 59fa22050717..1ae90ef1fc23 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1286,7 +1286,7 @@ static int gsc_probe(struct platform_device *pdev) return ret; } - /* context initailization */ + /* context initialization */ ctx->id = pdev->id; platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 1e26cd4f8347..c9d4b9146df9 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -883,27 +883,32 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static int hdmi_get_modes(struct drm_connector *connector) { struct hdmi_context *hdata = connector_to_hdmi(connector); - struct edid *edid; + const struct drm_display_info *info = &connector->display_info; + const struct drm_edid *drm_edid; int ret; if (!hdata->ddc_adpt) goto no_edid; - edid = drm_get_edid(connector, hdata->ddc_adpt); - if (!edid) + drm_edid = drm_edid_read_ddc(connector, hdata->ddc_adpt); + + ret = drm_edid_connector_update(connector, drm_edid); + if (ret) + return 0; + + cec_notifier_set_phys_addr(hdata->notifier, info->source_physical_address); + + if (!drm_edid) goto no_edid; - hdata->dvi_mode = !connector->display_info.is_hdmi; + hdata->dvi_mode = !info->is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? 
"dvi monitor" : "hdmi monitor"), - edid->width_cm, edid->height_cm); - - drm_connector_update_edid_property(connector, edid); - cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid); + info->width_mm / 10, info->height_mm / 10); - ret = drm_add_edid_modes(connector, edid); + ret = drm_edid_connector_add_modes(connector); - kfree(edid); + drm_edid_free(drm_edid); return ret; diff --git a/drivers/gpu/drm/exynos/regs-decon7.h b/drivers/gpu/drm/exynos/regs-decon7.h index 5bc5f1db5196..216c106dac8f 100644 --- a/drivers/gpu/drm/exynos/regs-decon7.h +++ b/drivers/gpu/drm/exynos/regs-decon7.h @@ -48,7 +48,7 @@ /* SHADOWCON */ #define SHADOWCON 0x30 -#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win))) +#define SHADOWCON_WINx_PROTECT(_shf, _win) (1 << ((_shf) + (_win))) /* WINCONx */ #define WINCON(_win) (0x50 + ((_win) * 4)) @@ -58,10 +58,9 @@ #define WINCONx_BUFSEL_SHIFT 28 #define WINCONx_TRIPLE_BUF_MODE (0x1 << 18) #define WINCONx_DOUBLE_BUF_MODE (0x0 << 18) -#define WINCONx_BURSTLEN_16WORD (0x0 << 11) -#define WINCONx_BURSTLEN_8WORD (0x1 << 11) -#define WINCONx_BURSTLEN_MASK (0x1 << 11) -#define WINCONx_BURSTLEN_SHIFT 11 +#define WINCONx_BURSTLEN_16WORD(_shf) (0x0 << (_shf)) +#define WINCONx_BURSTLEN_8WORD(_shf) (0x1 << (_shf)) +#define WINCONx_BURSTLEN_MASK(_shf) (0x1 << (_shf)) #define WINCONx_BLD_PLANE (0 << 8) #define WINCONx_BLD_PIX (1 << 8) #define WINCONx_ALPHA_MUL (1 << 7) @@ -89,9 +88,9 @@ #define VIDOSD_H(_x) (0x80 + ((_x) * 4)) /* Frame buffer start addresses: VIDWxxADD0n */ -#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10)) -#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10)) -#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10)) +#define VIDW_BUF_START(_base, _win) ((_base) + ((_win) * 0x10)) +#define VIDW_BUF_START1(_base, _win) ((_base) + 0x04 + ((_win) * 0x10)) +#define VIDW_BUF_START2(_base, _win) ((_base) + 0x08 + ((_win) * 0x10)) #define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8)) #define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8)) diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index 5ca71ef87325..0e0f910ceb9f 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -3,11 +3,13 @@ config DRM_FSL_DCU tristate "DRM Support for Freescale DCU" depends on DRM && OF && ARM && COMMON_CLK select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS + select MFD_SYSCON if SOC_LS1021A help Choose this option if you have a Freescale DCU chipset. If M is selected the module will be called fsl-dcu-drm. 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index ab6c0c6cd0e2..91a48d774cf7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -19,6 +19,7 @@ #include <linux/regmap.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -100,12 +101,25 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev) static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + struct regmap *scfg; int ret; ret = fsl_dcu_drm_modeset_init(fsl_dev); - if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); - return ret; + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to initialize mode setting\n"); + + scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg"); + if (PTR_ERR(scfg) != -ENODEV) { + /* + * For simplicity, enable the PIXCLK unconditionally, + * resulting in increased power consumption. Disabling + * the clock in PM or on unload could be implemented as + * a future improvement. + */ + ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN, + SCFG_PIXCLKCR_PXCEN); + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n"); } ret = drm_vblank_init(dev, dev->mode_config.num_crtc); @@ -156,6 +170,7 @@ static const struct drm_driver fsl_dcu_drm_driver = { .load = fsl_dcu_load, .unload = fsl_dcu_unload, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fsl_dcu_drm_fops, .name = "fsl-dcu-drm", .desc = "Freescale DCU DRM", @@ -272,10 +287,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) } fsl_dev->irq = platform_get_irq(pdev, 0); - if (fsl_dev->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (fsl_dev->irq < 0) return fsl_dev->irq; - } fsl_dev->regmap = devm_regmap_init_mmio(dev, base, &fsl_dcu_regmap_config); @@ -333,7 +346,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) if (ret < 0) goto put; - drm_fbdev_dma_setup(drm, legacyfb_depth); + drm_client_setup_with_color_mode(drm, legacyfb_depth); return 0; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index e2049a0e8a92..566396013c04 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -160,6 +160,9 @@ #define FSL_DCU_ARGB4444 12 #define FSL_DCU_YUV422 14 +#define SCFG_PIXCLKCR 0x28 +#define SCFG_PIXCLKCR_PXCEN BIT(31) + #define VF610_LAYER_REG_NUM 9 #define LS1021A_LAYER_REG_NUM 10 diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c index 9eb5abaf7d66..49bbd00c77ae 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c @@ -29,7 +29,7 @@ void fsl_tcon_bypass_enable(struct fsl_tcon *tcon) FSL_TCON_CTRL1_TCON_BYPASS); } -static struct regmap_config fsl_tcon_regmap_config = { +static const struct regmap_config fsl_tcon_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index 23b7c14de5e2..aa2ea128aa2f 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -2,6 +2,7 @@ config DRM_GMA500 tristate "Intel GMA500/600/3600/3650 KMS Framebuffer" depends on DRM && PCI && X86 && MMU && HAS_IOPORT + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select FB_IOMEM_HELPERS if 
DRM_FBDEV_EMULATION select I2C diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index 98b44974d42d..8edefea2ef59 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -143,12 +143,15 @@ static const struct fb_ops psb_fbdev_fb_ops = { .fb_destroy = psb_fbdev_fb_destroy, }; +static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = { +}; + /* - * struct drm_fb_helper_funcs + * struct drm_driver */ -static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fb_helper->dev; struct drm_psb_private *dev_priv = to_drm_psb_private(dev); @@ -206,6 +209,7 @@ static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper, goto err_drm_gem_object_put; } + fb_helper->funcs = &psb_fbdev_fb_helper_funcs; fb_helper->fb = fb; info = drm_fb_helper_alloc_info(fb_helper); @@ -246,93 +250,3 @@ err_drm_gem_object_put: drm_gem_object_put(obj); return ret; } - -static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = { - .fb_probe = psb_fbdev_fb_probe, -}; - -/* - * struct drm_client_funcs and setup code - */ - -static void psb_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_fb_helper_unprepare(fb_helper); - drm_client_release(&fb_helper->client); - kfree(fb_helper); - } -} - -static int psb_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int psb_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs psb_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = psb_fbdev_client_unregister, - .restore = psb_fbdev_client_restore, - .hotplug = psb_fbdev_client_hotplug, -}; - -void psb_fbdev_setup(struct drm_psb_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->dev; - struct drm_fb_helper *fb_helper; - int ret; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_fb_helper_unprepare; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_fb_helper_unprepare: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index d67c2b3ad901..c419ebbc49ec 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -20,6 +20,7 @@ #include 
<acpi/video.h> #include <drm/drm.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -475,7 +476,7 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - psb_fbdev_setup(dev_priv); + drm_client_setup(dev, NULL); return 0; } @@ -507,6 +508,7 @@ static const struct drm_driver driver = { .num_ioctls = ARRAY_SIZE(psb_ioctls), .dumb_create = psb_gem_dumb_create, + PSB_FBDEV_DRIVER_OPS, .ioctls = psb_ioctls, .fops = &psb_gem_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index bddf89b82fec..de62cbfcdc72 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -184,6 +184,9 @@ #define KSEL_BYPASS_25 6 #define KSEL_BYPASS_83_100 7 +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -597,10 +600,13 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, /* fbdev */ #if defined(CONFIG_DRM_FBDEV_EMULATION) -void psb_fbdev_setup(struct drm_psb_private *dev_priv); +int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +#define PSB_FBDEV_DRIVER_OPS \ + .fbdev_probe = psb_fbdev_driver_fbdev_probe #else -static inline void psb_fbdev_setup(struct drm_psb_private *dev_priv) -{ } +#define PSB_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif /* backlight.c */ diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig index 9c1e61f9eec3..b4d2136942f0 100644 --- a/drivers/gpu/drm/gud/Kconfig +++ b/drivers/gpu/drm/gud/Kconfig @@ -4,6 +4,7 @@ config DRM_GUD tristate "GUD USB Display" depends on DRM && USB && MMU select LZ4_COMPRESS + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index ac6bbf920c72..09ccdc1dc1a2 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> @@ -376,6 +377,7 @@ static const struct drm_driver gud_drm_driver = { .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gud_gem_prime_import, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = "gud", .desc = "Generic USB Display", @@ -622,7 +624,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) drm_kms_helper_poll_init(drm); - drm_fbdev_shmem_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 126504318a4f..80253d39664a 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -3,6 +3,7 @@ config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" depends on DRM && PCI && (ARM64 || COMPILE_TEST) depends on MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 9f9b19ea0587..8c488c98ac97 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -11,11 +11,12 @@ * Jianhua Li <lijianhua@huawei.com> */ +#include 
<linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -63,6 +64,7 @@ static const struct drm_driver hibmc_driver = { .debugfs_init = drm_vram_mm_debugfs_init, .dumb_create = hibmc_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, + DRM_FBDEV_TTM_DRIVER_OPS, }; static int __maybe_unused hibmc_pm_suspend(struct device *dev) @@ -306,7 +308,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hibmc_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, hibmc_driver.name); if (ret) return ret; @@ -339,7 +341,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, goto err_unload; } - drm_fbdev_ttm_setup(dev, 32); + drm_client_setup(dev, NULL); return 0; diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig index 0772f79567ef..43e8a4fd2d11 100644 --- a/drivers/gpu/drm/hisilicon/kirin/Kconfig +++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig @@ -2,6 +2,7 @@ config DRM_HISI_KIRIN tristate "DRM Support for Hisilicon Kirin series SoCs Platform" depends on DRM && OF && (ARM64 || COMPILE_TEST) + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 871f79a6b17e..5616c3917c03 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -25,6 +25,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> +#include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> @@ -925,6 +926,7 @@ static const struct drm_driver ade_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ade_fops, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "kirin", .desc = "Hisilicon Kirin620 SoC DRM Driver", .date = "20150718", diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 12666985686b..86a3a1faff49 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -18,8 +18,8 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> -#include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -237,7 +237,7 @@ static int kirin_drm_bind(struct device *dev) if (ret) goto err_kms_cleanup; - drm_fbdev_dma_setup(drm_dev, 32); + drm_client_setup(drm_dev, NULL); return 0; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index ff93e08d5036..e0953777a206 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -3,13 +3,14 @@ * Copyright 2021 Microsoft */ +#include <linux/aperture.h> #include <linux/efi.h> #include <linux/hyperv.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include 
<drm/drm_gem_shmem_helper.h> @@ -36,6 +37,7 @@ static struct drm_driver hyperv_driver = { .fops = &hv_fops, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; static int hyperv_pci_probe(struct pci_dev *pdev, @@ -124,7 +126,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, goto err_hv_set_drv_data; } - drm_aperture_remove_framebuffers(&hyperv_driver); + aperture_remove_all_conflicting_devices(hyperv_driver.name); ret = hyperv_setup_vram(hv, hdev); if (ret) @@ -149,7 +151,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, goto err_free_mmio; } - drm_fbdev_shmem_setup(dev, 0); + drm_client_setup(dev, NULL); return 0; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 14ac351fd76d..5e939004b646 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -10,7 +10,9 @@ config DRM_I915 # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c63fa2133ccb..31710d98cad5 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -51,7 +51,8 @@ i915-y += \ i915-y += \ soc/intel_dram.o \ soc/intel_gmch.o \ - soc/intel_pch.o + soc/intel_pch.o \ + soc/intel_rom.o # core library code i915-y += \ @@ -225,6 +226,7 @@ i915-y += \ display/intel_atomic_plane.o \ display/intel_audio.o \ display/intel_bios.o \ + display/intel_bo.o \ display/intel_bw.o \ display/intel_cdclk.o \ display/intel_color.o \ @@ -242,6 +244,7 @@ i915-y += \ display/intel_display_power_well.o \ display/intel_display_reset.o \ display/intel_display_rps.o \ + display/intel_display_snapshot.o \ display/intel_display_wa.o \ display/intel_dmc.o \ display/intel_dmc_wl.o \ @@ -325,6 +328,7 @@ i915-y += \ display/intel_dp_hdcp.o \ display/intel_dp_link_training.o \ display/intel_dp_mst.o \ + display/intel_dp_test.o \ display/intel_dsi.o \ display/intel_dsi_dcs_backlight.o \ display/intel_dsi_vbt.o \ @@ -335,6 +339,7 @@ i915-y += \ display/intel_lspcon.o \ display/intel_lvds.o \ display/intel_panel.o \ + display/intel_pfit.o \ display/intel_pps.o \ display/intel_qp_tables.o \ display/intel_sdvo.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 526c8c4d7b53..4fbec065d53e 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -19,6 +19,7 @@ #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_link_training.h" +#include "intel_dp_test.h" #include "intel_dpio_phy.h" #include "intel_encoder.h" #include "intel_fifo_underrun.h" @@ -169,13 +170,12 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) { struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN; - I915_STATE_WARN(dev_priv, cur_state != state, - "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", - dig_port->base.base.base.id, dig_port->base.base.name, - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", + dig_port->base.base.base.id, 
dig_port->base.base.name, + str_on_off(state), str_on_off(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) @@ -184,9 +184,9 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) struct intel_display *display = &dev_priv->display; bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE; - I915_STATE_WARN(dev_priv, cur_state != state, - "eDP PLL state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "eDP PLL state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) @@ -477,12 +477,8 @@ intel_dp_link_down(struct intel_encoder *encoder, msleep(intel_dp->pps.panel_power_down_delay); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_wakeref_t wakeref; - - with_intel_pps_lock(intel_dp, wakeref) - intel_dp->pps.active_pipe = INVALID_PIPE; - } + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + vlv_pps_port_disable(encoder, old_crtc_state); } static void g4x_dp_audio_enable(struct intel_encoder *encoder, @@ -694,7 +690,7 @@ static void intel_enable_dp(struct intel_atomic_state *state, with_intel_pps_lock(intel_dp, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_pps_init(encoder, pipe_config); + vlv_pps_port_enable_unlocked(encoder, pipe_config); intel_dp_enable_port(intel_dp, pipe_config); @@ -709,8 +705,7 @@ static void intel_enable_dp(struct intel_atomic_state *state, if (IS_CHERRYVIEW(dev_priv)) lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); - vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), - lane_mask); + vlv_wait_port_ready(display, dp_to_dig_port(intel_dp), lane_mask); } intel_dp_set_power(intel_dp, DP_SET_POWER_D0); @@ -1172,12 +1167,8 @@ intel_dp_hotplug(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum intel_hotplug_state state; - if (intel_dp->compliance.test_active && - intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { - intel_dp_phy_test(encoder); - /* just do the PHY test and nothing else */ + if (intel_dp_test_phy(intel_dp)) return INTEL_HOTPLUG_UNCHANGED; - } state = intel_encoder_hotplug(encoder, connector); @@ -1249,20 +1240,6 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(enc_to_dig_port(to_intel_encoder(encoder))); } -enum pipe vlv_active_pipe(struct intel_dp *intel_dp) -{ - struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - enum pipe pipe; - - if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg, - encoder->port, &pipe)) - return pipe; - - return INVALID_PIPE; -} - static void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct intel_display *display = to_intel_display(encoder->dev); @@ -1272,13 +1249,10 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder) intel_dp->DP = intel_de_read(display, intel_dp->output_reg); intel_dp->reset_link_params = true; + intel_dp_invalidate_source_oui(intel_dp); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_wakeref_t wakeref; - - with_intel_pps_lock(intel_dp, wakeref) - intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); - } + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + 
vlv_pps_pipe_reset(intel_dp); intel_pps_encoder_reset(intel_dp); } diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h index a10638ab749c..c75e64ae79b7 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.h +++ b/drivers/gpu/drm/i915/display/g4x_dp.h @@ -19,7 +19,6 @@ struct intel_encoder; #ifdef I915 const struct dpll *vlv_get_dpll(struct drm_i915_private *i915); -enum pipe vlv_active_pipe(struct intel_dp *intel_dp); void g4x_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv, @@ -32,10 +31,6 @@ static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915) { return NULL; } -static inline int vlv_active_pipe(struct intel_dp *intel_dp) -{ - return 0; -} static inline void g4x_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 46f23bdb4c17..d1a7d0d57c6b 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -480,8 +480,8 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); vlv_phy_pre_encoder_enable(encoder, pipe_config); @@ -496,7 +496,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, g4x_hdmi_enable_port(encoder, pipe_config); - vlv_wait_port_ready(dev_priv, dig_port, 0x0); + vlv_wait_port_ready(display, dig_port, 0x0); } static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state, @@ -557,9 +557,8 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); chv_phy_pre_encoder_enable(encoder, pipe_config); @@ -573,7 +572,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, g4x_hdmi_enable_port(encoder, pipe_config); - vlv_wait_port_ready(dev_priv, dig_port, 0x0); + vlv_wait_port_ready(display, dig_port, 0x0); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 611a7d6ef80c..34c5d28fc866 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -3,6 +3,8 @@ * Copyright © 2022 Intel Corporation */ +#include <linux/debugfs.h> + #include "hsw_ips.h" #include "i915_drv.h" #include "i915_reg.h" @@ -13,6 +15,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 val; @@ -25,16 +28,16 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * This function is called from post_plane_update, which is run after * a vblank wait. 
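Worth calling out once, since the same mechanical conversion repeats across g4x_dp.c, g4x_hdmi.c and hsw_ips.c here: to_intel_display() is overloaded to derive the struct intel_display from whatever object is at hand (encoder, crtc state, intel_dp, ...), and the intel_de_*() accessors plus the drm_* logging/WARN macros accept it in place of struct drm_i915_private. A condensed before/after of the pattern, taken from the hunks above:

	/* before: route everything through the i915 device */
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_de_write(i915, IPS_CTL, val);
	drm_err(&i915->drm, "Timed out waiting for IPS enable\n");

	/* after: display-local handle, identical accessor calls */
	struct intel_display *display = to_intel_display(crtc_state);

	intel_de_write(display, IPS_CTL, val);
	drm_err(display->drm, "Timed out waiting for IPS enable\n");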
*/ - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); val = IPS_ENABLE; - if (i915->display.ips.false_color) + if (display->ips.false_color) val |= IPS_FALSE_COLOR; if (IS_BROADWELL(i915)) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, val | IPS_PCODE_CONTROL)); /* @@ -44,7 +47,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * so we need to just enable it and continue on. */ } else { - intel_de_write(i915, IPS_CTL, val); + intel_de_write(display, IPS_CTL, val); /* * The bit only becomes 1 in the next vblank, so this wait here * is essentially intel_wait_for_vblank. If we don't have this @@ -52,14 +55,15 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50)) - drm_err(&i915->drm, + if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50)) + drm_err(display->drm, "Timed out waiting for IPS enable\n"); } } bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); bool need_vblank_wait = false; @@ -68,19 +72,19 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) return need_vblank_wait; if (IS_BROADWELL(i915)) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ - if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100)) - drm_err(&i915->drm, + if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100)) + drm_err(display->drm, "Timed out waiting for IPS disable\n"); } else { - intel_de_write(i915, IPS_CTL, 0); - intel_de_posting_read(i915, IPS_CTL); + intel_de_write(display, IPS_CTL, 0); + intel_de_posting_read(display, IPS_CTL); } /* We need to wait for a vblank before we can disable the plane. 
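Both the IPS enable and disable paths above hinge on the fact that IPS_CTL only latches at the next vblank, so a posting read is not enough. intel_de_wait_for_set() and intel_de_wait_for_clear() poll the register until the masked bits reach the wanted state or the millisecond timeout expires, returning 0 on success and a negative error code on timeout, which is why the callers merely log. A sketch of the enable handshake as hsw_ips_enable() uses it:

	intel_de_write(display, IPS_CTL, IPS_ENABLE);
	/* the bit becomes 1 at the next vblank; poll up to 50 ms so the
	 * HW state readout does not race the hardware */
	if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
		drm_err(display->drm, "Timed out waiting for IPS enable\n");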
*/ @@ -186,6 +190,7 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc) bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -193,7 +198,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) if (!hsw_crtc_supports_ips(crtc)) return false; - if (!i915->display.params.enable_ips) + if (!display->params.enable_ips) return false; if (crtc_state->pipe_bpp > 24) @@ -207,7 +212,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) * Should measure whether using a lower cdclk w/o IPS */ if (IS_BROADWELL(i915) && - crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100) + crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100) return false; return true; @@ -257,6 +262,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state, void hsw_ips_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -264,7 +270,7 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state) return; if (IS_HASWELL(i915)) { - crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE; + crtc_state->ips_enabled = intel_de_read(display, IPS_CTL) & IPS_ENABLE; } else { /* * We cannot readout IPS state on broadwell, set to @@ -278,9 +284,9 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state) static int hsw_ips_debugfs_false_color_get(void *data, u64 *val) { struct intel_crtc *crtc = data; - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - *val = i915->display.ips.false_color; + *val = display->ips.false_color; return 0; } @@ -288,7 +294,7 @@ static int hsw_ips_debugfs_false_color_get(void *data, u64 *val) static int hsw_ips_debugfs_false_color_set(void *data, u64 val) { struct intel_crtc *crtc = data; - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc_state *crtc_state; int ret; @@ -296,7 +302,7 @@ static int hsw_ips_debugfs_false_color_set(void *data, u64 val) if (ret) return ret; - i915->display.ips.false_color = val; + display->ips.false_color = val; crtc_state = to_intel_crtc_state(crtc->base.state); @@ -323,18 +329,19 @@ DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops, static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); intel_wakeref_t wakeref; wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", - str_yes_no(i915->display.params.enable_ips)); + str_yes_no(display->params.enable_ips)); - if (DISPLAY_VER(i915) >= 8) { + if (DISPLAY_VER(display) >= 8) { seq_puts(m, "Currently: unknown\n"); } else { - if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE) + if (intel_de_read(display, IPS_CTL) & IPS_ENABLE) seq_puts(m, "Currently: enabled\n"); else seq_puts(m, "Currently: disabled\n"); diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 9447f7229b60..17a1e3801a85 100644 --- 
a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -416,7 +416,8 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } -static void i9xx_plane_update_noarm(struct intel_plane *plane, +static void i9xx_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -444,7 +445,8 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane, } } -static void i9xx_plane_update_arm(struct intel_plane *plane, +static void i9xx_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -507,7 +509,8 @@ static void i9xx_plane_update_arm(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + dspaddr_offset); } -static void i830_plane_update_arm(struct intel_plane *plane, +static void i830_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -517,11 +520,12 @@ static void i830_plane_update_arm(struct intel_plane *plane, * Additional breakage on i830 causes register reads to return * the last latched value instead of the last written value [ALM026]. */ - i9xx_plane_update_noarm(plane, crtc_state, plane_state); - i9xx_plane_update_arm(plane, crtc_state, plane_state); + i9xx_plane_update_noarm(dsb, plane, crtc_state, plane_state); + i9xx_plane_update_arm(dsb, plane, crtc_state, plane_state); } -static void i9xx_plane_disable_arm(struct intel_plane *plane, +static void i9xx_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -549,7 +553,8 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane, } static void -g4x_primary_async_flip(struct intel_plane *plane, +g4x_primary_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) @@ -569,7 +574,8 @@ g4x_primary_async_flip(struct intel_plane *plane, } static void -vlv_primary_async_flip(struct intel_plane *plane, +vlv_primary_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index 15cda57fbc91..e3b13886177a 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -7,13 +7,23 @@ #include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" +#include "intel_bo.h" #include "intel_display.h" #include "intel_display_trace.h" +#include "intel_fb.h" #include "intel_mchbar_regs.h" #include "intel_wm.h" #include "skl_watermark.h" #include "vlv_sideband.h" +struct intel_watermark_params { + u16 fifo_size; + u16 max_wm; + u8 default_wm; + u8 guard_size; + u8 cacheline_size; +}; + /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; @@ -136,6 +146,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { + struct intel_display *display = &dev_priv->display; bool was_enabled; u32 
val; @@ -177,7 +188,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl return false; } - trace_intel_memory_cxsr(dev_priv, was_enabled, enable); + trace_intel_memory_cxsr(display, was_enabled, enable); drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", str_enabled_disabled(enable), @@ -695,6 +706,76 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) } } +static bool i9xx_wm_need_update(const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) +{ + /* Update watermarks on tiling or size changes. */ + if (old_plane_state->uapi.visible != new_plane_state->uapi.visible) + return true; + + if (!old_plane_state->hw.fb || !new_plane_state->hw.fb) + return false; + + if (old_plane_state->hw.fb->modifier != new_plane_state->hw.fb->modifier || + old_plane_state->hw.rotation != new_plane_state->hw.rotation || + drm_rect_width(&old_plane_state->uapi.src) != drm_rect_width(&new_plane_state->uapi.src) || + drm_rect_height(&old_plane_state->uapi.src) != drm_rect_height(&new_plane_state->uapi.src) || + drm_rect_width(&old_plane_state->uapi.dst) != drm_rect_width(&new_plane_state->uapi.dst) || + drm_rect_height(&old_plane_state->uapi.dst) != drm_rect_height(&new_plane_state->uapi.dst)) + return true; + + return false; +} + +static void i9xx_wm_compute(struct intel_crtc_state *new_crtc_state, + const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) +{ + bool turn_off, turn_on, visible, was_visible, mode_changed; + + mode_changed = intel_crtc_needs_modeset(new_crtc_state); + was_visible = old_plane_state->uapi.visible; + visible = new_plane_state->uapi.visible; + + if (!was_visible && !visible) + return; + + turn_off = was_visible && (!visible || mode_changed); + turn_on = visible && (!was_visible || mode_changed); + + /* FIXME nuke when all wm code is atomic */ + if (turn_on) { + new_crtc_state->update_wm_pre = true; + } else if (turn_off) { + new_crtc_state->update_wm_post = true; + } else if (i9xx_wm_need_update(old_plane_state, new_plane_state)) { + /* FIXME bollocks */ + new_crtc_state->update_wm_pre = true; + new_crtc_state->update_wm_post = true; + } +} + +static int i9xx_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + int i; + + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + + i9xx_wm_compute(new_crtc_state, old_plane_state, new_plane_state); + } + + return 0; +} + /* * Documentation says: * "If the line size is small, the TLB fetches can get in the way of the @@ -715,10 +796,11 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) static void g4x_write_wm_values(struct drm_i915_private *dev_priv, const struct g4x_wm_values *wm) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm); intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), FW_WM(wm->sr.plane, SR) | @@ -747,10 +829,11 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv, static void vlv_write_wm_values(struct drm_i915_private *dev_priv, const struct 
vlv_wm_values *wm) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm); intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | @@ -1276,6 +1359,22 @@ out: return 0; } +static int g4x_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + int ret; + + ret = g4x_compute_pipe_wm(state, crtc); + if (ret) + return ret; + + ret = g4x_compute_intermediate_wm(state, crtc); + if (ret) + return ret; + + return 0; +} + static void g4x_merge_wm(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { @@ -1902,6 +2001,22 @@ out: return 0; } +static int vlv_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + int ret; + + ret = vlv_compute_pipe_wm(state, crtc); + if (ret) + return ret; + + ret = vlv_compute_intermediate_wm(state, crtc); + if (ret) + return ret; + + return 0; +} + static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { @@ -2088,12 +2203,13 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, enum i9xx_plane_id i9xx_plane) { + struct intel_display *display = &i915->display; struct intel_plane *plane; for_each_intel_plane(&i915->drm, plane) { if (plane->id == PLANE_PRIMARY && plane->i9xx_plane == i9xx_plane) - return intel_crtc_for_pipe(i915, plane->pipe); + return intel_crtc_for_pipe(display, plane->pipe); } return NULL; @@ -2172,12 +2288,12 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) crtc = single_enabled_crtc(dev_priv); if (IS_I915GM(dev_priv) && crtc) { - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; - obj = intel_fb_obj(crtc->base.primary->state->fb); + obj = intel_fb_bo(crtc->base.primary->state->fb); /* self-refresh seems busted with untiled */ - if (!i915_gem_object_is_tiled(obj)) + if (!intel_bo_is_tiled(obj)) crtc = NULL; } @@ -2878,8 +2994,9 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; - const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; + struct intel_pipe_wm *intermediate = &new_crtc_state->wm.ilk.intermediate; + const struct intel_pipe_wm *optimal = &new_crtc_state->wm.ilk.optimal; + const struct intel_pipe_wm *active = &old_crtc_state->wm.ilk.optimal; int level; /* @@ -2887,25 +3004,29 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, * currently active watermarks to get values that are safe both before * and after the vblank. 
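The a/b to intermediate/active rename below makes the invariant explicit: the intermediate watermarks must be valid under both the outgoing and the incoming configuration, so the enable flags are AND-ed (a level only stays enabled if both states could use it) and each level's FIFO values take the per-field maximum. In essence:

	intermediate->wm[level].enable  &= active->wm[level].enable;
	intermediate->wm[level].pri_val = max(intermediate->wm[level].pri_val,
					      active->wm[level].pri_val);
	/* same max() for spr_val, cur_val and fbc_val */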
*/ - *a = new_crtc_state->wm.ilk.optimal; + *intermediate = *optimal; if (!new_crtc_state->hw.active || intel_crtc_needs_modeset(new_crtc_state) || state->skip_intermediate_wm) return 0; - a->pipe_enabled |= b->pipe_enabled; - a->sprites_enabled |= b->sprites_enabled; - a->sprites_scaled |= b->sprites_scaled; + intermediate->pipe_enabled |= active->pipe_enabled; + intermediate->sprites_enabled |= active->sprites_enabled; + intermediate->sprites_scaled |= active->sprites_scaled; for (level = 0; level < dev_priv->display.wm.num_levels; level++) { - struct intel_wm_level *a_wm = &a->wm[level]; - const struct intel_wm_level *b_wm = &b->wm[level]; - - a_wm->enable &= b_wm->enable; - a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); - a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); - a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); - a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); + struct intel_wm_level *intermediate_wm = &intermediate->wm[level]; + const struct intel_wm_level *active_wm = &active->wm[level]; + + intermediate_wm->enable &= active_wm->enable; + intermediate_wm->pri_val = max(intermediate_wm->pri_val, + active_wm->pri_val); + intermediate_wm->spr_val = max(intermediate_wm->spr_val, + active_wm->spr_val); + intermediate_wm->cur_val = max(intermediate_wm->cur_val, + active_wm->cur_val); + intermediate_wm->fbc_val = max(intermediate_wm->fbc_val, + active_wm->fbc_val); } /* @@ -2914,19 +3035,35 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, * there's no safe way to transition from the old state to * the new state, so we need to fail the atomic transaction. */ - if (!ilk_validate_pipe_wm(dev_priv, a)) + if (!ilk_validate_pipe_wm(dev_priv, intermediate)) return -EINVAL; /* * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. */ - if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) + if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) new_crtc_state->wm.need_postvbl_update = true; return 0; } +static int ilk_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + int ret; + + ret = ilk_compute_pipe_wm(state, crtc); + if (ret) + return ret; + + ret = ilk_compute_intermediate_wm(state, crtc); + if (ret) + return ret; + + return 0; +} + /* * Merge the watermarks from all active pipes for a specific level. 
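ilk_compute_watermarks() here (and its g4x/vlv twins earlier) exists so that the per-platform vtables further down can drop the separate .compute_pipe_wm/.compute_intermediate_wm hooks in favour of a single .compute_watermarks entry point. A sketch of the kind of call site this enables; the function name is invented and the funcs.wm field name is an assumption based on how these vtables are wired up elsewhere in i915:

	static int compute_crtc_watermarks(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
	{
		struct drm_i915_private *i915 = to_i915(state->base.dev);
		const struct intel_wm_funcs *wm_funcs = i915->display.funcs.wm;

		if (wm_funcs->compute_watermarks)
			return wm_funcs->compute_watermarks(state, crtc);

		return 0;
	}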
*/ @@ -3265,7 +3402,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, dev_priv->display.wm.hw = *results; } -bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) +bool ilk_disable_cxsr(struct drm_i915_private *dev_priv) { return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } @@ -3716,6 +3853,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; struct intel_crtc *crtc; @@ -3723,7 +3861,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(display, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -3871,6 +4009,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; struct intel_crtc *crtc; @@ -3878,7 +4017,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(display, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -3971,16 +4110,14 @@ static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) } static const struct intel_wm_funcs ilk_wm_funcs = { - .compute_pipe_wm = ilk_compute_pipe_wm, - .compute_intermediate_wm = ilk_compute_intermediate_wm, + .compute_watermarks = ilk_compute_watermarks, .initial_watermarks = ilk_initial_watermarks, .optimize_watermarks = ilk_optimize_watermarks, .get_hw_state = ilk_wm_get_hw_state, }; static const struct intel_wm_funcs vlv_wm_funcs = { - .compute_pipe_wm = vlv_compute_pipe_wm, - .compute_intermediate_wm = vlv_compute_intermediate_wm, + .compute_watermarks = vlv_compute_watermarks, .initial_watermarks = vlv_initial_watermarks, .optimize_watermarks = vlv_optimize_watermarks, .atomic_update_watermarks = vlv_atomic_update_fifo, @@ -3988,26 +4125,29 @@ static const struct intel_wm_funcs vlv_wm_funcs = { }; static const struct intel_wm_funcs g4x_wm_funcs = { - .compute_pipe_wm = g4x_compute_pipe_wm, - .compute_intermediate_wm = g4x_compute_intermediate_wm, + .compute_watermarks = g4x_compute_watermarks, .initial_watermarks = g4x_initial_watermarks, .optimize_watermarks = g4x_optimize_watermarks, .get_hw_state = g4x_wm_get_hw_state_and_sanitize, }; static const struct intel_wm_funcs pnv_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = pnv_update_wm, }; static const struct intel_wm_funcs i965_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = i965_update_wm, }; static const struct intel_wm_funcs i9xx_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = i9xx_update_wm, }; static const struct intel_wm_funcs i845_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = i845_update_wm, }; diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h index de0920730ab2..06ac37c6c94b 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.h +++ b/drivers/gpu/drm/i915/display/i9xx_wm.h @@ -13,12 +13,12 @@ struct 
intel_crtc_state; struct intel_plane_state; #ifdef I915 -bool ilk_disable_lp_wm(struct drm_i915_private *i915); +bool ilk_disable_cxsr(struct drm_i915_private *i915); void ilk_wm_sanitize(struct drm_i915_private *i915); bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); void i9xx_wm_init(struct drm_i915_private *i915); #else -static inline bool ilk_disable_lp_wm(struct drm_i915_private *i915) +static inline bool ilk_disable_cxsr(struct drm_i915_private *i915) { return false; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 293efc1f841d..8a49f499e3fb 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -29,6 +29,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fixed.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_probe_helper.h> #include "i915_reg.h" #include "icl_dsi.h" @@ -45,43 +46,44 @@ #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" -static int header_credits_available(struct drm_i915_private *dev_priv, +static int header_credits_available(struct intel_display *display, enum transcoder dsi_trans) { - return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) + return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) >> FREE_HEADER_CREDIT_SHIFT; } -static int payload_credits_available(struct drm_i915_private *dev_priv, +static int payload_credits_available(struct intel_display *display, enum transcoder dsi_trans) { - return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) + return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) >> FREE_PLOAD_CREDIT_SHIFT; } -static bool wait_for_header_credits(struct drm_i915_private *dev_priv, +static bool wait_for_header_credits(struct intel_display *display, enum transcoder dsi_trans, int hdr_credit) { - if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= + if (wait_for_us(header_credits_available(display, dsi_trans) >= hdr_credit, 100)) { - drm_err(&dev_priv->drm, "DSI header credits not released\n"); + drm_err(display->drm, "DSI header credits not released\n"); return false; } return true; } -static bool wait_for_payload_credits(struct drm_i915_private *dev_priv, +static bool wait_for_payload_credits(struct intel_display *display, enum transcoder dsi_trans, int payld_credit) { - if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= + if (wait_for_us(payload_credits_available(display, dsi_trans) >= payld_credit, 100)) { - drm_err(&dev_priv->drm, "DSI payload credits not released\n"); + drm_err(display->drm, "DSI payload credits not released\n"); return false; } @@ -98,7 +100,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port) static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; @@ -108,8 +110,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for header/payload credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans, 
MAX_HEADER_CREDIT); - wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT); + wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT); + wait_for_payload_credits(display, dsi_trans, MAX_PLOAD_CREDIT); } /* send nop DCS command */ @@ -119,22 +121,22 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) dsi->channel = 0; ret = mipi_dsi_dcs_nop(dsi); if (ret < 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "error sending DCS NOP command\n"); } /* wait for header credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); + wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT); } /* wait for LP TX in progress bit to be cleared */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) & LPTX_IN_PROGRESS), 20)) - drm_err(&dev_priv->drm, "LPTX bit not cleared\n"); + drm_err(display->drm, "LPTX bit not cleared\n"); } } @@ -142,7 +144,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, const struct mipi_dsi_packet *packet) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); const u8 *data = packet->payload; u32 len = packet->payload_length; @@ -150,20 +152,20 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, /* payload queue can accept *256 bytes*, check limit */ if (len > MAX_PLOAD_CREDIT * 4) { - drm_err(&i915->drm, "payload size exceeds max queue limit\n"); + drm_err(display->drm, "payload size exceeds max queue limit\n"); return -EINVAL; } for (i = 0; i < len; i += 4) { u32 tmp = 0; - if (!wait_for_payload_credits(i915, dsi_trans, 1)) + if (!wait_for_payload_credits(display, dsi_trans, 1)) return -EBUSY; for (j = 0; j < min_t(u32, len - i, 4); j++) tmp |= *data++ << 8 * j; - intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp); + intel_de_write(display, DSI_CMD_TXPYLD(dsi_trans), tmp); } return 0; @@ -174,14 +176,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, bool enable_lpdt) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); u32 tmp; - if (!wait_for_header_credits(dev_priv, dsi_trans, 1)) + if (!wait_for_header_credits(display, dsi_trans, 1)) return -EBUSY; - tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); + tmp = intel_de_read(display, DSI_CMD_TXHDR(dsi_trans)); if (packet->payload) tmp |= PAYLOAD_PRESENT; @@ -200,15 +202,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT); tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT); tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT); - intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); + intel_de_write(display, DSI_CMD_TXHDR(dsi_trans), tmp); return 0; } void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = 
to_intel_display(crtc_state); u32 mode_flags; enum port port; @@ -226,12 +227,13 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) else return; - intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); + intel_de_rmw(display, DSI_CMD_FRMCTL(port), 0, + DSI_FRAME_UPDATE_REQUEST); } static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp, mask, val; @@ -245,31 +247,31 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE | RTERM_SELECT(0x6); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~mask; tmp |= val; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), mask, val); mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK; val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | RCOMP_SCALAR(0x98); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~mask; tmp |= val; - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); + intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), mask, val); mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK; val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | CURSOR_COEFF(0x3f); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); + intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), mask, val); /* Bspec: must not use GRP register for write */ for (lane = 0; lane <= 3; lane++) - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy), mask, val); } } @@ -277,13 +279,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) static void configure_dual_link_mode(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1; /* FIXME: Move all DSS handling to intel_vdsc.c */ - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); @@ -293,7 +295,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dss_ctl2_reg = DSS_CTL2; } - dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); + dss_ctl1 = intel_de_read(display, dss_ctl1_reg); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); @@ -308,19 +310,19 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DL buffer depth exceed max value\n"); dss_ctl1 &= 
~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, + intel_de_rmw(display, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } - intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); + intel_de_write(display, dss_ctl1_reg, dss_ctl1); } /* aka DSI 8X clock */ @@ -341,6 +343,7 @@ static int afe_clk(struct intel_encoder *encoder, static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -360,33 +363,34 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port), + intel_de_write(display, ICL_DSI_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port)); + intel_de_posting_read(display, ICL_DSI_ESC_CLK_DIV(port)); } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port), + intel_de_write(display, ICL_DPHY_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port)); + intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port)); } if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8), + intel_de_write(display, ADL_MIPIO_DW(port, 8), esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY); - intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8)); + intel_de_posting_read(display, ADL_MIPIO_DW(port, 8)); } } } -static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, - struct intel_dsi *intel_dsi) +static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi) { + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]); + drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]); intel_dsi->io_wakeref[port] = intel_display_power_get(dev_priv, port == PORT_A ? 
@@ -397,15 +401,15 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + intel_de_rmw(display, ICL_DSI_IO_MODECTL(port), 0, COMBO_PHY_MODE_DSI); - get_dsi_io_power_domains(dev_priv, intel_dsi); + get_dsi_io_power_domains(intel_dsi); } static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) @@ -421,6 +425,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; @@ -429,32 +434,33 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); + intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), + LOADGEN_SELECT, 0); for (lane = 0; lane <= 3; lane++) - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy), LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0); } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), + intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) || - (DISPLAY_VER(dev_priv) >= 12)) { - intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), + (DISPLAY_VER(display) >= 12)) { + intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); - tmp = intel_de_read(dev_priv, + tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), + intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), tmp); } } @@ -463,17 +469,17 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; /* clear common keeper enable bit */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); + intel_de_write(display, 
ICL_PORT_PCS_DW1_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); } /* @@ -482,14 +488,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * as part of lane phy sequence configuration */ for_each_dsi_phy(phy, intel_dsi->phys) - intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); + intel_de_rmw(display, ICL_PORT_CL_DW5(phy), 0, + SUS_CLOCK_CONFIG); /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); } /* Program swing and de-emphasis */ @@ -497,26 +504,26 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* Set training enable to trigger update */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); } } static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); + intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); - if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 500)) - drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n", + drm_err(display->drm, "DDI port:%c buffer idle\n", port_name(port)); } } @@ -525,6 +532,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -532,12 +540,12 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), + intel_de_write(display, DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* Program DPHY data lanes timings */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), + intel_de_write(display, DPHY_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* @@ -546,10 +554,10 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, * a value '0' inside TA_PARAM_REGISTERS otherwise * leave all fields at HW default values. 
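Most of the PHY programming above uses intel_de_rmw(display, reg, clear, set), whose effect is approximately the open-coded read-modify-write kept for the _LN(0)/_GRP register pairs (Bspec forbids using the GRP mirror for some of these writes, hence the explicit per-lane loops):

	/* roughly what intel_de_rmw() expands to */
	u32 val = intel_de_read(display, reg);

	val &= ~clear;
	val |= set;
	intel_de_write(display, reg, val);

The real helper additionally returns the old register value and, at the uncore level, skips the write when nothing changed.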
*/ - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), + intel_de_rmw(display, DPHY_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } @@ -557,7 +565,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { for_each_dsi_phy(phy, intel_dsi->phys) - intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), + intel_de_rmw(display, ICL_DPHY_CHKN(phy), 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); } } @@ -566,30 +574,30 @@ static void gen11_dsi_setup_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), + intel_de_rmw(display, ICL_DSI_T_INIT_MASTER(port), DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), + intel_de_write(display, DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), + intel_de_write(display, DSI_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* shadow register inside display core */ - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), + intel_de_rmw(display, DSI_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } @@ -599,45 +607,45 @@ gen11_dsi_setup_timings(struct intel_encoder *encoder, static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; - mutex_lock(&dev_priv->display.dpll.lock); - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + mutex_lock(&display->dpll.lock); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->display.dpll.lock); + intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&display->dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; - mutex_lock(&dev_priv->display.dpll.lock); - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + mutex_lock(&display->dpll.lock); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->display.dpll.lock); + intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&display->dpll.lock); } static bool 
gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); bool clock_enabled = false; enum phy phy; u32 tmp; - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy))) @@ -650,36 +658,36 @@ static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy; u32 val; - mutex_lock(&dev_priv->display.dpll.lock); + mutex_lock(&display->dpll.lock); - val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + val = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_write(display, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_write(display, ICL_DPCLKA_CFGCR0, val); - intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); + intel_de_posting_read(display, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } static void gen11_dsi_configure_transcoder(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = crtc->pipe; @@ -689,7 +697,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + tmp = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)); if (intel_dsi->eotp_pkt) tmp &= ~EOTP_DISABLED; @@ -745,7 +753,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, } } - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (is_vid_mode(intel_dsi)) tmp |= BLANKING_PACKET_ENABLE; } @@ -778,15 +786,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, tmp |= TE_SOURCE_GPIO; } - intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); + intel_de_write(display, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); } /* enable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL2(display, dsi_trans), 0, PORT_SYNC_MODE_ENABLE); } @@ -798,8 +806,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* select data lane width */ - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + tmp = intel_de_read(display, + 
TRANS_DDI_FUNC_CTL(display, dsi_trans)); tmp &= ~DDI_PORT_WIDTH_MASK; tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); @@ -825,16 +833,16 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* enable DDI buffer */ tmp |= TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), tmp); + intel_de_write(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans), tmp); } /* wait for link ready */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) & + if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) & LINK_READY), 2500)) - drm_err(&dev_priv->drm, "DSI link not ready\n"); + drm_err(display->drm, "DSI link not ready\n"); } } @@ -842,7 +850,7 @@ static void gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -909,17 +917,17 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* minimum hactive as per bspec: 256 pixels */ if (adjusted_mode->crtc_hdisplay < 256) - drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n"); + drm_err(display->drm, "hactive is less than 256 pixels\n"); /* if RGB666 format, then hactive must be multiple of 4 pixels */ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "hactive pixels are not multiple of 4\n"); /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, dsi_trans), + intel_de_write(display, TRANS_HTOTAL(display, dsi_trans), HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); } @@ -928,12 +936,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) { /* BSPEC: hsync size should be at least 16 pixels */ if (hsync_size < 16) - drm_err(&dev_priv->drm, + drm_err(display->drm, "hsync size < 16 pixels\n"); } if (hback_porch < 16) - drm_err(&dev_priv->drm, "hback porch < 16 pixels\n"); + drm_err(display->drm, "hback porch < 16 pixels\n"); if (intel_dsi->dual_link) { hsync_start /= 2; @@ -942,8 +950,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_HSYNC(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_HSYNC(display, dsi_trans), HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); } } @@ -957,22 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * struct drm_display_mode. 
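All of the transcoder timing registers written here follow the hardware's N-1 encoding, which is why every field is programmed as value - 1. For a hypothetical mode with 1920 active pixels and a 2200-pixel htotal (numbers invented for illustration), the horizontal write would be:

	intel_de_write(display, TRANS_HTOTAL(display, dsi_trans),
		       HACTIVE(1920 - 1) | HTOTAL(2200 - 1));

The vertical side uses the same convention, with the interlace special case noted in the surrounding comment.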
* For interlace mode: program required pixel minus 2 */ - intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, dsi_trans), + intel_de_write(display, TRANS_VTOTAL(display, dsi_trans), VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); } if (vsync_end < vsync_start || vsync_end > vtotal) - drm_err(&dev_priv->drm, "Invalid vsync_end value\n"); + drm_err(display->drm, "Invalid vsync_end value\n"); if (vsync_start < vactive) - drm_err(&dev_priv->drm, "vsync_start less than vactive\n"); + drm_err(display->drm, "vsync_start less than vactive\n"); /* program TRANS_VSYNC register for video mode only */ if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VSYNC(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VSYNC(display, dsi_trans), VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); } } @@ -986,8 +994,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VSYNCSHIFT(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VSYNCSHIFT(display, dsi_trans), vsync_shift); } } @@ -998,11 +1006,11 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * FIXME get rid of these local hacks and do it right, * this will not handle eg. delayed vblank correctly. */ - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VBLANK(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VBLANK(display, dsi_trans), VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); } } @@ -1010,20 +1018,20 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), 0, + intel_de_rmw(display, TRANSCONF(display, dsi_trans), 0, TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ - if (intel_de_wait_for_set(dev_priv, TRANSCONF(dev_priv, dsi_trans), + if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans), TRANSCONF_STATE_ENABLE, 10)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DSI transcoder not enabled\n"); } } @@ -1031,7 +1039,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1055,21 +1063,21 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ - intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), + intel_de_rmw(display, DSI_HSTX_TO(dsi_trans), HSTX_TIMEOUT_VALUE_MASK, HSTX_TIMEOUT_VALUE(hs_tx_timeout)); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ - intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), 
+ intel_de_rmw(display, DSI_LPRX_HOST_TO(dsi_trans), LPRX_TIMEOUT_VALUE_MASK, LPRX_TIMEOUT_VALUE(lp_rx_timeout)); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ - intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), + intel_de_rmw(display, DSI_TA_TO(dsi_trans), TA_TIMEOUT_VALUE_MASK, TA_TIMEOUT_VALUE(ta_timeout)); } @@ -1078,7 +1086,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, bool enable) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; @@ -1090,7 +1098,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B))) return; - tmp = intel_de_read(dev_priv, UTIL_PIN_CTL); + tmp = intel_de_read(display, UTIL_PIN_CTL); if (enable) { tmp |= UTIL_PIN_DIRECTION_INPUT; @@ -1098,7 +1106,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, } else { tmp &= ~UTIL_PIN_ENABLE; } - intel_de_write(dev_priv, UTIL_PIN_CTL, tmp); + intel_de_write(display, UTIL_PIN_CTL, tmp); } static void @@ -1136,7 +1144,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; @@ -1152,14 +1160,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) * FIXME: This uses the number of DW's currently in the payload * receive queue. This is probably not what we want here. */ - tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans)); + tmp = intel_de_read(display, DSI_CMD_RXCTL(dsi_trans)); tmp &= NUMBER_RX_PLOAD_DW_MASK; /* multiply "Number Rx Payload DW" by 4 to get max value */ tmp = tmp * 4; dsi = intel_dsi->dsi_hosts[port]->device; ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); if (ret < 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "error setting max return pkt size%d\n", tmp); } @@ -1219,10 +1227,10 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + if (DISPLAY_VER(display) == 11 && pipe == PIPE_B) + intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_KVMR_PIPE_A, enable ? 
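/* editor's note: read-modify-write of the chicken bit -- IGNORE_KVMR_PIPE_A
 * is set while the workaround is enabled and cleared again on disable */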
IGNORE_KVMR_PIPE_A : 0); } @@ -1235,13 +1243,13 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, */ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - if (DISPLAY_VER(i915) == 13) { + if (DISPLAY_VER(display) == 13) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), + intel_de_rmw(display, TGL_DSI_CHKN_REG(port), TGL_DSI_CHKN_LSHS_GB_MASK, TGL_DSI_CHKN_LSHS_GB(4)); } @@ -1275,7 +1283,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1284,13 +1292,13 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ - intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), + intel_de_rmw(display, TRANSCONF(display, dsi_trans), TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ - if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, dsi_trans), + if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans), TRANSCONF_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DSI trancoder not disabled\n"); } } @@ -1307,7 +1315,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1316,29 +1324,29 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), + intel_de_rmw(display, DSI_CMD_FRMCTL(port), DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); } /* put dsi link in ULPS */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)); + tmp = intel_de_read(display, DSI_LP_MSG(dsi_trans)); tmp |= LINK_ENTER_ULPS; tmp &= ~LINK_ULPS_TYPE_LP11; - intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp); + intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp); - if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) & LINK_IN_ULPS), 10)) - drm_err(&dev_priv->drm, "DSI link not in ULPS\n"); + drm_err(display->drm, "DSI link not in ULPS\n"); } /* disable ddi function */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans), TRANS_DDI_FUNC_ENABLE, 0); } @@ -1346,8 +1354,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - 
TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL2(display, dsi_trans), PORT_SYNC_MODE_ENABLE, 0); } } @@ -1355,18 +1363,18 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) static void gen11_dsi_disable_port(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); + intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); - if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 8)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DDI port:%c buffer not idle\n", port_name(port)); } @@ -1375,6 +1383,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -1392,7 +1401,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + intel_de_rmw(display, ICL_DSI_IO_MODECTL(port), COMBO_PHY_MODE_DSI, 0); } @@ -1504,8 +1513,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans; u32 val; @@ -1514,7 +1522,7 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) else dsi_trans = TRANSCODER_DSI_0; - val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)); return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE); } @@ -1557,7 +1565,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, static void gen11_dsi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *intel_crtc; enum pipe pipe; @@ -1568,9 +1576,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, pipe = intel_crtc->pipe; /* wa verify 1409054076:icl,jsl,ehl */ - if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B && - !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) - drm_dbg_kms(&dev_priv->drm, + if (DISPLAY_VER(display) == 11 && pipe == PIPE_B && + !(intel_de_read(display, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n", encoder->base.base.id, encoder->base.name); @@ -1579,9 +1587,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = 
to_intel_display(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10; + int dsc_max_bpc = DISPLAY_VER(display) >= 12 ? 12 : 10; bool use_dsc; int ret; @@ -1606,12 +1614,12 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, return ret; /* DSI specific sanity checks on the common code */ - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable); - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, vdsc_cfg->vbr_enable); + drm_WARN_ON(display->drm, vdsc_cfg->simple_422); + drm_WARN_ON(display->drm, vdsc_cfg->pic_width % vdsc_cfg->slice_width); - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, vdsc_cfg->slice_height < 8); + drm_WARN_ON(display->drm, vdsc_cfg->pic_height % vdsc_cfg->slice_height); ret = drm_dsc_compute_rc_parameters(vdsc_cfg); @@ -1627,7 +1635,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = @@ -1661,7 +1669,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->clock_set = true; if (gen11_dsi_dsc_compute_config(encoder, pipe_config)) - drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n"); + drm_dbg_kms(display->drm, "Attempting to use DSC failed\n"); pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; @@ -1679,15 +1687,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - - get_dsi_io_power_domains(i915, - enc_to_intel_dsi(encoder)); + get_dsi_io_power_domains(enc_to_intel_dsi(encoder)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum transcoder dsi_trans; @@ -1703,8 +1709,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + tmp = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: *pipe = PIPE_A; @@ -1719,11 +1725,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, *pipe = PIPE_D; break; default: - drm_err(&dev_priv->drm, "Invalid PIPE input\n"); + drm_err(display->drm, "Invalid PIPE input\n"); goto out; } - tmp = intel_de_read(dev_priv, TRANSCONF(dev_priv, dsi_trans)); + tmp = intel_de_read(display, TRANSCONF(display, dsi_trans)); ret = tmp & TRANSCONF_ENABLE; } out: @@ -1833,8 +1839,7 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { static void icl_dphy_param_init(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display 
*display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns; @@ -1858,7 +1863,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) */ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); if (prepare_cnt > ICL_PREPARE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n", prepare_cnt); prepare_cnt = ICL_PREPARE_CNT_MAX; } @@ -1867,7 +1872,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - ths_prepare_ns, tlpx_ns); if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "clk_zero_cnt out of range (%d)\n", clk_zero_cnt); clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; } @@ -1875,7 +1880,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* trail cnt in escape clocks*/ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); if (trail_cnt > ICL_TRAIL_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n", trail_cnt); trail_cnt = ICL_TRAIL_CNT_MAX; } @@ -1883,7 +1888,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* tclk pre count in escape clocks */ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; } @@ -1892,7 +1897,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - ths_prepare_ns, tlpx_ns); if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n", hs_zero_cnt); hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; } @@ -1900,7 +1905,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* hs exit zero cnt in escape clocks */ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "exit_zero_cnt out of range (%d)\n", exit_zero_cnt); exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; @@ -1942,10 +1947,9 @@ static void icl_dsi_add_properties(struct intel_connector *connector) fixed_mode->vdisplay); } -void icl_dsi_init(struct drm_i915_private *dev_priv, +void icl_dsi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata) { - struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *intel_connector; @@ -1973,7 +1977,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, encoder->devdata = devdata; /* register DSI encoder with DRM subsystem */ - drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs, + drm_encoder_init(display->drm, &encoder->base, + &gen11_dsi_encoder_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; @@ -1998,7 +2003,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, encoder->shutdown = intel_dsi_shutdown; /* register DSI connector with DRM subsystem */ - drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs, + drm_connector_init(display->drm, connector, + 
&gen11_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; @@ -2011,12 +2017,12 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL); - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!intel_panel_preferred_fixed_mode(intel_connector)) { - drm_err(&dev_priv->drm, "DSI fixed mode info missing\n"); + drm_err(display->drm, "DSI fixed mode info missing\n"); goto err; } @@ -2029,10 +2035,10 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, else intel_dsi->ports = BIT(port); - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; for_each_dsi_port(port, intel_dsi->ports) { @@ -2046,7 +2052,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - drm_dbg_kms(&dev_priv->drm, "no device found\n"); + drm_dbg_kms(display->drm, "no device found\n"); goto err; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h index 43fa7d72eeb1..099fc50e35b4 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.h +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -6,11 +6,11 @@ #ifndef __ICL_DSI_H__ #define __ICL_DSI_H__ -struct drm_i915_private; struct intel_bios_encoder_data; struct intel_crtc_state; +struct intel_display; -void icl_dsi_init(struct drm_i915_private *dev_priv, +void icl_dsi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata); void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c index 186cf4833f71..55f3ae1e68c9 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.c +++ b/drivers/gpu/drm/i915/display/intel_alpm.c @@ -3,6 +3,8 @@ * Copyright 2024, Intel Corporation. 
*/ +#include <linux/debugfs.h> + #include "intel_alpm.h" #include "intel_crtc.h" #include "intel_de.h" @@ -330,7 +332,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines); intel_de_write(display, - PORT_ALPM_CTL(display, port), + PORT_ALPM_CTL(port), PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE | PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) | PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) | @@ -338,7 +340,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, intel_dp->alpm_parameters.silence_period_sym_clocks)); intel_de_write(display, - PORT_ALPM_LFPS_CTL(display, port), + PORT_ALPM_LFPS_CTL(port), PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) | PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION( intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) | diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 12d6ed940751..03dc54c802d3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -266,7 +266,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_pipe = false; crtc_state->update_m_n = false; crtc_state->update_lrr = false; - crtc_state->disable_lp_wm = false; crtc_state->disable_cxsr = false; crtc_state->update_wm_pre = false; crtc_state->update_wm_post = false; @@ -277,7 +276,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->fb_bits = 0; crtc_state->update_planes = 0; crtc_state->dsb_color_vblank = NULL; - crtc_state->dsb_color_commit = NULL; + crtc_state->dsb_commit = NULL; + crtc_state->use_dsb = false; return &crtc_state->uapi; } @@ -312,7 +312,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, struct intel_crtc_state *crtc_state = to_intel_crtc_state(state); drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank); - drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit); + drm_WARN_ON(crtc->dev, crtc_state->dsb_commit); __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index e979786aa5cf..d89630b2d5c1 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -35,9 +35,10 @@ #include <linux/dma-resv.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_atomic_helper.h> #include "i915_config.h" #include "i9xx_plane_regs.h" @@ -391,28 +392,6 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, plane_state->uapi.visible = false; } -/* FIXME nuke when all wm code is atomic */ -static bool intel_wm_need_update(const struct intel_plane_state *cur, - struct intel_plane_state *new) -{ - /* Update watermarks on tiling or size changes. 
*/ - if (new->uapi.visible != cur->uapi.visible) - return true; - - if (!cur->hw.fb || !new->hw.fb) - return false; - - if (cur->hw.fb->modifier != new->hw.fb->modifier || - cur->hw.rotation != new->hw.rotation || - drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || - drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || - drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || - drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) - return true; - - return false; -} - static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state) { int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; @@ -492,6 +471,61 @@ static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state return old_ctl != new_ctl; } +static bool ilk_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state, + const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) +{ + struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); + bool old_visible = old_plane_state->uapi.visible; + bool new_visible = new_plane_state->uapi.visible; + bool modeset, turn_on; + + if (plane->id == PLANE_CURSOR) + return false; + + modeset = intel_crtc_needs_modeset(new_crtc_state); + turn_on = new_visible && (!old_visible || modeset); + + /* + * ILK/SNB DVSACNTR/Sprite Enable + * IVB SPR_CTL/Sprite Enable + * "When in Self Refresh Big FIFO mode, a write to enable the + * plane will be internally buffered and delayed while Big FIFO + * mode is exiting." + * + * Which means that enabling the sprite can take an extra frame + * when we start in big FIFO mode (LP1+). Thus we need to drop + * down to LP0 and wait for vblank in order to make sure the + * sprite gets enabled on the next vblank after the register write. + * Doing otherwise would risk enabling the sprite one frame after + * we've already signalled flip completion. We can resume LP1+ + * once the sprite has been enabled. + * + * With experimental results seems this is needed also for primary + * plane, not only sprite plane. + */ + if (turn_on) + return true; + + /* + * WaCxSRDisabledForSpriteScaling:ivb + * IVB SPR_SCALE/Scaling Enable + * "Low Power watermarks must be disabled for at least one + * frame before enabling sprite scaling, and kept disabled + * until sprite scaling is disabled." + * + * ILK/SNB DVSASCALE/Scaling Enable + * "When in Self Refresh Big FIFO mode, scaling enable will be + * masked off while Big FIFO mode is exiting." + * + * Despite the w/a only being listed for IVB we assume that + * the ILK/SNB note has similar ramifications, hence we apply + * the w/a on all three platforms. 
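 * (editor's note: this new helper absorbs the inline logic removed later
 * in this file's diff; both the turn-on case and the scaling case now
 * set disable_cxsr instead of the deleted disable_lp_wm flag.)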
+ */ + return !intel_plane_is_scaled(old_plane_state) && + intel_plane_is_scaled(new_plane_state); +} + static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, @@ -546,20 +580,6 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr was_visible, visible, turn_off, turn_on, mode_changed); - if (turn_on) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - new_crtc_state->update_wm_pre = true; - } else if (turn_off) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - new_crtc_state->update_wm_post = true; - } else if (intel_wm_need_update(old_plane_state, new_plane_state)) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { - /* FIXME bollocks */ - new_crtc_state->update_wm_pre = true; - new_crtc_state->update_wm_post = true; - } - } - if (visible || was_visible) new_crtc_state->fb_bits |= plane->frontbuffer_bit; @@ -567,45 +587,9 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) new_crtc_state->disable_cxsr = true; - /* - * ILK/SNB DVSACNTR/Sprite Enable - * IVB SPR_CTL/Sprite Enable - * "When in Self Refresh Big FIFO mode, a write to enable the - * plane will be internally buffered and delayed while Big FIFO - * mode is exiting." - * - * Which means that enabling the sprite can take an extra frame - * when we start in big FIFO mode (LP1+). Thus we need to drop - * down to LP0 and wait for vblank in order to make sure the - * sprite gets enabled on the next vblank after the register write. - * Doing otherwise would risk enabling the sprite one frame after - * we've already signalled flip completion. We can resume LP1+ - * once the sprite has been enabled. - * - * - * WaCxSRDisabledForSpriteScaling:ivb - * IVB SPR_SCALE/Scaling Enable - * "Low Power watermarks must be disabled for at least one - * frame before enabling sprite scaling, and kept disabled - * until sprite scaling is disabled." - * - * ILK/SNB DVSASCALE/Scaling Enable - * "When in Self Refresh Big FIFO mode, scaling enable will be - * masked off while Big FIFO mode is exiting." - * - * Despite the w/a only being listed for IVB we assume that - * the ILK/SNB note has similar ramifications, hence we apply - * the w/a on all three platforms. - * - * With experimental results seems this is needed also for primary - * plane, not only sprite plane. 
- */ - if (plane->id != PLANE_CURSOR && - (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || - IS_IVYBRIDGE(dev_priv)) && - (turn_on || (!intel_plane_is_scaled(old_plane_state) && - intel_plane_is_scaled(new_plane_state)))) - new_crtc_state->disable_lp_wm = true; + if ((IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) && + ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) + new_crtc_state->disable_cxsr = true; if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { new_crtc_state->do_async_flip = true; @@ -710,13 +694,13 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id) int intel_plane_atomic_check(struct intel_atomic_state *state, struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_primary_crtc_plane_state; - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -790,7 +774,8 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } -void intel_plane_update_noarm(struct intel_plane *plane, +void intel_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -799,10 +784,11 @@ void intel_plane_update_noarm(struct intel_plane *plane, trace_intel_plane_update_noarm(plane, crtc); if (plane->update_noarm) - plane->update_noarm(plane, crtc_state, plane_state); + plane->update_noarm(dsb, plane, crtc_state, plane_state); } -void intel_plane_async_flip(struct intel_plane *plane, +void intel_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) @@ -810,34 +796,37 @@ void intel_plane_async_flip(struct intel_plane *plane, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_async_flip(plane, crtc, async_flip); - plane->async_flip(plane, crtc_state, plane_state, async_flip); + plane->async_flip(dsb, plane, crtc_state, plane_state, async_flip); } -void intel_plane_update_arm(struct intel_plane *plane, +void intel_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->do_async_flip && plane->async_flip) { - intel_plane_async_flip(plane, crtc_state, plane_state, true); + intel_plane_async_flip(dsb, plane, crtc_state, plane_state, true); return; } trace_intel_plane_update_arm(plane, crtc); - plane->update_arm(plane, crtc_state, plane_state); + plane->update_arm(dsb, plane, crtc_state, plane_state); } -void intel_plane_disable_arm(struct intel_plane *plane, +void intel_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_disable_arm(plane, crtc); - plane->disable_arm(plane, crtc_state); + 
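/* editor's note (assumption): the dsb argument threaded through the plane
 * vfuncs lets these register writes be queued into a display state buffer;
 * a NULL dsb presumably keeps the immediate-MMIO behaviour */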
plane->disable_arm(dsb, plane, crtc_state); } -void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, +void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = @@ -862,11 +851,13 @@ void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, /* TODO: for mailbox updates this should be skipped */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) - intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); + intel_plane_update_noarm(dsb, plane, + new_crtc_state, new_plane_state); } } -static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, +static void skl_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = @@ -893,13 +884,14 @@ static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) - intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state); else - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(dsb, plane, new_crtc_state); } } -static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, +static void i9xx_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = @@ -919,21 +911,22 @@ static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, * would have to be called here as well. */ if (new_plane_state->uapi.visible) - intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state); else - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(dsb, plane, new_crtc_state); } } -void intel_crtc_planes_update_arm(struct intel_atomic_state *state, +void intel_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (DISPLAY_VER(i915) >= 9) - skl_crtc_planes_update_arm(state, crtc); + skl_crtc_planes_update_arm(dsb, state, crtc); else - i9xx_crtc_planes_update_arm(state, crtc); + i9xx_crtc_planes_update_arm(dsb, state, crtc); } int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, @@ -1031,6 +1024,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) */ hsub = 1; vsub = 1; + + /* Wa_16023981245 */ + if ((DISPLAY_VERx100(i915) == 2000 || + DISPLAY_VERx100(i915) == 3000) && + src_x % 2 != 0) + hsub = 2; } else { hsub = fb->format->hsub; vsub = fb->format->vsub; @@ -1114,8 +1113,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); - struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); + struct drm_gem_object *obj = intel_fb_bo(new_plane_state->hw.fb); + struct drm_gem_object *old_obj = intel_fb_bo(old_plane_state->hw.fb); int ret; if (old_obj) { @@ -1135,7 +1134,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * can safely continue. 
*/ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) { - ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv, + ret = add_dma_resv_fences(old_obj->resv, &new_plane_state->uapi); if (ret < 0) return ret; @@ -1195,7 +1194,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, struct intel_atomic_state *state = to_intel_atomic_state(old_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); + struct drm_gem_object *obj = intel_fb_bo(old_plane_state->hw.fb); if (!obj) return; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 6c4fe3596465..0f982f452ff3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -14,6 +14,7 @@ struct drm_rect; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_dsb; struct intel_plane; struct intel_plane_state; enum plane_id; @@ -32,26 +33,32 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, struct intel_crtc *crtc); void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state); -void intel_plane_async_flip(struct intel_plane *plane, +void intel_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip); -void intel_plane_update_noarm(struct intel_plane *plane, +void intel_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_plane_update_arm(struct intel_plane *plane, +void intel_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_plane_disable_arm(struct intel_plane *plane, +void intel_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, +void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc); -void intel_crtc_planes_update_arm(struct intel_atomic_state *state, +void intel_crtc_planes_update_arm(struct intel_dsb *dsbx, + struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index f5e7eefab2f1..32aa9ec1a204 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -982,12 +982,12 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) { struct intel_display *display = to_intel_display(kdev); struct drm_i915_private *i915 = to_i915(display->drm); - intel_wakeref_t ret; + intel_wakeref_t wakeref; /* Catch potential impedance mismatches before they occur! 
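 * Editor's note: the audio component API traffics in an unsigned long
 * cookie, so this BUILD_BUG_ON guards the explicit casts added in this
 * hunk (wakeref cast out in get_power, back to intel_wakeref_t in
 * put_power).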
*/ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long)); - ret = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK); if (i915->display.audio.power_refcount++ == 0) { if (DISPLAY_VER(i915) >= 9) { @@ -1007,7 +1007,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) 0, AUD_PIN_BUF_ENABLE); } - return ret; + return (unsigned long)wakeref; } static void i915_audio_component_put_power(struct device *kdev, @@ -1015,13 +1015,14 @@ static void i915_audio_component_put_power(struct device *kdev, { struct intel_display *display = to_intel_display(kdev); struct drm_i915_private *i915 = to_i915(display->drm); + intel_wakeref_t wakeref = (intel_wakeref_t)cookie; /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ if (--i915->display.audio.power_refcount == 0) if (IS_GEMINILAKE(i915)) glk_force_audio_cdclk(i915, false); - intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, cookie); + intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref); } static void i915_audio_component_codec_wake_override(struct device *kdev, diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 9e05745d797d..3f81a726cc7d 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector) else props.power = BACKLIGHT_POWER_OFF; - name = kstrdup_const("intel_backlight", GFP_KERNEL); + name = kstrdup("intel_backlight", GFP_KERNEL); if (!name) return -ENOMEM; @@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector) * compatibility. Use unique names for subsequent backlight devices as a * fallback when the default name already exists. 
*/ - kfree_const(name); + kfree(name); name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", i915->drm.primary->index, connector->base.name); if (!name) @@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector) connector->base.base.id, connector->base.name, name); out: - kfree_const(name); + kfree(name); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index bed485374ab0..a4cdd82c4a75 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -25,6 +25,7 @@ * */ +#include <linux/debugfs.h> #include <linux/firmware.h> #include <drm/display/drm_dp_helper.h> @@ -32,12 +33,12 @@ #include <drm/drm_edid.h> #include <drm/drm_fixed.h> +#include "soc/intel_rom.h" + #include "i915_drv.h" -#include "i915_reg.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_gmbus.h" -#include "intel_uncore.h" #define _INTEL_BIOS_PRIVATE #include "intel_vbt_defs.h" @@ -1168,7 +1169,6 @@ static int intel_bios_ssc_frequency(struct intel_display *display, static void parse_general_features(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_general_features *general; general = bdb_find_section(display, BDB_GENERAL_FEATURES); @@ -1178,7 +1178,7 @@ parse_general_features(struct intel_display *display) display->vbt.int_tv_support = general->int_tv_support; /* int_crt_support can't be trusted on earlier platforms */ if (display->vbt.version >= 155 && - (HAS_DDI(display) || IS_VALLEYVIEW(i915))) + (HAS_DDI(display) || display->platform.valleyview)) display->vbt.int_crt_support = general->int_crt_support; display->vbt.lvds_use_ssc = general->enable_ssc; display->vbt.lvds_ssc_freq = @@ -1541,7 +1541,6 @@ static void parse_psr(struct intel_display *display, struct intel_panel *panel) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_psr *psr; const struct psr_table *psr_table; int panel_type = panel->vbt.panel_type; @@ -1566,7 +1565,7 @@ parse_psr(struct intel_display *display, * Old decimal value is wake up time in multiples of 100 us. 
*/ if (display->vbt.version >= 205 && - (DISPLAY_VER(display) >= 9 && !IS_BROXTON(i915))) { + (DISPLAY_VER(display) >= 9 && !display->platform.broxton)) { switch (psr_table->tp1_wakeup_time) { case 0: panel->vbt.psr.tp1_wakeup_time_us = 500; @@ -1705,8 +1704,8 @@ parse_mipi_config(struct intel_display *display, return; } - drm_dbg(display->drm, "Found MIPI Config block, panel index = %d\n", - panel_type); + drm_dbg_kms(display->drm, "Found MIPI Config block, panel index = %d\n", + panel_type); /* * get hold of the correct configuration block and pps data as per @@ -2028,11 +2027,9 @@ static void icl_fixup_mipi_sequences(struct intel_display *display, static void fixup_mipi_sequences(struct intel_display *display, struct intel_panel *panel) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(display) >= 11) icl_fixup_mipi_sequences(display, panel); - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) vlv_fixup_mipi_sequences(display, panel); } @@ -2066,8 +2063,8 @@ parse_mipi_sequence(struct intel_display *display, return; } - drm_dbg(display->drm, "Found MIPI sequence block v%u\n", - sequence->version); + drm_dbg_kms(display->drm, "Found MIPI sequence block v%u\n", + sequence->version); seq_data = find_panel_sequence_block(display, sequence, panel_type, &seq_size); if (!seq_data) @@ -2113,7 +2110,7 @@ parse_mipi_sequence(struct intel_display *display, fixup_mipi_sequences(display, panel); - drm_dbg(display->drm, "MIPI related VBT parsing complete\n"); + drm_dbg_kms(display->drm, "MIPI related VBT parsing complete\n"); return; err: @@ -2242,15 +2239,15 @@ static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin) const u8 *ddc_pin_map; int i, n_entries; - if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) { + if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) { ddc_pin_map = adlp_ddc_pin_map; n_entries = ARRAY_SIZE(adlp_ddc_pin_map); - } else if (IS_ALDERLAKE_S(i915)) { + } else if (display->platform.alderlake_s) { ddc_pin_map = adls_ddc_pin_map; n_entries = ARRAY_SIZE(adls_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { return vbt_pin; - } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { + } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) { ddc_pin_map = rkl_pch_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) { @@ -2333,7 +2330,6 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo, static enum port dvo_port_to_port(struct intel_display *display, u8 dvo_port) { - struct drm_i915_private *i915 = to_i915(display->drm); /* * Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port. 
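A pattern worth calling out across these intel_bios.c hunks: core-driver platform checks such as IS_VALLEYVIEW(i915) and IS_ALDERLAKE_S(i915) are replaced with booleans kept on struct intel_display itself, which is what allows the local drm_i915_private variables to be dropped. Below is a minimal compilable sketch of the idea; the flag names mirror the ones used in the hunks, but the struct layout is illustrative, not the real definition.

#include <stdbool.h>

/* illustrative stand-in for the per-platform flags on struct intel_display */
struct sketch_display_platforms {
	bool valleyview;
	bool alderlake_s;
	bool rocketlake;
};

struct sketch_intel_display {
	struct sketch_display_platforms platform;
};

/* before: IS_ALDERLAKE_S(to_i915(display->drm)) */
static bool sketch_uses_adls_ddc_map(const struct sketch_intel_display *display)
{
	return display->platform.alderlake_s;
}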
@@ -2390,12 +2386,12 @@ static enum port dvo_port_to_port(struct intel_display *display, ARRAY_SIZE(xelpd_port_mapping[0]), xelpd_port_mapping, dvo_port); - else if (IS_ALDERLAKE_S(i915)) + else if (display->platform.alderlake_s) return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping), ARRAY_SIZE(adls_port_mapping[0]), adls_port_mapping, dvo_port); - else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) + else if (display->platform.dg1 || display->platform.rocketlake) return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping), ARRAY_SIZE(rkl_port_mapping[0]), rkl_port_mapping, @@ -2518,7 +2514,6 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, enum port port) { struct intel_display *display = devdata->display; - struct drm_i915_private *i915 = to_i915(display->drm); if (!intel_bios_encoder_supports_dvi(devdata)) return; @@ -2528,7 +2523,7 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, * with a HSW VBT where the level shifter value goes * up to 11, whereas the BDW max is 9. */ - if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) { + if (display->platform.broadwell && devdata->child.hdmi_level_shifter_value > 9) { drm_dbg_kms(display->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n", port_name(port), devdata->child.hdmi_level_shifter_value, 9); @@ -2617,14 +2612,13 @@ int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata static bool is_port_valid(struct intel_display *display, enum port port) { - struct drm_i915_private *i915 = to_i915(display->drm); /* * On some ICL SKUs port F is not present, but broken VBTs mark * the port as present. Only try to initialize port F for the * SKUs that may actually have it. */ - if (port == PORT_F && IS_ICELAKE(i915)) - return IS_ICL_WITH_PORT_F(i915); + if (port == PORT_F && display->platform.icelake) + return display->platform.icelake_port_f; return true; } @@ -2722,9 +2716,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) static bool has_ddi_port_info(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - return DISPLAY_VER(display) >= 5 || IS_G4X(i915); + return DISPLAY_VER(display) >= 5 || display->platform.g4x; } static void parse_ddi_ports(struct intel_display *display) @@ -2770,9 +2762,9 @@ static bool child_device_size_valid(struct intel_display *display, int size) expected_size = child_device_expected_size(display->vbt.version); if (expected_size < 0) { expected_size = sizeof(struct child_device_config); - drm_dbg(display->drm, - "Expected child device config size for VBT version %u not known; assuming %d\n", - display->vbt.version, expected_size); + drm_dbg_kms(display->drm, + "Expected child device config size for VBT version %u not known; assuming %d\n", + display->vbt.version, expected_size); } /* Flag an error for unexpected size, but continue anyway. 
*/ @@ -2795,7 +2787,6 @@ static bool child_device_size_valid(struct intel_display *display, int size) static void parse_general_definitions(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_general_definitions *defs; struct intel_bios_encoder_data *devdata; const struct child_device_config *child; @@ -2820,7 +2811,7 @@ parse_general_definitions(struct intel_display *display) bus_pin = defs->crt_ddc_gmbus_pin; drm_dbg_kms(display->drm, "crt_ddc_bus_pin: %d\n", bus_pin); - if (intel_gmbus_is_valid_pin(i915, bus_pin)) + if (intel_gmbus_is_valid_pin(display, bus_pin)) display->vbt.crt_ddc_pin = bus_pin; if (!child_device_size_valid(display, defs->child_dev_size)) @@ -2906,7 +2897,7 @@ init_vbt_missing_defaults(struct intel_display *display) unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask; enum port port; - if (!HAS_DDI(display) && !IS_CHERRYVIEW(i915)) + if (!HAS_DDI(display) && !display->platform.cherryview) return; for_each_port_masked(port, ports) { @@ -2963,6 +2954,9 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) return _vbt + vbt->bdb_offset; } +static const char vbt_signature[] = "$VBT"; +static const int vbt_signature_len = 4; + /** * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT * @display: display device @@ -2985,7 +2979,7 @@ bool intel_bios_is_valid_vbt(struct intel_display *display, return false; } - if (memcmp(vbt->signature, "$VBT", 4)) { + if (memcmp(vbt->signature, vbt_signature, vbt_signature_len)) { drm_dbg_kms(display->drm, "VBT invalid signature\n"); return false; } @@ -3052,131 +3046,59 @@ static struct vbt_header *firmware_get_vbt(struct intel_display *display, return vbt; } -static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) -{ - intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset); - - return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER); -} - -static struct vbt_header *spi_oprom_get_vbt(struct intel_display *display, - size_t *size) -{ - struct drm_i915_private *i915 = to_i915(display->drm); - u32 count, data, found, store = 0; - u32 static_region, oprom_offset; - u32 oprom_size = 0x200000; - u16 vbt_size; - u32 *vbt; - - static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS); - static_region &= OPTIONROM_SPI_REGIONID_MASK; - intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region); - - oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET); - oprom_offset &= OROM_OFFSET_MASK; - - for (count = 0; count < oprom_size; count += 4) { - data = intel_spi_read(&i915->uncore, oprom_offset + count); - if (data == *((const u32 *)"$VBT")) { - found = oprom_offset + count; - break; - } - } - - if (count >= oprom_size) - goto err_not_found; - - /* Get VBT size and allocate space for the VBT */ - vbt_size = intel_spi_read(&i915->uncore, - found + offsetof(struct vbt_header, vbt_size)); - vbt_size &= 0xffff; - - vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); - if (!vbt) - goto err_not_found; - - for (count = 0; count < vbt_size; count += 4) - *(vbt + store++) = intel_spi_read(&i915->uncore, found + count); - - if (!intel_bios_is_valid_vbt(display, vbt, vbt_size)) - goto err_free_vbt; - - drm_dbg_kms(display->drm, "Found valid VBT in SPI flash\n"); - - if (size) - *size = vbt_size; - - return (struct vbt_header *)vbt; - -err_free_vbt: - kfree(vbt); -err_not_found: - return NULL; -} - static struct vbt_header *oprom_get_vbt(struct intel_display *display, - size_t *sizep) + struct intel_rom *rom, + size_t 
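/* editor's note: 'rom' abstracts SPI flash vs. PCI option ROM -- the caller
 * passes intel_rom_spi() or intel_rom_pci() plus a matching 'type' string
 * for the "Found valid VBT in %s" log message */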
*size, const char *type) { - struct pci_dev *pdev = to_pci_dev(display->drm->dev); - void __iomem *p = NULL, *oprom; struct vbt_header *vbt; - u16 vbt_size; - size_t i, size; + size_t vbt_size; + loff_t offset; - oprom = pci_map_rom(pdev, &size); - if (!oprom) + if (!rom) return NULL; - /* Scour memory looking for the VBT signature. */ - for (i = 0; i + 4 < size; i += 4) { - if (ioread32(oprom + i) != *((const u32 *)"$VBT")) - continue; + BUILD_BUG_ON(vbt_signature_len != sizeof(vbt_signature) - 1); + BUILD_BUG_ON(vbt_signature_len != sizeof(u32)); - p = oprom + i; - size -= i; - break; - } + offset = intel_rom_find(rom, *(const u32 *)vbt_signature); + if (offset < 0) + goto err_free_rom; - if (!p) - goto err_unmap_oprom; - - if (sizeof(struct vbt_header) > size) { - drm_dbg(display->drm, "VBT header incomplete\n"); - goto err_unmap_oprom; + if (sizeof(struct vbt_header) > intel_rom_size(rom) - offset) { + drm_dbg_kms(display->drm, "VBT header incomplete\n"); + goto err_free_rom; } - vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size)); - if (vbt_size > size) { - drm_dbg(display->drm, - "VBT incomplete (vbt_size overflows)\n"); - goto err_unmap_oprom; + BUILD_BUG_ON(sizeof(vbt->vbt_size) != sizeof(u16)); + + vbt_size = intel_rom_read16(rom, offset + offsetof(struct vbt_header, vbt_size)); + if (vbt_size > intel_rom_size(rom) - offset) { + drm_dbg_kms(display->drm, "VBT incomplete (vbt_size overflows)\n"); + goto err_free_rom; } - /* The rest will be validated by intel_bios_is_valid_vbt() */ - vbt = kmalloc(vbt_size, GFP_KERNEL); + vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); if (!vbt) - goto err_unmap_oprom; + goto err_free_rom; - memcpy_fromio(vbt, p, vbt_size); + intel_rom_read_block(rom, vbt, offset, vbt_size); if (!intel_bios_is_valid_vbt(display, vbt, vbt_size)) goto err_free_vbt; - pci_unmap_rom(pdev, oprom); + drm_dbg_kms(display->drm, "Found valid VBT in %s\n", type); - if (sizep) - *sizep = vbt_size; + if (size) + *size = vbt_size; - drm_dbg_kms(display->drm, "Found valid VBT in PCI ROM\n"); + intel_rom_free(rom); return vbt; err_free_vbt: kfree(vbt); -err_unmap_oprom: - pci_unmap_rom(pdev, oprom); - +err_free_rom: + intel_rom_free(rom); return NULL; } @@ -3198,11 +3120,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display */ if (!vbt && IS_DGFX(i915)) with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vbt = spi_oprom_get_vbt(display, sizep); + vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash"); if (!vbt) with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vbt = oprom_get_vbt(display, sizep); + vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM"); return vbt; } @@ -3406,7 +3328,6 @@ bool intel_bios_is_tv_present(struct intel_display *display) */ bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_bios_encoder_data *devdata; if (list_empty(&display->vbt.display_devices)) @@ -3423,7 +3344,7 @@ bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin) child->device_type != DEVICE_TYPE_LFP) continue; - if (intel_gmbus_is_valid_pin(i915, child->i2c_pin)) + if (intel_gmbus_is_valid_pin(display, child->i2c_pin)) *i2c_pin = child->i2c_pin; /* However, we cannot trust the BIOS writers to populate @@ -3671,17 +3592,16 @@ static const u8 direct_aux_ch_map[] = { static enum aux_ch map_aux_ch(struct intel_display *display, u8 aux_channel) { - struct drm_i915_private *i915 = 
to_i915(display->drm); const u8 *aux_ch_map; int i, n_entries; if (DISPLAY_VER(display) >= 13) { aux_ch_map = adlp_aux_ch_map; n_entries = ARRAY_SIZE(adlp_aux_ch_map); - } else if (IS_ALDERLAKE_S(i915)) { + } else if (display->platform.alderlake_s) { aux_ch_map = adls_aux_ch_map; n_entries = ARRAY_SIZE(adls_aux_ch_map); - } else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) { + } else if (display->platform.dg1 || display->platform.rocketlake) { aux_ch_map = rkl_aux_ch_map; n_entries = ARRAY_SIZE(rkl_aux_ch_map); } else { diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c new file mode 100644 index 000000000000..fbd16d7b58d9 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_bo.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include "gem/i915_gem_mman.h" +#include "gem/i915_gem_object.h" +#include "gem/i915_gem_object_frontbuffer.h" +#include "i915_debugfs.h" +#include "intel_bo.h" + +bool intel_bo_is_tiled(struct drm_gem_object *obj) +{ + return i915_gem_object_is_tiled(to_intel_bo(obj)); +} + +bool intel_bo_is_userptr(struct drm_gem_object *obj) +{ + return i915_gem_object_is_userptr(to_intel_bo(obj)); +} + +bool intel_bo_is_shmem(struct drm_gem_object *obj) +{ + return i915_gem_object_is_shmem(to_intel_bo(obj)); +} + +bool intel_bo_is_protected(struct drm_gem_object *obj) +{ + return i915_gem_object_is_protected(to_intel_bo(obj)); +} + +void intel_bo_flush_if_display(struct drm_gem_object *obj) +{ + i915_gem_object_flush_if_display(to_intel_bo(obj)); +} + +int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + return i915_gem_fb_mmap(to_intel_bo(obj), vma); +} + +int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size) +{ + return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size); +} + +struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj) +{ + return i915_gem_object_get_frontbuffer(to_intel_bo(obj)); +} + +struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, + struct intel_frontbuffer *front) +{ + return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front); +} + +void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) +{ + i915_debugfs_describe_obj(m, to_intel_bo(obj)); +} diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h new file mode 100644 index 000000000000..ea7a2253aaa5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_bo.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_BO__ +#define __INTEL_BO__ + +#include <linux/types.h> + +struct drm_gem_object; +struct seq_file; +struct vm_area_struct; + +bool intel_bo_is_tiled(struct drm_gem_object *obj); +bool intel_bo_is_userptr(struct drm_gem_object *obj); +bool intel_bo_is_shmem(struct drm_gem_object *obj); +bool intel_bo_is_protected(struct drm_gem_object *obj); +void intel_bo_flush_if_display(struct drm_gem_object *obj); +int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size); + +struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj); +struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, + struct intel_frontbuffer *front); + +void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj); + +#endif /* __INTEL_BO__ */ 
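The new intel_bo.c/intel_bo.h pair gives display code a GEM-agnostic view of buffer objects: callers pass plain struct drm_gem_object pointers, and the i915-specific to_intel_bo() conversion happens in exactly one file. A minimal sketch of how a display-side caller is expected to use these wrappers; fb_peek_header() is a hypothetical helper invented for illustration, and only the intel_bo_*() calls come from the header above:

#include <linux/errno.h>

#include "intel_bo.h"

/*
 * Hypothetical example (not part of this series): read the first bytes
 * of a framebuffer object without touching i915 GEM internals.
 */
static int fb_peek_header(struct drm_gem_object *obj, void *buf, int len)
{
	/* assume userptr objects cannot be read synchronously here */
	if (intel_bo_is_userptr(obj))
		return -EINVAL;

	/* flush pending CPU writes if the object is on the display path */
	intel_bo_flush_if_display(obj);

	return intel_bo_read_from_page(obj, 0, buf, len);
}

The same boundary covers frontbuffer get/set and fb mmap, so display files can include just this header instead of the gem/i915_gem_object*.h internals.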
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 47036d4abb33..a52b0ae68b96 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -743,7 +743,7 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1) && IS_DGFX(dev_priv)) + if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv)) xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info); else if (DISPLAY_VER(dev_priv) >= 14) tgl_get_bw_info(dev_priv, &mtl_sa_info); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index aa3ba66c5307..03c4eef3f92a 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/debugfs.h> #include <linux/time.h> #include <drm/drm_fixed.h> @@ -112,81 +113,81 @@ */ struct intel_cdclk_funcs { - void (*get_cdclk)(struct drm_i915_private *i915, + void (*get_cdclk)(struct intel_display *display, struct intel_cdclk_config *cdclk_config); - void (*set_cdclk)(struct drm_i915_private *i915, + void (*set_cdclk)(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); u8 (*calc_voltage_level)(int cdclk); }; -void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, +void intel_cdclk_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config); + display->funcs.cdclk->get_cdclk(display, cdclk_config); } -static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv, +static void intel_cdclk_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe); + display->funcs.cdclk->set_cdclk(display, cdclk_config, pipe); } static int intel_cdclk_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); - return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(state); + return display->funcs.cdclk->modeset_calc_cdclk(state); } -static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv, +static u8 intel_cdclk_calc_voltage_level(struct intel_display *display, int cdclk) { - return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk); + return display->funcs.cdclk->calc_voltage_level(cdclk); } -static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_133mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 133333; } -static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_200mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 200000; } -static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_266mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 266667; } -static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_333mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 333333; } -static void 
fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_400mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 400000; } -static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_450mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 450000; } -static void i85x_get_cdclk(struct drm_i915_private *dev_priv, +static void i85x_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 hpllcc = 0; /* @@ -225,10 +226,10 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv, } } -static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, +static void i915gm_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -249,10 +250,10 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, } } -static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, +static void i945gm_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -273,7 +274,7 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, } } -static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) +static unsigned int intel_hpll_vco(struct intel_display *display) { static const unsigned int blb_vco[8] = { [0] = 3200000, @@ -312,6 +313,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) [4] = 2666667, [5] = 4266667, }; + struct drm_i915_private *dev_priv = to_i915(display->drm); const unsigned int *vco_table; unsigned int vco; u8 tmp = 0; @@ -330,23 +332,23 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) else return 0; - tmp = intel_de_read(dev_priv, + tmp = intel_de_read(display, IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) - drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n", + drm_err(display->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); else - drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco); + drm_dbg_kms(display->drm, "HPLL VCO %u kHz\n", vco); return vco; } -static void g33_get_cdclk(struct drm_i915_private *dev_priv, +static void g33_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 }; static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 }; @@ -355,7 +357,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, unsigned int cdclk_sel; u16 tmp = 0; - cdclk_config->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(display); pci_read_config_word(pdev, GCFGC, &tmp); @@ -386,16 +388,16 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, return; fail: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unable to determine CDCLK. 
HPLL VCO=%u kHz, CFGC=0x%08x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 190476; } -static void pnv_get_cdclk(struct drm_i915_private *dev_priv, +static void pnv_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -414,7 +416,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, cdclk_config->cdclk = 200000; break; default: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unknown pnv display core clock 0x%04x\n", gcfgc); fallthrough; case GC_DISPLAY_CLOCK_133_MHZ_PNV: @@ -426,10 +428,10 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, } } -static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, +static void i965gm_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); static const u8 div_3200[] = { 16, 10, 8 }; static const u8 div_4000[] = { 20, 12, 10 }; static const u8 div_5333[] = { 24, 16, 14 }; @@ -437,7 +439,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, unsigned int cdclk_sel; u16 tmp = 0; - cdclk_config->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(display); pci_read_config_word(pdev, GCFGC, &tmp); @@ -465,20 +467,20 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, return; fail: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 200000; } -static void gm45_get_cdclk(struct drm_i915_private *dev_priv, +static void gm45_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); unsigned int cdclk_sel; u16 tmp = 0; - cdclk_config->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(display); pci_read_config_word(pdev, GCFGC, &tmp); @@ -494,7 +496,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, cdclk_config->cdclk = cdclk_sel ? 320000 : 228571; break; default: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unable to determine CDCLK. 
HPLL VCO=%u, CFGC=0x%04x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 222222; @@ -502,15 +504,16 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, } } -static void hsw_get_cdclk(struct drm_i915_private *dev_priv, +static void hsw_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); + struct drm_i915_private *dev_priv = to_i915(display->drm); + u32 lcpll = intel_de_read(display, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_config->cdclk = 800000; - else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) + else if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) cdclk_config->cdclk = 450000; @@ -520,8 +523,9 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv, cdclk_config->cdclk = 540000; } -static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) +static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; @@ -540,8 +544,10 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) return 200000; } -static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) +static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (IS_VALLEYVIEW(dev_priv)) { if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ return 2; @@ -559,9 +565,10 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) } } -static void vlv_get_cdclk(struct drm_i915_private *dev_priv, +static void vlv_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val; vlv_iosf_sb_get(dev_priv, @@ -585,8 +592,9 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv, DSPFREQGUAR_SHIFT_CHV; } -static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) +static void vlv_program_pfi_credits(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); unsigned int credits, default_credits; if (IS_CHERRYVIEW(dev_priv)) @@ -594,7 +602,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) else default_credits = PFI_CREDIT(8); - if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) { + if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) { /* CHV suggested value is 31 or 63 */ if (IS_CHERRYVIEW(dev_priv)) credits = PFI_CREDIT_63; @@ -608,24 +616,25 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) * WA - write default credits before re-programming * FIXME: should we also set the resend bit here? */ - intel_de_write(dev_priv, GCI_CONTROL, + intel_de_write(display, GCI_CONTROL, VGA_FAST_MODE_DISABLE | default_credits); - intel_de_write(dev_priv, GCI_CONTROL, + intel_de_write(display, GCI_CONTROL, VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND); /* * FIXME is this guaranteed to clear * immediately or should we poll for it? 
*/ - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND); + drm_WARN_ON(display->drm, + intel_de_read(display, GCI_CONTROL) & PFI_CREDIT_RESEND); } -static void vlv_set_cdclk(struct drm_i915_private *dev_priv, +static void vlv_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; @@ -662,7 +671,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for CDclk change\n"); } @@ -681,7 +690,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for CDclk change\n"); } @@ -704,17 +713,18 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, BIT(VLV_IOSF_SB_BUNIT) | BIT(VLV_IOSF_SB_PUNIT)); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); - vlv_program_pfi_credits(dev_priv); + vlv_program_pfi_credits(display); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } -static void chv_set_cdclk(struct drm_i915_private *dev_priv, +static void chv_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; @@ -746,15 +756,15 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for CDclk change\n"); } vlv_punit_put(dev_priv); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); - vlv_program_pfi_credits(dev_priv); + vlv_program_pfi_credits(display); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } @@ -786,15 +796,15 @@ static u8 bdw_calc_voltage_level(int cdclk) } } -static void bdw_get_cdclk(struct drm_i915_private *dev_priv, +static void bdw_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); + u32 lcpll = intel_de_read(display, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_config->cdclk = 800000; - else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) + else if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) cdclk_config->cdclk = 450000; @@ -830,15 +840,16 @@ static u32 bdw_cdclk_freq_sel(int cdclk) } } -static void bdw_set_cdclk(struct drm_i915_private *dev_priv, +static void bdw_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int ret; - if (drm_WARN(&dev_priv->drm, - (intel_de_read(dev_priv, LCPLL_CTL) & + if (drm_WARN(display->drm, + (intel_de_read(display, LCPLL_CTL) & (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | 
LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | @@ -848,39 +859,39 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "failed to inform pcode about cdclk change\n"); return; } - intel_de_rmw(dev_priv, LCPLL_CTL, + intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_CD_SOURCE_FCLK); /* * According to the spec, it should be enough to poll for this 1 us. * However, extensive testing shows that this can take longer. */ - if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & + if (wait_for_us(intel_de_read(display, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE, 100)) - drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); + drm_err(display->drm, "Switching to FCLK failed\n"); - intel_de_rmw(dev_priv, LCPLL_CTL, + intel_de_rmw(display, LCPLL_CTL, LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk)); - intel_de_rmw(dev_priv, LCPLL_CTL, + intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); - if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & + if (wait_for_us((intel_de_read(display, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) - drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); + drm_err(display->drm, "Switching back to LCPLL failed\n"); snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level); - intel_de_write(dev_priv, CDCLK_FREQ, + intel_de_write(display, CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); } static int skl_calc_cdclk(int min_cdclk, int vco) @@ -918,7 +929,7 @@ static u8 skl_calc_voltage_level(int cdclk) return 0; } -static void skl_dpll0_update(struct drm_i915_private *dev_priv, +static void skl_dpll0_update(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { u32 val; @@ -926,16 +937,16 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv, cdclk_config->ref = 24000; cdclk_config->vco = 0; - val = intel_de_read(dev_priv, LCPLL1_CTL); + val = intel_de_read(display, LCPLL1_CTL); if ((val & LCPLL_PLL_ENABLE) == 0) return; - if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0)) + if (drm_WARN_ON(display->drm, (val & LCPLL_PLL_LOCK) == 0)) return; - val = intel_de_read(dev_priv, DPLL_CTRL1); + val = intel_de_read(display, DPLL_CTRL1); - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(display->drm, (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != @@ -959,19 +970,19 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv, } } -static void skl_get_cdclk(struct drm_i915_private *dev_priv, +static void skl_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { u32 cdctl; - skl_dpll0_update(dev_priv, cdclk_config); + skl_dpll0_update(display, cdclk_config); cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref; if (cdclk_config->vco == 0) goto out; - cdctl = intel_de_read(dev_priv, CDCLK_CTL); + cdctl = intel_de_read(display, CDCLK_CTL); if (cdclk_config->vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { @@ -1026,19 +1037,19 @@ static int skl_cdclk_decimal(int cdclk) return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco) +static void skl_set_preferred_cdclk_vco(struct intel_display *display, int vco) { - bool changed = 
i915->display.cdclk.skl_preferred_vco_freq != vco; + bool changed = display->cdclk.skl_preferred_vco_freq != vco; - i915->display.cdclk.skl_preferred_vco_freq = vco; + display->cdclk.skl_preferred_vco_freq = vco; if (changed) - intel_update_max_cdclk(i915); + intel_update_max_cdclk(display); } -static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco) +static u32 skl_dpll0_link_rate(struct intel_display *display, int vco) { - drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); + drm_WARN_ON(display->drm, vco != 8100000 && vco != 8640000); /* * We always enable DPLL0 with the lowest link rate possible, but still @@ -1055,47 +1066,47 @@ static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco) return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0); } -static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) +static void skl_dpll0_enable(struct intel_display *display, int vco) { - intel_de_rmw(dev_priv, DPLL_CTRL1, + intel_de_rmw(display, DPLL_CTRL1, DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0), DPLL_CTRL1_OVERRIDE(SKL_DPLL0) | - skl_dpll0_link_rate(dev_priv, vco)); - intel_de_posting_read(dev_priv, DPLL_CTRL1); + skl_dpll0_link_rate(display, vco)); + intel_de_posting_read(display, DPLL_CTRL1); - intel_de_rmw(dev_priv, LCPLL1_CTL, + intel_de_rmw(display, LCPLL1_CTL, 0, LCPLL_PLL_ENABLE); - if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) - drm_err(&dev_priv->drm, "DPLL0 not locked\n"); + if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) + drm_err(display->drm, "DPLL0 not locked\n"); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; /* We'll want to keep using the current vco from now on. */ - skl_set_preferred_cdclk_vco(dev_priv, vco); + skl_set_preferred_cdclk_vco(display, vco); } -static void skl_dpll0_disable(struct drm_i915_private *dev_priv) +static void skl_dpll0_disable(struct intel_display *display) { - intel_de_rmw(dev_priv, LCPLL1_CTL, + intel_de_rmw(display, LCPLL1_CTL, LCPLL_PLL_ENABLE, 0); - if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n"); + if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) + drm_err(display->drm, "Couldn't disable DPLL0\n"); - dev_priv->display.cdclk.hw.vco = 0; + display->cdclk.hw.vco = 0; } -static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, +static u32 skl_cdclk_freq_sel(struct intel_display *display, int cdclk, int vco) { switch (cdclk) { default: - drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->display.cdclk.hw.bypass); - drm_WARN_ON(&dev_priv->drm, vco != 0); + drm_WARN_ON(display->drm, + cdclk != display->cdclk.hw.bypass); + drm_WARN_ON(display->drm, vco != 0); fallthrough; case 308571: case 337500: @@ -1111,10 +1122,11 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, } } -static void skl_set_cdclk(struct drm_i915_private *dev_priv, +static void skl_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 freq_select, cdclk_ctl; @@ -1128,7 +1140,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, * use the corresponding VCO freq as that always leads to using the * minimum 308MHz CDCLK. 
*/ - drm_WARN_ON_ONCE(&dev_priv->drm, + drm_WARN_ON_ONCE(display->drm, IS_SKYLAKE(dev_priv) && vco == 8640000); ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, @@ -1136,54 +1148,54 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to inform PCU about cdclk change (%d)\n", ret); return; } - freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco); + freq_select = skl_cdclk_freq_sel(display, cdclk, vco); - if (dev_priv->display.cdclk.hw.vco != 0 && - dev_priv->display.cdclk.hw.vco != vco) - skl_dpll0_disable(dev_priv); + if (display->cdclk.hw.vco != 0 && + display->cdclk.hw.vco != vco) + skl_dpll0_disable(display); - cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL); + cdclk_ctl = intel_de_read(display, CDCLK_CTL); - if (dev_priv->display.cdclk.hw.vco != vco) { + if (display->cdclk.hw.vco != vco) { /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); } /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); - intel_de_posting_read(dev_priv, CDCLK_CTL); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); + intel_de_posting_read(display, CDCLK_CTL); - if (dev_priv->display.cdclk.hw.vco != vco) - skl_dpll0_enable(dev_priv, vco); + if (display->cdclk.hw.vco != vco) + skl_dpll0_enable(display, vco); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); - intel_de_posting_read(dev_priv, CDCLK_CTL); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); + intel_de_posting_read(display, CDCLK_CTL); /* inform PCU of the change */ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); } -static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void skl_sanitize_cdclk(struct intel_display *display) { u32 cdctl, expected; @@ -1192,15 +1204,15 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * There is SWF18 scratchpad register defined which is set by the * pre-os which can be used by the OS drivers to check the status */ - if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0) + if ((intel_de_read(display, SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; - intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? 
*/ - if (dev_priv->display.cdclk.hw.vco == 0 || - dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) + if (display->cdclk.hw.vco == 0 || + display->cdclk.hw.cdclk == display->cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock @@ -1209,60 +1221,60 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. */ - cdctl = intel_de_read(dev_priv, CDCLK_CTL); + cdctl = intel_de_read(display, CDCLK_CTL); expected = (cdctl & CDCLK_FREQ_SEL_MASK) | - skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk); + skl_cdclk_decimal(display->cdclk.hw.cdclk); if (cdctl == expected) /* All well; nothing to sanitize */ return; sanitize: - drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); + drm_dbg_kms(display->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->display.cdclk.hw.cdclk = 0; + display->cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->display.cdclk.hw.vco = ~0; + display->cdclk.hw.vco = ~0; } -static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) +static void skl_cdclk_init_hw(struct intel_display *display) { struct intel_cdclk_config cdclk_config; - skl_sanitize_cdclk(dev_priv); + skl_sanitize_cdclk(display); - if (dev_priv->display.cdclk.hw.cdclk != 0 && - dev_priv->display.cdclk.hw.vco != 0) { + if (display->cdclk.hw.cdclk != 0 && + display->cdclk.hw.vco != 0) { /* * Use the current vco as our initial * guess as to what the preferred vco is. */ - if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0) - skl_set_preferred_cdclk_vco(dev_priv, - dev_priv->display.cdclk.hw.vco); + if (display->cdclk.skl_preferred_vco_freq == 0) + skl_set_preferred_cdclk_vco(display, + display->cdclk.hw.vco); return; } - cdclk_config = dev_priv->display.cdclk.hw; + cdclk_config = display->cdclk.hw; - cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq; + cdclk_config.vco = display->cdclk.skl_preferred_vco_freq; if (cdclk_config.vco == 0) cdclk_config.vco = 8100000; cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco); cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); - skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + skl_set_cdclk(display, &cdclk_config, INVALID_PIPE); } -static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) +static void skl_cdclk_uninit_hw(struct intel_display *display) { - struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; + struct intel_cdclk_config cdclk_config = display->cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); - skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + skl_set_cdclk(display, &cdclk_config, INVALID_PIPE); } struct intel_cdclk_vals { @@ -1456,6 +1468,39 @@ static const struct intel_cdclk_vals xe2hpd_cdclk_table[] = { {} }; +static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = { + { .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa }, + { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a }, + { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 }, + { .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 }, + { .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee }, + { .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de }, + { .refclk = 38400, .cdclk = 268800, .ratio 
= 16, .waveform = 0xfefe }, + { .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe }, + { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 326400, .ratio = 17, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 345600, .ratio = 18, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 364800, .ratio = 19, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 384000, .ratio = 20, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff }, + {} +}; + static const int cdclk_squash_len = 16; static int cdclk_squash_divider(u16 waveform) @@ -1470,37 +1515,37 @@ static int cdclk_divider(int cdclk, int vco, u16 waveform) cdclk * cdclk_squash_len); } -static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) +static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk) { - const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; + const struct intel_cdclk_vals *table = display->cdclk.table; int i; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->display.cdclk.hw.ref && + if (table[i].refclk == display->cdclk.hw.ref && table[i].cdclk >= min_cdclk) return table[i].cdclk; - drm_WARN(&dev_priv->drm, 1, + drm_WARN(display->drm, 1, "Cannot satisfy minimum cdclk %d with refclk %u\n", - min_cdclk, dev_priv->display.cdclk.hw.ref); + min_cdclk, display->cdclk.hw.ref); return 0; } -static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) +static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; + const struct intel_cdclk_vals *table = display->cdclk.table; int i; - if (cdclk == dev_priv->display.cdclk.hw.bypass) + if (cdclk == display->cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->display.cdclk.hw.ref && + if (table[i].refclk == display->cdclk.hw.ref && table[i].cdclk == cdclk) - return dev_priv->display.cdclk.hw.ref * table[i].ratio; + return display->cdclk.hw.ref * table[i].ratio; - drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->display.cdclk.hw.ref); + drm_WARN(display->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, display->cdclk.hw.ref); return 0; } @@ -1582,10 +1627,20 @@ static u8 rplu_calc_voltage_level(int cdclk) rplu_voltage_level_max_cdclk); } -static void 
icl_readout_refclk(struct drm_i915_private *dev_priv, +static u8 xe3lpd_calc_voltage_level(int cdclk) +{ + /* + * Starting with xe3lpd, the power controller does not need the + * voltage index when doing the modeset update. This function is + * best left defined, simply returning 0 for the mask. + */ + return 0; +} + +static void icl_readout_refclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; + u32 dssm = intel_de_read(display, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; switch (dssm) { default: @@ -1603,19 +1658,20 @@ static void icl_readout_refclk(struct drm_i915_private *dev_priv, } } -static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, +static void bxt_de_pll_readout(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val, ratio; if (IS_DG2(dev_priv)) cdclk_config->ref = 38400; - else if (DISPLAY_VER(dev_priv) >= 11) - icl_readout_refclk(dev_priv, cdclk_config); + else if (DISPLAY_VER(display) >= 11) + icl_readout_refclk(display, cdclk_config); else cdclk_config->ref = 19200; - val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE); + val = intel_de_read(display, BXT_DE_PLL_ENABLE); if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || (val & BXT_DE_PLL_LOCK) == 0) { /* @@ -1630,26 +1686,26 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, * DISPLAY_VER >= 11 have the ratio directly in the PLL enable register, * gen9lp had it in a separate PLL control register. */ - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) ratio = val & ICL_CDCLK_PLL_RATIO_MASK; else - ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; + ratio = intel_de_read(display, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; cdclk_config->vco = ratio * cdclk_config->ref; } -static void bxt_get_cdclk(struct drm_i915_private *dev_priv, +static void bxt_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { u32 squash_ctl = 0; u32 divider; int div; - bxt_de_pll_readout(dev_priv, cdclk_config); + bxt_de_pll_readout(display, cdclk_config); - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) cdclk_config->bypass = cdclk_config->ref / 2; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) cdclk_config->bypass = 50000; else cdclk_config->bypass = cdclk_config->ref; @@ -1659,7 +1715,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, goto out; } - divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; + divider = intel_de_read(display, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: @@ -1679,8 +1735,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, return; } - if (HAS_CDCLK_SQUASH(dev_priv)) - squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL); + if (HAS_CDCLK_SQUASH(display)) + squash_ctl = intel_de_read(display, CDCLK_SQUASH_CTL); if (squash_ctl & CDCLK_SQUASH_ENABLE) { u16 waveform; @@ -1696,107 +1752,107 @@ } out: - if (DISPLAY_VER(dev_priv) >= 20) - cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN; + if (DISPLAY_VER(display) >= 20) + cdclk_config->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN; /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires.
*/ cdclk_config->voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk); + intel_cdclk_calc_voltage_level(display, cdclk_config->cdclk); } -static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) +static void bxt_de_pll_disable(struct intel_display *display) { - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0); + intel_de_write(display, BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_de_wait_for_clear(dev_priv, + if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n"); + drm_err(display->drm, "timeout waiting for DE PLL unlock\n"); - dev_priv->display.cdclk.hw.vco = 0; + display->cdclk.hw.vco = 0; } -static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) +static void bxt_de_pll_enable(struct intel_display *display, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref); - intel_de_rmw(dev_priv, BXT_DE_PLL_CTL, + intel_de_rmw(display, BXT_DE_PLL_CTL, BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio)); - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ - if (intel_de_wait_for_set(dev_priv, + if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n"); + drm_err(display->drm, "timeout waiting for DE PLL lock\n"); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; } -static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) +static void icl_cdclk_pll_disable(struct intel_display *display) { - intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE, + intel_de_rmw(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n"); + if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n"); - dev_priv->display.cdclk.hw.vco = 0; + display->cdclk.hw.vco = 0; } -static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) +static void icl_cdclk_pll_enable(struct intel_display *display, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref); u32 val; val = ICL_CDCLK_PLL_RATIO(ratio); - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); val |= BXT_DE_PLL_PLL_ENABLE; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n"); + if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n"); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; } -static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) +static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref); u32 val; /* Write PLL ratio without disabling */ val = 
ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Submit freq change request */ val |= BXT_DE_PLL_FREQ_REQ; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, + if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n"); + drm_err(display->drm, "timeout waiting for FREQ change request ack\n"); val &= ~BXT_DE_PLL_FREQ_REQ; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; } -static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +static u32 bxt_cdclk_cd2x_pipe(struct intel_display *display, enum pipe pipe) { - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (pipe == INVALID_PIPE) return TGL_CDCLK_CD2X_PIPE_NONE; else return TGL_CDCLK_CD2X_PIPE(pipe); - } else if (DISPLAY_VER(dev_priv) >= 11) { + } else if (DISPLAY_VER(display) >= 11) { if (pipe == INVALID_PIPE) return ICL_CDCLK_CD2X_PIPE_NONE; else @@ -1809,15 +1865,15 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe } } -static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, +static u32 bxt_cdclk_cd2x_div_sel(struct intel_display *display, int cdclk, int vco, u16 waveform) { /* cdclk = vco / 2 / div{1,1.5,2,4} */ switch (cdclk_divider(cdclk, vco, waveform)) { default: - drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->display.cdclk.hw.bypass); - drm_WARN_ON(&dev_priv->drm, vco != 0); + drm_WARN_ON(display->drm, + cdclk != display->cdclk.hw.bypass); + drm_WARN_ON(display->drm, vco != 0); fallthrough; case 2: return BXT_CDCLK_CD2X_DIV_SEL_1; @@ -1830,47 +1886,47 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, } } -static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv, +static u16 cdclk_squash_waveform(struct intel_display *display, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; + const struct intel_cdclk_vals *table = display->cdclk.table; int i; - if (cdclk == dev_priv->display.cdclk.hw.bypass) + if (cdclk == display->cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->display.cdclk.hw.ref && + if (table[i].refclk == display->cdclk.hw.ref && table[i].cdclk == cdclk) return table[i].waveform; - drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->display.cdclk.hw.ref); + drm_WARN(display->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, display->cdclk.hw.ref); return 0xffff; } -static void icl_cdclk_pll_update(struct drm_i915_private *i915, int vco) +static void icl_cdclk_pll_update(struct intel_display *display, int vco) { - if (i915->display.cdclk.hw.vco != 0 && - i915->display.cdclk.hw.vco != vco) - icl_cdclk_pll_disable(i915); + if (display->cdclk.hw.vco != 0 && + display->cdclk.hw.vco != vco) + icl_cdclk_pll_disable(display); - if (i915->display.cdclk.hw.vco != vco) - icl_cdclk_pll_enable(i915, vco); + if (display->cdclk.hw.vco != vco) + icl_cdclk_pll_enable(display, vco); } -static void bxt_cdclk_pll_update(struct drm_i915_private *i915, int vco) +static void bxt_cdclk_pll_update(struct intel_display *display, 
int vco) { - if (i915->display.cdclk.hw.vco != 0 && - i915->display.cdclk.hw.vco != vco) - bxt_de_pll_disable(i915); + if (display->cdclk.hw.vco != 0 && + display->cdclk.hw.vco != vco) + bxt_de_pll_disable(display); - if (i915->display.cdclk.hw.vco != vco) - bxt_de_pll_enable(i915, vco); + if (display->cdclk.hw.vco != vco) + bxt_de_pll_enable(display, vco); } -static void dg2_cdclk_squash_program(struct drm_i915_private *i915, +static void dg2_cdclk_squash_program(struct intel_display *display, u16 waveform) { u32 squash_ctl = 0; @@ -1879,7 +1935,7 @@ static void dg2_cdclk_squash_program(struct drm_i915_private *i915, squash_ctl = CDCLK_SQUASH_ENABLE | CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform; - intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl); + intel_de_write(display, CDCLK_SQUASH_CTL, squash_ctl); } static bool cdclk_pll_is_unknown(unsigned int vco) @@ -1892,38 +1948,40 @@ static bool cdclk_pll_is_unknown(unsigned int vco) return vco == ~0; } -static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915) +static bool mdclk_source_is_cdclk_pll(struct intel_display *display) { - return DISPLAY_VER(i915) >= 20; + return DISPLAY_VER(display) >= 20; } -static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915) +static u32 xe2lpd_mdclk_source_sel(struct intel_display *display) { - if (mdclk_source_is_cdclk_pll(i915)) + if (mdclk_source_is_cdclk_pll(display)) return MDCLK_SOURCE_SEL_CDCLK_PLL; return MDCLK_SOURCE_SEL_CD2XCLK; } -int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915, +int intel_mdclk_cdclk_ratio(struct intel_display *display, const struct intel_cdclk_config *cdclk_config) { - if (mdclk_source_is_cdclk_pll(i915)) + if (mdclk_source_is_cdclk_pll(display)) return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk); /* Otherwise, source for MDCLK is CD2XCLK. */ return 2; } -static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915, +static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display, const struct intel_cdclk_config *cdclk_config) { + struct drm_i915_private *i915 = to_i915(display->drm); + intel_dbuf_mdclk_cdclk_ratio_update(i915, - intel_mdclk_cdclk_ratio(i915, cdclk_config), + intel_mdclk_cdclk_ratio(display, cdclk_config), cdclk_config->joined_mbus); } -static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915, +static bool cdclk_compute_crawl_and_squash_midpoint(struct intel_display *display, const struct intel_cdclk_config *old_cdclk_config, const struct intel_cdclk_config *new_cdclk_config, struct intel_cdclk_config *mid_cdclk_config) @@ -1936,11 +1994,11 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 return false; /* Return if both Squash and Crawl are not present */ - if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915)) + if (!HAS_CDCLK_CRAWL(display) || !HAS_CDCLK_SQUASH(display)) return false; - old_waveform = cdclk_squash_waveform(i915, old_cdclk_config->cdclk); - new_waveform = cdclk_squash_waveform(i915, new_cdclk_config->cdclk); + old_waveform = cdclk_squash_waveform(display, old_cdclk_config->cdclk); + new_waveform = cdclk_squash_waveform(display, new_cdclk_config->cdclk); /* Return if Squash only or Crawl only is the desired action */ if (old_cdclk_config->vco == 0 || new_cdclk_config->vco == 0 || @@ -1957,7 +2015,7 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 * Should not happen currently. We might need more midpoint * transitions if we need to also change the cd2x divider. 
*/ - if (drm_WARN_ON(&i915->drm, old_div != new_div)) + if (drm_WARN_ON(display->drm, old_div != new_div)) return false; *mid_cdclk_config = *new_cdclk_config; @@ -1986,37 +2044,40 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 /* make sure the mid clock came out sane */ - drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk < + drm_WARN_ON(display->drm, mid_cdclk_config->cdclk < min(old_cdclk_config->cdclk, new_cdclk_config->cdclk)); - drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk > - i915->display.cdclk.max_cdclk_freq); - drm_WARN_ON(&i915->drm, cdclk_squash_waveform(i915, mid_cdclk_config->cdclk) != + drm_WARN_ON(display->drm, mid_cdclk_config->cdclk > + display->cdclk.max_cdclk_freq); + drm_WARN_ON(display->drm, cdclk_squash_waveform(display, mid_cdclk_config->cdclk) != mid_waveform); return true; } -static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv) +static bool pll_enable_wa_needed(struct intel_display *display) { - return (DISPLAY_VER_FULL(dev_priv) == IP_VER(20, 0) || - DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0) || + struct drm_i915_private *dev_priv = to_i915(display->drm); + + return (DISPLAY_VERx100(display) == 2000 || + DISPLAY_VERx100(display) == 1400 || IS_DG2(dev_priv)) && - dev_priv->display.cdclk.hw.vco > 0; + display->cdclk.hw.vco > 0; } -static u32 bxt_cdclk_ctl(struct drm_i915_private *i915, +static u32 bxt_cdclk_ctl(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *i915 = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u16 waveform; u32 val; - waveform = cdclk_squash_waveform(i915, cdclk); + waveform = cdclk_squash_waveform(display, cdclk); - val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) | - bxt_cdclk_cd2x_pipe(i915, pipe); + val = bxt_cdclk_cd2x_div_sel(display, cdclk, vco, waveform) | + bxt_cdclk_cd2x_pipe(display, pipe); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, @@ -2026,50 +2087,52 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915, cdclk >= 500000) val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - if (DISPLAY_VER(i915) >= 20) - val |= xe2lpd_mdclk_source_sel(i915); + if (DISPLAY_VER(display) >= 20) + val |= xe2lpd_mdclk_source_sel(display); else val |= skl_cdclk_decimal(cdclk); return val; } -static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, +static void _bxt_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; - if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 && - !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { - if (dev_priv->display.cdclk.hw.vco != vco) - adlp_cdclk_pll_crawl(dev_priv, vco); - } else if (DISPLAY_VER(dev_priv) >= 11) { + if (HAS_CDCLK_CRAWL(display) && display->cdclk.hw.vco > 0 && vco > 0 && + !cdclk_pll_is_unknown(display->cdclk.hw.vco)) { + if (display->cdclk.hw.vco != vco) + adlp_cdclk_pll_crawl(display, vco); + } else if (DISPLAY_VER(display) >= 11) { /* wa_15010685871: dg2, mtl */ - if (pll_enable_wa_needed(dev_priv)) - dg2_cdclk_squash_program(dev_priv, 0); + if (pll_enable_wa_needed(display)) + dg2_cdclk_squash_program(display, 0); - icl_cdclk_pll_update(dev_priv, vco); - } else - bxt_cdclk_pll_update(dev_priv, vco); + icl_cdclk_pll_update(display, vco); + } else { + bxt_cdclk_pll_update(display, vco); + } - if (HAS_CDCLK_SQUASH(dev_priv)) { - u16 waveform = 
cdclk_squash_waveform(dev_priv, cdclk); + if (HAS_CDCLK_SQUASH(display)) { + u16 waveform = cdclk_squash_waveform(display, cdclk); - dg2_cdclk_squash_program(dev_priv, waveform); + dg2_cdclk_squash_program(display, waveform); } - intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe)); + intel_de_write(display, CDCLK_CTL, bxt_cdclk_ctl(display, cdclk_config, pipe)); if (pipe != INVALID_PIPE) - intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe)); } -static void bxt_set_cdclk(struct drm_i915_private *dev_priv, +static void bxt_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_config mid_cdclk_config; int cdclk = cdclk_config->cdclk; int ret = 0; @@ -2080,9 +2143,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * mailbox communication, skip * this step. */ - if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) + if (DISPLAY_VER(display) >= 14 || IS_DG2(dev_priv)) /* NOOP */; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, @@ -2097,35 +2160,35 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, 0x80000000, 150, 2); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to inform PCU about cdclk change (err %d, freq %d)\n", ret, cdclk); return; } - if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk) - xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config); + if (DISPLAY_VER(display) >= 20 && cdclk < display->cdclk.hw.cdclk) + xe2lpd_mdclk_cdclk_ratio_program(display, cdclk_config); - if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw, + if (cdclk_compute_crawl_and_squash_midpoint(display, &display->cdclk.hw, cdclk_config, &mid_cdclk_config)) { - _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe); - _bxt_set_cdclk(dev_priv, cdclk_config, pipe); + _bxt_set_cdclk(display, &mid_cdclk_config, pipe); + _bxt_set_cdclk(display, cdclk_config, pipe); } else { - _bxt_set_cdclk(dev_priv, cdclk_config, pipe); + _bxt_set_cdclk(display, cdclk_config, pipe); } - if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk) - xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config); + if (DISPLAY_VER(display) >= 20 && cdclk > display->cdclk.hw.cdclk) + xe2lpd_mdclk_cdclk_ratio_program(display, cdclk_config); - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) /* * NOOP - No Pcode communication needed for * Display versions 14 and beyond */; - else if (DISPLAY_VER(dev_priv) >= 11 && !IS_DG2(dev_priv)) + else if (DISPLAY_VER(display) >= 11 && !IS_DG2(dev_priv)) ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); - if (DISPLAY_VER(dev_priv) < 11) { + if (DISPLAY_VER(display) < 11) { /* * The timeout isn't specified, the 2ms used here is based on * experiment. 
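The midpoint logic above exists because the effective cdclk depends on two quantities at once: the PLL vco (refclk * ratio) and the 16-bit squash waveform, which passes only popcount(waveform)/16 of the CD2X clock cycles. When both must change, bxt_set_cdclk() goes through an intermediate configuration (_bxt_set_cdclk() twice) so the transition stays within legal frequencies. The arithmetic, as a standalone worked example (userspace C, not driver code) whose numbers reproduce the 153600 kHz row of the xe3lpd_cdclk_table above:

#include <stdio.h>

int main(void)
{
	const int ref = 38400;			/* kHz, xe3lpd refclk */
	const int ratio = 16;			/* PLL ratio from the table */
	const int vco = ref * ratio;		/* 614400 kHz */
	const int cd2x = vco / 2;		/* cd2x divider of 1: vco / 2 / 1 */
	const unsigned int waveform = 0xaaaa;	/* 8 of the 16 squash bits set */
	const int bits = __builtin_popcount(waveform);	/* GCC/Clang builtin */

	/* 307200 * 8 / 16 = 153600 kHz, matching the 0xaaaa table row */
	printf("effective cdclk = %d kHz\n", cd2x * bits / 16);
	return 0;
}

This is the same relation cdclk_divider() inverts when it recovers the cd2x divider from a (cdclk, vco, waveform) triple.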
@@ -2138,42 +2201,42 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, 150, 2); } if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "PCode CDCLK freq set failed, (err %d, freq %d)\n", ret, cdclk); return; } - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) /* * Can't read out the voltage level :( * Let's just assume everything is as expected. */ - dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level; + display->cdclk.hw.voltage_level = cdclk_config->voltage_level; } -static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct intel_display *display) { u32 cdctl, expected; int cdclk, vco; - intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); - if (dev_priv->display.cdclk.hw.vco == 0 || - dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) + if (display->cdclk.hw.vco == 0 || + display->cdclk.hw.cdclk == display->cdclk.hw.bypass) goto sanitize; /* Make sure this is a legal cdclk value for the platform */ - cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk); - if (cdclk != dev_priv->display.cdclk.hw.cdclk) + cdclk = bxt_calc_cdclk(display, display->cdclk.hw.cdclk); + if (cdclk != display->cdclk.hw.cdclk) goto sanitize; /* Make sure the VCO is correct for the cdclk */ - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - if (vco != dev_priv->display.cdclk.hw.vco) + vco = bxt_calc_cdclk_pll_vco(display, cdclk); + if (vco != display->cdclk.hw.vco) goto sanitize; /* @@ -2181,129 +2244,133 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, * so sanitize this register. */ - cdctl = intel_de_read(dev_priv, CDCLK_CTL); - expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE); + cdctl = intel_de_read(display, CDCLK_CTL); + expected = bxt_cdclk_ctl(display, &display->cdclk.hw, INVALID_PIPE); /* * Let's ignore the pipe field, since BIOS could have configured the * dividers both synching to an active pipe, or asynchronously * (PIPE_NONE). */ - cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); - expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); + cdctl &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE); + expected &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE); if (cdctl == expected) /* All well; nothing to sanitize */ return; sanitize: - drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); + drm_dbg_kms(display->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->display.cdclk.hw.cdclk = 0; + display->cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->display.cdclk.hw.vco = ~0; + display->cdclk.hw.vco = ~0; } -static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) +static void bxt_cdclk_init_hw(struct intel_display *display) { struct intel_cdclk_config cdclk_config; - bxt_sanitize_cdclk(dev_priv); + bxt_sanitize_cdclk(display); - if (dev_priv->display.cdclk.hw.cdclk != 0 && - dev_priv->display.cdclk.hw.vco != 0) + if (display->cdclk.hw.cdclk != 0 && + display->cdclk.hw.vco != 0) return; - cdclk_config = dev_priv->display.cdclk.hw; + cdclk_config = display->cdclk.hw; /* * FIXME: * - The initial CDCLK needs to be read from VBT. 
* Need to make this change after VBT has changes for BXT. */ - cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0); - cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk); + cdclk_config.cdclk = bxt_calc_cdclk(display, 0); + cdclk_config.vco = bxt_calc_cdclk_pll_vco(display, cdclk_config.cdclk); cdclk_config.voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); + intel_cdclk_calc_voltage_level(display, cdclk_config.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + bxt_set_cdclk(display, &cdclk_config, INVALID_PIPE); } -static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) +static void bxt_cdclk_uninit_hw(struct intel_display *display) { - struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; + struct intel_cdclk_config cdclk_config = display->cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); + intel_cdclk_calc_voltage_level(display, cdclk_config.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + bxt_set_cdclk(display, &cdclk_config, INVALID_PIPE); } /** * intel_cdclk_init_hw - Initialize CDCLK hardware - * @i915: i915 device + * @display: display instance * - * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and + * Initialize CDCLK. This consists mainly of initializing display->cdclk.hw and * sanitizing the state of the hardware if needed. This is generally done only * during the display core initialization sequence, after which the DMC will * take care of turning CDCLK off/on as needed. */ -void intel_cdclk_init_hw(struct drm_i915_private *i915) +void intel_cdclk_init_hw(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915)) - bxt_cdclk_init_hw(i915); - else if (DISPLAY_VER(i915) == 9) - skl_cdclk_init_hw(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915)) + bxt_cdclk_init_hw(display); + else if (DISPLAY_VER(display) == 9) + skl_cdclk_init_hw(display); } /** * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware - * @i915: i915 device + * @display: display instance * * Uninitialize CDCLK. This is done only during the display core * uninitialization sequence. 
*/ -void intel_cdclk_uninit_hw(struct drm_i915_private *i915) +void intel_cdclk_uninit_hw(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915)) - bxt_cdclk_uninit_hw(i915); - else if (DISPLAY_VER(i915) == 9) - skl_cdclk_uninit_hw(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915)) + bxt_cdclk_uninit_hw(display); + else if (DISPLAY_VER(display) == 9) + skl_cdclk_uninit_hw(display); } -static bool intel_cdclk_can_crawl_and_squash(struct drm_i915_private *i915, +static bool intel_cdclk_can_crawl_and_squash(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { u16 old_waveform; u16 new_waveform; - drm_WARN_ON(&i915->drm, cdclk_pll_is_unknown(a->vco)); + drm_WARN_ON(display->drm, cdclk_pll_is_unknown(a->vco)); if (a->vco == 0 || b->vco == 0) return false; - if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915)) + if (!HAS_CDCLK_CRAWL(display) || !HAS_CDCLK_SQUASH(display)) return false; - old_waveform = cdclk_squash_waveform(i915, a->cdclk); - new_waveform = cdclk_squash_waveform(i915, b->cdclk); + old_waveform = cdclk_squash_waveform(display, a->cdclk); + new_waveform = cdclk_squash_waveform(display, b->cdclk); return a->vco != b->vco && old_waveform != new_waveform; } -static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, +static bool intel_cdclk_can_crawl(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { int a_div, b_div; - if (!HAS_CDCLK_CRAWL(dev_priv)) + if (!HAS_CDCLK_CRAWL(display)) return false; /* @@ -2319,7 +2386,7 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, a->ref == b->ref; } -static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, +static bool intel_cdclk_can_squash(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { @@ -2329,7 +2396,7 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, * the moment all platforms with squasher use a fixed cd2x * divider. */ - if (!HAS_CDCLK_SQUASH(dev_priv)) + if (!HAS_CDCLK_SQUASH(display)) return false; return a->cdclk != b->cdclk && @@ -2358,7 +2425,7 @@ bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, /** * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK * configurations requires only a cd2x divider update - * @dev_priv: i915 device + * @display: display instance * @a: first CDCLK configuration * @b: second CDCLK configuration * @@ -2366,12 +2433,14 @@ bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, * True if changing between the two CDCLK configurations * can be done with just a cd2x divider update, false if not. */ -static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, +static bool intel_cdclk_can_cd2x_update(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + /* Older hw doesn't have the capability */ - if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv)) + if (DISPLAY_VER(display) < 10 && !IS_BROXTON(dev_priv)) return false; /* @@ -2380,7 +2449,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, * the moment all platforms with squasher use a fixed cd2x * divider. 
*/ - if (HAS_CDCLK_SQUASH(dev_priv)) + if (HAS_CDCLK_SQUASH(display)) return false; return a->cdclk != b->cdclk && @@ -2404,23 +2473,24 @@ static bool intel_cdclk_changed(const struct intel_cdclk_config *a, a->voltage_level != b->voltage_level; } -void intel_cdclk_dump_config(struct drm_i915_private *i915, +void intel_cdclk_dump_config(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, const char *context) { - drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", + drm_dbg_kms(display->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", context, cdclk_config->cdclk, cdclk_config->vco, cdclk_config->ref, cdclk_config->bypass, cdclk_config->voltage_level); } -static void intel_pcode_notify(struct drm_i915_private *i915, +static void intel_pcode_notify(struct intel_display *display, u8 voltage_level, u8 active_pipe_count, u16 cdclk, bool cdclk_update_valid, bool pipe_count_update_valid) { + struct drm_i915_private *i915 = to_i915(display->drm); int ret; u32 update_mask = 0; @@ -2441,26 +2511,27 @@ static void intel_pcode_notify(struct drm_i915_private *i915, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) - drm_err(&i915->drm, + drm_err(display->drm, "Failed to inform PCU about display config (err %d)\n", ret); } -static void intel_set_cdclk(struct drm_i915_private *dev_priv, +static void intel_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe, const char *context) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config)) + if (!intel_cdclk_changed(&display->cdclk.hw, cdclk_config)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk)) + if (drm_WARN_ON_ONCE(display->drm, !display->funcs.cdclk->set_cdclk)) return; - intel_cdclk_dump_config(dev_priv, cdclk_config, context); + intel_cdclk_dump_config(display, cdclk_config, context); - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_pause(intel_dp); @@ -2473,24 +2544,24 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, * functions use cdclk. Not all platforms/ports do, * but we'll lock them all for simplicity. 
*/ - mutex_lock(&dev_priv->display.gmbus.mutex); - for_each_intel_dp(&dev_priv->drm, encoder) { + mutex_lock(&display->gmbus.mutex); + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, - &dev_priv->display.gmbus.mutex); + &display->gmbus.mutex); } - intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); + intel_cdclk_set_cdclk(display, cdclk_config, pipe); - for_each_intel_dp(&dev_priv->drm, encoder) { + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_unlock(&intel_dp->aux.hw_mutex); } - mutex_unlock(&dev_priv->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_resume(intel_dp); @@ -2498,17 +2569,17 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_audio_cdclk_change_post(dev_priv); - if (drm_WARN(&dev_priv->drm, - intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config), + if (drm_WARN(display->drm, + intel_cdclk_changed(&display->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]"); - intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); + intel_cdclk_dump_config(display, &display->cdclk.hw, "[hw state]"); + intel_cdclk_dump_config(display, cdclk_config, "[sw state]"); } } static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2547,13 +2618,13 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) if (update_pipe_count) num_active_pipes = hweight8(new_cdclk_state->active_pipes); - intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, + intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk, change_cdclk, update_pipe_count); } static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_cdclk_state *old_cdclk_state = @@ -2584,7 +2655,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) if (update_pipe_count) num_active_pipes = hweight8(new_cdclk_state->active_pipes); - intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, + intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk, update_cdclk, update_pipe_count); } @@ -2609,7 +2680,8 @@ bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state) void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2646,9 +2718,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) */ cdclk_config.joined_mbus = 
old_cdclk_state->actual.joined_mbus; - drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + drm_WARN_ON(display->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(i915, &cdclk_config, pipe, + intel_set_cdclk(display, &cdclk_config, pipe, "Pre changing CDCLK to"); } @@ -2662,7 +2734,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2682,20 +2755,21 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) else pipe = INVALID_PIPE; - drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + drm_WARN_ON(display->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(i915, &new_cdclk_state->actual, pipe, + intel_set_cdclk(display, &new_cdclk_state->actual, pipe, "Post changing CDCLK to"); } static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *dev_priv = to_i915(display->drm); int pixel_rate = crtc_state->pixel_rate; - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) return DIV_ROUND_UP(pixel_rate, 2); - else if (DISPLAY_VER(dev_priv) == 9 || + else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return pixel_rate; else if (IS_CHERRYVIEW(dev_priv)) @@ -2709,11 +2783,11 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_plane *plane; int min_cdclk = 0; - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + for_each_intel_plane_on_crtc(display->drm, crtc, plane) min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); return min_cdclk; @@ -2722,7 +2796,7 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state); int min_cdclk = 0; @@ -2751,7 +2825,7 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) * Since PPC = 2 with bigjoiner * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits */ - int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24; + int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 
36 : 24; int min_cdclk_bj = (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) * pixel_clock) / (2 * bigjoiner_interface_bits); @@ -2764,8 +2838,8 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = - to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *dev_priv = to_i915(display->drm); int min_cdclk; if (!crtc_state->hw.enable) @@ -2786,10 +2860,10 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) crtc_state->has_audio && crtc_state->port_clock >= 540000 && crtc_state->lane_count == 4) { - if (DISPLAY_VER(dev_priv) == 10) { + if (DISPLAY_VER(display) == 10) { /* Display WA #1145: glk */ min_cdclk = max(316800, min_cdclk); - } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) { + } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) { /* Display WA #1144: skl,bxt */ min_cdclk = max(432000, min_cdclk); } @@ -2799,7 +2873,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) * According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. */ - if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9) + if (crtc_state->has_audio && DISPLAY_VER(display) >= 9) min_cdclk = max(2 * 96000, min_cdclk); /* @@ -2841,7 +2915,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) static int intel_compute_min_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_bw_state *bw_state; @@ -2884,7 +2959,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) min_cdclk = max(cdclk_state->force_min_cdclk, cdclk_state->bw_min_cdclk); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); /* @@ -2899,10 +2974,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) !is_power_of_2(cdclk_state->active_pipes)) min_cdclk = max(2 * 96000, min_cdclk); - if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { - drm_dbg_kms(&dev_priv->drm, + if (min_cdclk > display->cdclk.max_cdclk_freq) { + drm_dbg_kms(display->drm, "required cdclk (%d kHz) exceeds max (%d kHz)\n", - min_cdclk, dev_priv->display.cdclk.max_cdclk_freq); + min_cdclk, display->cdclk.max_cdclk_freq); return -EINVAL; } @@ -2924,7 +2999,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) */ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); struct intel_crtc *crtc; @@ -2952,7 +3027,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) } min_voltage_level = 0; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) min_voltage_level = max(cdclk_state->min_voltage_level[pipe], min_voltage_level); @@ -2961,7 +3036,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) static int 
vlv_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); int min_cdclk, cdclk; @@ -2970,18 +3045,18 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) if (min_cdclk < 0) return min_cdclk; - cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); + cdclk = vlv_calc_cdclk(display, min_cdclk); cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = - vlv_calc_voltage_level(dev_priv, cdclk); + vlv_calc_voltage_level(display, cdclk); if (!cdclk_state->active_pipes) { - cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); + cdclk = vlv_calc_cdclk(display, cdclk_state->force_min_cdclk); cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = - vlv_calc_voltage_level(dev_priv, cdclk); + vlv_calc_voltage_level(display, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } @@ -3020,7 +3095,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) static int skl_dpll0_vco(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); struct intel_crtc *crtc; @@ -3029,7 +3104,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state) vco = cdclk_state->logical.vco; if (!vco) - vco = dev_priv->display.cdclk.skl_preferred_vco_freq; + vco = display->cdclk.skl_preferred_vco_freq; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->hw.enable) @@ -3091,7 +3166,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); int min_cdclk, min_voltage_level, cdclk, vco; @@ -3104,23 +3179,23 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) if (min_voltage_level < 0) return min_voltage_level; - cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(display, min_cdclk); + vco = bxt_calc_cdclk_pll_vco(display, cdclk); cdclk_state->logical.vco = vco; cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = max_t(int, min_voltage_level, - intel_cdclk_calc_voltage_level(dev_priv, cdclk)); + intel_cdclk_calc_voltage_level(display, cdclk)); if (!cdclk_state->active_pipes) { - cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(display, cdclk_state->force_min_cdclk); + vco = bxt_calc_cdclk_pll_vco(display, cdclk); cdclk_state->actual.vco = vco; cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk); + intel_cdclk_calc_voltage_level(display, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } @@ -3172,10 +3247,10 @@ static const struct intel_global_state_funcs intel_cdclk_funcs = { struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct 
intel_global_state *cdclk_state; - cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj); + cdclk_state = intel_atomic_get_global_obj_state(state, &display->cdclk.obj); if (IS_ERR(cdclk_state)) return ERR_CAST(cdclk_state); @@ -3231,24 +3306,26 @@ int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joi return intel_atomic_lock_global_state(&cdclk_state->base); } -int intel_cdclk_init(struct drm_i915_private *dev_priv) +int intel_cdclk_init(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_state *cdclk_state; cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL); if (!cdclk_state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj, + intel_atomic_global_obj_init(dev_priv, &display->cdclk.obj, &cdclk_state->base, &intel_cdclk_funcs); return 0; } -static bool intel_cdclk_need_serialize(struct drm_i915_private *i915, +static bool intel_cdclk_need_serialize(struct intel_display *display, const struct intel_cdclk_state *old_cdclk_state, const struct intel_cdclk_state *new_cdclk_state) { + struct drm_i915_private *i915 = to_i915(display->drm); bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) != hweight8(new_cdclk_state->active_pipes); bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual, @@ -3262,7 +3339,7 @@ static bool intel_cdclk_need_serialize(struct drm_i915_private *i915, int intel_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *old_cdclk_state; struct intel_cdclk_state *new_cdclk_state; enum pipe pipe = INVALID_PIPE; @@ -3281,7 +3358,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) if (ret) return ret; - if (intel_cdclk_need_serialize(dev_priv, old_cdclk_state, new_cdclk_state)) { + if (intel_cdclk_need_serialize(display, old_cdclk_state, new_cdclk_state)) { /* * Also serialize commits across all crtcs * if the actual hw needs to be poked. 
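The hunk above converts intel_cdclk_need_serialize(), but its return statement falls outside the context lines. Based on the two booleans visible in the hunk, the decision boils down to the sketch below; the exact platform gating of the pipe-count term is an assumption (DG2 is the platform that has to notify pcode about the active pipe count, per intel_pcode_notify() earlier in this diff):

static bool example_need_serialize(struct intel_display *display,
				   const struct intel_cdclk_state *old_state,
				   const struct intel_cdclk_state *new_state)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	bool power_well_cnt_changed = hweight8(old_state->active_pipes) !=
				      hweight8(new_state->active_pipes);
	bool cdclk_changed = intel_cdclk_changed(&old_state->actual,
						 &new_state->actual);

	/*
	 * Serialize commits across all crtcs when the hardware will
	 * actually be poked: the cdclk itself changes, or (assumed
	 * gating) a pcode-notifying platform changes its pipe count.
	 */
	return cdclk_changed || (IS_DG2(i915) && power_well_cnt_changed);
}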
@@ -3301,14 +3378,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) } if (is_power_of_2(new_cdclk_state->active_pipes) && - intel_cdclk_can_cd2x_update(dev_priv, + intel_cdclk_can_cd2x_update(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; pipe = ilog2(new_cdclk_state->active_pipes); - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) @@ -3318,25 +3395,25 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) pipe = INVALID_PIPE; } - if (intel_cdclk_can_crawl_and_squash(dev_priv, + if (intel_cdclk_can_crawl_and_squash(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk via crawling and squashing\n"); - } else if (intel_cdclk_can_squash(dev_priv, + } else if (intel_cdclk_can_squash(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk via squashing\n"); - } else if (intel_cdclk_can_crawl(dev_priv, + } else if (intel_cdclk_can_crawl(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk via crawling\n"); } else if (pipe != INVALID_PIPE) { new_cdclk_state->pipe = pipe; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk cd2x divider with pipe %c active\n", pipe_name(pipe)); } else if (intel_cdclk_clock_changed(&old_cdclk_state->actual, @@ -3348,24 +3425,24 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) new_cdclk_state->disable_pipes = true; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Modeset required for cdclk change\n"); } - if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) != - intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) { - int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual); + if (intel_mdclk_cdclk_ratio(display, &old_cdclk_state->actual) != + intel_mdclk_cdclk_ratio(display, &new_cdclk_state->actual)) { + int ratio = intel_mdclk_cdclk_ratio(display, &new_cdclk_state->actual); ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio); if (ret) return ret; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "New cdclk calculated to be logical %u kHz, actual %u kHz\n", new_cdclk_state->logical.cdclk, new_cdclk_state->actual.cdclk); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "New voltage level calculated to be logical %u, actual %u\n", new_cdclk_state->logical.voltage_level, new_cdclk_state->actual.voltage_level); @@ -3373,18 +3450,19 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) return 0; } -static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) +static int intel_compute_max_dotclk(struct intel_display *display) { - int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq; + struct drm_i915_private *dev_priv = to_i915(display->drm); + int max_cdclk_freq = display->cdclk.max_cdclk_freq; - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) return 2 * max_cdclk_freq; - else if (DISPLAY_VER(dev_priv) == 9 || + else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; else if (IS_CHERRYVIEW(dev_priv)) return max_cdclk_freq*95/100; - else if (DISPLAY_VER(dev_priv) < 4) + else if 
(DISPLAY_VER(display) < 4) return 2*max_cdclk_freq*90/100; else return max_cdclk_freq*90/100; @@ -3392,34 +3470,38 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) /** * intel_update_max_cdclk - Determine the maximum support CDCLK frequency - * @dev_priv: i915 device + * @display: display instance * * Determine the maximum CDCLK frequency the platform supports, and also * derive the maximum dot clock frequency the maximum CDCLK frequency * allows. */ -void intel_update_max_cdclk(struct drm_i915_private *dev_priv) +void intel_update_max_cdclk(struct intel_display *display) { - if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { - if (dev_priv->display.cdclk.hw.ref == 24000) - dev_priv->display.cdclk.max_cdclk_freq = 552000; + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 30) { + display->cdclk.max_cdclk_freq = 691200; + } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { + if (display->cdclk.hw.ref == 24000) + display->cdclk.max_cdclk_freq = 552000; else - dev_priv->display.cdclk.max_cdclk_freq = 556800; - } else if (DISPLAY_VER(dev_priv) >= 11) { - if (dev_priv->display.cdclk.hw.ref == 24000) - dev_priv->display.cdclk.max_cdclk_freq = 648000; + display->cdclk.max_cdclk_freq = 556800; + } else if (DISPLAY_VER(display) >= 11) { + if (display->cdclk.hw.ref == 24000) + display->cdclk.max_cdclk_freq = 648000; else - dev_priv->display.cdclk.max_cdclk_freq = 652800; + display->cdclk.max_cdclk_freq = 652800; } else if (IS_GEMINILAKE(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 316800; + display->cdclk.max_cdclk_freq = 316800; } else if (IS_BROXTON(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 624000; - } else if (DISPLAY_VER(dev_priv) == 9) { - u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + display->cdclk.max_cdclk_freq = 624000; + } else if (DISPLAY_VER(display) == 9) { + u32 limit = intel_de_read(display, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; - vco = dev_priv->display.cdclk.skl_preferred_vco_freq; - drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); + vco = display->cdclk.skl_preferred_vco_freq; + drm_WARN_ON(display->drm, vco != 8100000 && vco != 8640000); /* * Use the lower (vco 8640) cdclk values as a @@ -3435,7 +3517,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) else max_cdclk = 308571; - dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); + display->cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROADWELL(dev_priv)) { /* * FIXME with extra cooling we can allow @@ -3443,41 +3525,43 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) * How can we know if extra cooling is * available? PCI ID, VTB, something else? 
*/ - if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) - dev_priv->display.cdclk.max_cdclk_freq = 450000; + if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT) + display->cdclk.max_cdclk_freq = 450000; else if (IS_BROADWELL_ULX(dev_priv)) - dev_priv->display.cdclk.max_cdclk_freq = 450000; + display->cdclk.max_cdclk_freq = 450000; else if (IS_BROADWELL_ULT(dev_priv)) - dev_priv->display.cdclk.max_cdclk_freq = 540000; + display->cdclk.max_cdclk_freq = 540000; else - dev_priv->display.cdclk.max_cdclk_freq = 675000; + display->cdclk.max_cdclk_freq = 675000; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 320000; + display->cdclk.max_cdclk_freq = 320000; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 400000; + display->cdclk.max_cdclk_freq = 400000; } else { /* otherwise assume cdclk is fixed */ - dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk; + display->cdclk.max_cdclk_freq = display->cdclk.hw.cdclk; } - dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv); + display->cdclk.max_dotclk_freq = intel_compute_max_dotclk(display); - drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", - dev_priv->display.cdclk.max_cdclk_freq); + drm_dbg(display->drm, "Max CD clock rate: %d kHz\n", + display->cdclk.max_cdclk_freq); - drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", - dev_priv->display.cdclk.max_dotclk_freq); + drm_dbg(display->drm, "Max dotclock rate: %d kHz\n", + display->cdclk.max_dotclk_freq); } /** * intel_update_cdclk - Determine the current CDCLK frequency - * @dev_priv: i915 device + * @display: display instance * * Determine the current CDCLK frequency. */ -void intel_update_cdclk(struct drm_i915_private *dev_priv) +void intel_update_cdclk(struct intel_display *display) { - intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw); + struct drm_i915_private *dev_priv = to_i915(display->drm); + + intel_cdclk_get_cdclk(display, &display->cdclk.hw); /* * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): @@ -3486,28 +3570,29 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) * generate GMBus clock. This will vary with the cdclk freq. */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - intel_de_write(dev_priv, GMBUSFREQ_VLV, - DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000)); + intel_de_write(display, GMBUSFREQ_VLV, + DIV_ROUND_UP(display->cdclk.hw.cdclk, 1000)); } -static int dg1_rawclk(struct drm_i915_private *dev_priv) +static int dg1_rawclk(struct intel_display *display) { /* * DG1 always uses a 38.4 MHz rawclk. The bspec tells us * "Program Numerator=2, Denominator=4, Divider=37 decimal." 
- intel_de_write(dev_priv, PCH_RAWCLK_FREQ, + intel_de_write(display, PCH_RAWCLK_FREQ, CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); return 38400; } -static int cnp_rawclk(struct drm_i915_private *dev_priv) +static int cnp_rawclk(struct intel_display *display) { - u32 rawclk; + struct drm_i915_private *dev_priv = to_i915(display->drm); int divider, fraction; + u32 rawclk; - if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { + if (intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { /* 24 MHz */ divider = 24000; fraction = 0; @@ -3527,37 +3612,42 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) rawclk |= ICP_RAWCLK_NUM(numerator); } - intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk); + intel_de_write(display, PCH_RAWCLK_FREQ, rawclk); return divider + fraction; } -static int pch_rawclk(struct drm_i915_private *dev_priv) +static int pch_rawclk(struct intel_display *display) { - return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; + return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } -static int vlv_hrawclk(struct drm_i915_private *dev_priv) +static int vlv_hrawclk(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + /* RAWCLK_FREQ_VLV register updated from power well code */ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL); } -static int i9xx_hrawclk(struct drm_i915_private *i915) +static int i9xx_hrawclk(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + /* hrawclock is 1/4 the FSB frequency */ return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4); } /** * intel_read_rawclk - Determine the current RAWCLK frequency - * @dev_priv: i915 device + * @display: display instance * * Determine the current RAWCLK frequency. RAWCLK is a fixed * frequency clock so this needs to be done only once.
*/ -u32 intel_read_rawclk(struct drm_i915_private *dev_priv) +u32 intel_read_rawclk(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 freq; if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL) @@ -3568,15 +3658,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) */ freq = 38400; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) - freq = dg1_rawclk(dev_priv); + freq = dg1_rawclk(display); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - freq = cnp_rawclk(dev_priv); + freq = cnp_rawclk(display); else if (HAS_PCH_SPLIT(dev_priv)) - freq = pch_rawclk(dev_priv); + freq = pch_rawclk(display); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - freq = vlv_hrawclk(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 3) - freq = i9xx_hrawclk(dev_priv); + freq = vlv_hrawclk(display); + else if (DISPLAY_VER(display) >= 3) + freq = i9xx_hrawclk(display); else /* no rawclk on other platforms, or no need to know it */ return 0; @@ -3586,25 +3676,32 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) static int i915_cdclk_info_show(struct seq_file *m, void *unused) { - struct drm_i915_private *i915 = m->private; + struct intel_display *display = m->private; - seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); - seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); - seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq); + seq_printf(m, "Current CD clock frequency: %d kHz\n", display->cdclk.hw.cdclk); + seq_printf(m, "Max CD clock frequency: %d kHz\n", display->cdclk.max_cdclk_freq); + seq_printf(m, "Max pixel clock frequency: %d kHz\n", display->cdclk.max_dotclk_freq); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info); -void intel_cdclk_debugfs_register(struct drm_i915_private *i915) +void intel_cdclk_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root, - i915, &i915_cdclk_info_fops); + display, &i915_cdclk_info_fops); } +static const struct intel_cdclk_funcs xe3lpd_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = xe3lpd_calc_voltage_level, +}; + static const struct intel_cdclk_funcs rplu_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, @@ -3743,97 +3840,102 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = { /** * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks - * @dev_priv: i915 device + * @display: display instance */ -void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 20) { - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; - dev_priv->display.cdclk.table = xe2lpd_cdclk_table; - } else if (DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1)) { - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; - dev_priv->display.cdclk.table = xe2hpd_cdclk_table; - } else if (DISPLAY_VER(dev_priv) >= 14) { - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; - dev_priv->display.cdclk.table = mtl_cdclk_table; +void intel_init_cdclk_hooks(struct intel_display *display) +{ + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 30) { + display->funcs.cdclk = &xe3lpd_cdclk_funcs; + display->cdclk.table = xe3lpd_cdclk_table; + } else if (DISPLAY_VER(display) >= 20) { + 
display->funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = xe2lpd_cdclk_table; + } else if (DISPLAY_VERx100(display) >= 1401) { + display->funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = xe2hpd_cdclk_table; + } else if (DISPLAY_VER(display) >= 14) { + display->funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = mtl_cdclk_table; } else if (IS_DG2(dev_priv)) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; - dev_priv->display.cdclk.table = dg2_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { /* Wa_22011320316:adl-p[a0] */ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = adlp_a_step_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; } else if (IS_RAPTORLAKE_U(dev_priv)) { - dev_priv->display.cdclk.table = rplu_cdclk_table; - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = rplu_cdclk_table; + display->funcs.cdclk = &rplu_cdclk_funcs; } else { - dev_priv->display.cdclk.table = adlp_cdclk_table; - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = adlp_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; } } else if (IS_ROCKETLAKE(dev_priv)) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; - dev_priv->display.cdclk.table = rkl_cdclk_table; - } else if (DISPLAY_VER(dev_priv) >= 12) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; - dev_priv->display.cdclk.table = icl_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = rkl_cdclk_table; + } else if (DISPLAY_VER(display) >= 12) { + display->funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = icl_cdclk_table; } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { - dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs; - dev_priv->display.cdclk.table = icl_cdclk_table; - } else if (DISPLAY_VER(dev_priv) >= 11) { - dev_priv->display.funcs.cdclk = &icl_cdclk_funcs; - dev_priv->display.cdclk.table = icl_cdclk_table; + display->funcs.cdclk = &ehl_cdclk_funcs; + display->cdclk.table = icl_cdclk_table; + } else if (DISPLAY_VER(display) >= 11) { + display->funcs.cdclk = &icl_cdclk_funcs; + display->cdclk.table = icl_cdclk_table; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs; + display->funcs.cdclk = &bxt_cdclk_funcs; if (IS_GEMINILAKE(dev_priv)) - dev_priv->display.cdclk.table = glk_cdclk_table; + display->cdclk.table = glk_cdclk_table; else - dev_priv->display.cdclk.table = bxt_cdclk_table; - } else if (DISPLAY_VER(dev_priv) == 9) { - dev_priv->display.funcs.cdclk = &skl_cdclk_funcs; + display->cdclk.table = bxt_cdclk_table; + } else if (DISPLAY_VER(display) == 9) { + display->funcs.cdclk = &skl_cdclk_funcs; } else if (IS_BROADWELL(dev_priv)) { - dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs; + display->funcs.cdclk = &bdw_cdclk_funcs; } else if (IS_HASWELL(dev_priv)) { - dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs; + display->funcs.cdclk = &hsw_cdclk_funcs; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.funcs.cdclk = &chv_cdclk_funcs; + display->funcs.cdclk = &chv_cdclk_funcs; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs; + display->funcs.cdclk = &vlv_cdclk_funcs; } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { - 
dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; + display->funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_IRONLAKE(dev_priv)) { - dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs; + display->funcs.cdclk = &ilk_cdclk_funcs; } else if (IS_GM45(dev_priv)) { - dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs; + display->funcs.cdclk = &gm45_cdclk_funcs; } else if (IS_G45(dev_priv)) { - dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; + display->funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I965GM(dev_priv)) { - dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs; + display->funcs.cdclk = &i965gm_cdclk_funcs; } else if (IS_I965G(dev_priv)) { - dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; + display->funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_PINEVIEW(dev_priv)) { - dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs; + display->funcs.cdclk = &pnv_cdclk_funcs; } else if (IS_G33(dev_priv)) { - dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; + display->funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I945GM(dev_priv)) { - dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs; + display->funcs.cdclk = &i945gm_cdclk_funcs; } else if (IS_I945G(dev_priv)) { - dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; + display->funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_I915GM(dev_priv)) { - dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs; + display->funcs.cdclk = &i915gm_cdclk_funcs; } else if (IS_I915G(dev_priv)) { - dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs; + display->funcs.cdclk = &i915g_cdclk_funcs; } else if (IS_I865G(dev_priv)) { - dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs; + display->funcs.cdclk = &i865g_cdclk_funcs; } else if (IS_I85X(dev_priv)) { - dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs; + display->funcs.cdclk = &i85x_cdclk_funcs; } else if (IS_I845G(dev_priv)) { - dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs; + display->funcs.cdclk = &i845g_cdclk_funcs; } else if (IS_I830(dev_priv)) { - dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; + display->funcs.cdclk = &i830_cdclk_funcs; } - if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk, + if (drm_WARN(display->drm, !display->funcs.cdclk, "Unknown platform. 
Assuming i830\n")) - dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; + display->funcs.cdclk = &i830_cdclk_funcs; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index cfdcdec07a4d..6b0e7a41eba3 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -11,9 +11,9 @@ #include "intel_display_limits.h" #include "intel_global_state.h" -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; +struct intel_display; struct intel_cdclk_config { unsigned int cdclk, vco, ref, bypass; @@ -59,24 +59,24 @@ struct intel_cdclk_state { }; int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); -void intel_cdclk_init_hw(struct drm_i915_private *i915); -void intel_cdclk_uninit_hw(struct drm_i915_private *i915); -void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); -void intel_update_max_cdclk(struct drm_i915_private *dev_priv); -void intel_update_cdclk(struct drm_i915_private *dev_priv); -u32 intel_read_rawclk(struct drm_i915_private *dev_priv); +void intel_cdclk_init_hw(struct intel_display *display); +void intel_cdclk_uninit_hw(struct intel_display *display); +void intel_init_cdclk_hooks(struct intel_display *display); +void intel_update_max_cdclk(struct intel_display *display); +void intel_update_cdclk(struct intel_display *display); +u32 intel_read_rawclk(struct intel_display *display); bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b); -int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915, +int intel_mdclk_cdclk_ratio(struct intel_display *display, const struct intel_cdclk_config *cdclk_config); bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state); void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state); void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); -void intel_cdclk_dump_config(struct drm_i915_private *i915, +void intel_cdclk_dump_config(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, const char *context); int intel_modeset_calc_cdclk(struct intel_atomic_state *state); -void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, +void intel_cdclk_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config); int intel_cdclk_atomic_check(struct intel_atomic_state *state, bool *need_cdclk_calc); @@ -88,11 +88,11 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state); container_of_const((global_state), struct intel_cdclk_state, base) #define intel_atomic_get_old_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->cdclk.obj)) #define intel_atomic_get_new_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->cdclk.obj)) -int intel_cdclk_init(struct drm_i915_private *dev_priv); -void intel_cdclk_debugfs_register(struct drm_i915_private *i915); +int intel_cdclk_init(struct intel_display *display); +void intel_cdclk_debugfs_register(struct intel_display *display); #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 
5d701f48351b..174753625bca 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -39,7 +39,8 @@ struct intel_color_funcs { * the next vblank start, alongside any other double buffered * registers involved with the same commit. This hook is optional. */ - void (*color_commit_noarm)(const struct intel_crtc_state *crtc_state); + void (*color_commit_noarm)(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* * Program arming double buffered color management registers * during vblank evasion. The registers (and whatever other registers @@ -47,7 +48,8 @@ struct intel_color_funcs { * during the next vblank start, alongside any other double buffered * registers involved with the same commit. */ - void (*color_commit_arm)(const struct intel_crtc_state *crtc_state); + void (*color_commit_arm)(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* * Perform any extra tasks needed after all the * double buffered registers have been latched. @@ -205,74 +207,81 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) return result; } -static void ilk_update_pipe_csc(struct intel_crtc *crtc, +static void ilk_update_pipe_csc(struct intel_dsb *dsb, + struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), csc->preoff[0]); - intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), csc->preoff[1]); - intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), csc->preoff[2]); - - intel_de_write_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe), - csc->coeff[0] << 16 | csc->coeff[1]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe), - csc->coeff[2] << 16); - - intel_de_write_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe), - csc->coeff[3] << 16 | csc->coeff[4]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe), - csc->coeff[5] << 16); - - intel_de_write_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe), - csc->coeff[6] << 16 | csc->coeff[7]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe), - csc->coeff[8] << 16); - - if (DISPLAY_VER(i915) < 7) + intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_HI(pipe), + csc->preoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_ME(pipe), + csc->preoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_LO(pipe), + csc->preoff[2]); + + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RY_GY(pipe), + csc->coeff[0] << 16 | csc->coeff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BY(pipe), + csc->coeff[2] << 16); + + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RU_GU(pipe), + csc->coeff[3] << 16 | csc->coeff[4]); + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BU(pipe), + csc->coeff[5] << 16); + + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RV_GV(pipe), + csc->coeff[6] << 16 | csc->coeff[7]); + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BV(pipe), + csc->coeff[8] << 16); + + if (DISPLAY_VER(display) < 7) return; - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), csc->postoff[0]); - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), csc->postoff[1]); - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), csc->postoff[2]); + intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_HI(pipe), + csc->postoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_ME(pipe), + csc->postoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_LO(pipe), + csc->postoff[2]); } static void 
ilk_read_pipe_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(pipe)); - csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_ME(pipe)); - csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_LO(pipe)); + csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(pipe)); + csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_PREOFF_ME(pipe)); + csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_PREOFF_LO(pipe)); - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RY_GY(pipe)); csc->coeff[0] = tmp >> 16; csc->coeff[1] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BY(pipe)); csc->coeff[2] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RU_GU(pipe)); csc->coeff[3] = tmp >> 16; csc->coeff[4] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BU(pipe)); csc->coeff[5] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RV_GV(pipe)); csc->coeff[6] = tmp >> 16; csc->coeff[7] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BV(pipe)); csc->coeff[8] = tmp >> 16; - if (DISPLAY_VER(i915) < 7) + if (DISPLAY_VER(display) < 7) return; - csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_HI(pipe)); - csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_ME(pipe)); - csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_LO(pipe)); + csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_HI(pipe)); + csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_ME(pipe)); + csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_LO(pipe)); } static void ilk_read_csc(struct intel_crtc_state *crtc_state) @@ -304,68 +313,75 @@ static void skl_read_csc(struct intel_crtc_state *crtc_state) ilk_read_pipe_csc(crtc, &crtc_state->csc); } -static void icl_update_output_csc(struct intel_crtc *crtc, +static void icl_update_output_csc(struct intel_dsb *dsb, + struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), csc->preoff[0]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), csc->preoff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), csc->preoff[2]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), + csc->preoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), + csc->preoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), + csc->preoff[2]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), - csc->coeff[0] << 16 | csc->coeff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe), - csc->coeff[2] << 16); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), + csc->coeff[0] << 16 | csc->coeff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BY(pipe), + csc->coeff[2] << 16); - 
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), - csc->coeff[3] << 16 | csc->coeff[4]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe), - csc->coeff[5] << 16); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), + csc->coeff[3] << 16 | csc->coeff[4]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BU(pipe), + csc->coeff[5] << 16); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), - csc->coeff[6] << 16 | csc->coeff[7]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe), - csc->coeff[8] << 16); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), + csc->coeff[6] << 16 | csc->coeff[7]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BV(pipe), + csc->coeff[8] << 16); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), csc->postoff[0]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), csc->postoff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), csc->postoff[2]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), + csc->postoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), + csc->postoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), + csc->postoff[2]); } static void icl_read_output_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe)); - csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe)); - csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe)); + csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_HI(pipe)); + csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_ME(pipe)); + csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_LO(pipe)); - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)); csc->coeff[0] = tmp >> 16; csc->coeff[1] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BY(pipe)); csc->coeff[2] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)); csc->coeff[3] = tmp >> 16; csc->coeff[4] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BU(pipe)); csc->coeff[5] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)); csc->coeff[6] = tmp >> 16; csc->coeff[7] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BV(pipe)); csc->coeff[8] = tmp >> 16; - csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)); - csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)); - csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)); + csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)); + csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)); + csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)); } static void icl_read_csc(struct 
intel_crtc_state *crtc_state) @@ -386,14 +402,15 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state) static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *i915 = to_i915(display->drm); /* icl+ have dedicated output CSC */ - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) return false; /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ - if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915)) + if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915)) return false; return crtc_state->limited_color_range; @@ -401,7 +418,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (!ilk_limited_range(crtc_state)) return false; @@ -409,7 +426,7 @@ static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state) if (crtc_state->c8_planes) return false; - if (DISPLAY_VER(i915) == 10) + if (DISPLAY_VER(display) == 10) return crtc_state->hw.gamma_lut; else return crtc_state->hw.gamma_lut && @@ -424,13 +441,13 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) return !ilk_lut_limited_range(crtc_state); } -static void ilk_csc_copy(struct drm_i915_private *i915, +static void ilk_csc_copy(struct intel_display *display, struct intel_csc_matrix *dst, const struct intel_csc_matrix *src) { *dst = *src; - if (DISPLAY_VER(i915) < 7) + if (DISPLAY_VER(display) < 7) memset(dst->postoff, 0, sizeof(dst->postoff)); } @@ -438,7 +455,7 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, struct intel_csc_matrix *csc, bool limited_color_range) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; const u64 *input; u64 temp[9]; @@ -446,9 +463,9 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, /* for preoff/postoff */ if (limited_color_range) - ilk_csc_copy(i915, csc, &ilk_csc_matrix_limited_range); + ilk_csc_copy(display, csc, &ilk_csc_matrix_limited_range); else - ilk_csc_copy(i915, csc, &ilk_csc_matrix_identity); + ilk_csc_copy(display, csc, &ilk_csc_matrix_identity); if (limited_color_range) input = ctm_mult_by_limited(temp, ctm->matrix); @@ -496,21 +513,22 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void ilk_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *i915 = to_i915(display->drm); bool limited_color_range = ilk_csc_limited_range(crtc_state); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + drm_WARN_ON(display->drm, !crtc_state->csc_enable); ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range); } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { - drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + drm_WARN_ON(display->drm, !crtc_state->csc_enable); - ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr); + ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr); } 
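ilk_csc_convert_ctm() pre-multiplies the userspace CTM with the limited-range matrix (via ctm_mult_by_limited()) so a single pipe CSC pass handles both the color transform and the 16..235 broadcast-range compression. The same compression is applied to LUT entries through lut_limited_range(), whose body this diff does not show; a plausible shape, assuming 16-bit LUT precision:

	/* a sketch: compress full swing 0..0xffff into roughly 16/255..235/255 */
	static u16 example_limited_range(unsigned int value)
	{
		unsigned int min = 16 << 8;	/* 16/255 of full scale */
		unsigned int max = 235 << 8;	/* 235/255 of full scale */

		return value * (max - min) / 0xffff + min;
	}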
else if (limited_color_range) { - drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + drm_WARN_ON(display->drm, !crtc_state->csc_enable); - ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_limited_range); + ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_limited_range); } else if (crtc_state->csc_enable) { /* * On GLK both pipe CSC and degamma LUT are controlled @@ -518,60 +536,62 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state) * LUT is needed but CSC is not we need to load an * identity matrix. */ - drm_WARN_ON(&i915->drm, !IS_GEMINILAKE(i915)); + drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915)); - ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_identity); + ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity); } else { intel_csc_clear(&crtc_state->csc); } } -static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void ilk_load_csc_matrix(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_enable) - ilk_update_pipe_csc(crtc, &crtc_state->csc); + ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc); } static void icl_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0); ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false); } else { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0); intel_csc_clear(&crtc_state->csc); } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); - ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr); + ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr); } else if (crtc_state->limited_color_range) { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); - ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_limited_range); + ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_limited_range); } else { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0); intel_csc_clear(&crtc_state->output_csc); } } -static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void icl_load_csc_matrix(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_mode & ICL_CSC_ENABLE) - ilk_update_pipe_csc(crtc, &crtc_state->csc); + ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc); if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) - icl_update_output_csc(crtc, &crtc_state->output_csc); + icl_update_output_csc(dsb, crtc, &crtc_state->output_csc); } static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits) @@ -614,51 +634,51 @@ static void vlv_wgc_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void vlv_load_wgc_csc(struct 
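On ICL+ the pipe CSC (carrying the CTM) and the dedicated output CSC (RGB-to-YCbCr conversion and/or range compression) are enabled independently, and icl_load_csc_matrix() above only programs a matrix when its csc_mode bit is set. From the drm_WARN_ON() conditions in icl_assign_csc(), the expected pairing is:

	/* illustrative: how csc_mode is expected to pair with the matrices */
	u32 csc_mode = 0;

	if (crtc_state->hw.ctm)
		csc_mode |= ICL_CSC_ENABLE;	/* pipe CSC carries the CTM */

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
	    crtc_state->limited_color_range)
		csc_mode |= ICL_OUTPUT_CSC_ENABLE;	/* output conversion */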
intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; - intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C01_C00(display, pipe), csc->coeff[1] << 16 | csc->coeff[0]); - intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C02(display, pipe), csc->coeff[2]); - intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C11_C10(display, pipe), csc->coeff[4] << 16 | csc->coeff[3]); - intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C12(display, pipe), csc->coeff[5]); - intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C21_C20(display, pipe), csc->coeff[7] << 16 | csc->coeff[6]); - intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C22(display, pipe), csc->coeff[8]); } static void vlv_read_wgc_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C01_C00(display, pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C02(display, pipe)); csc->coeff[2] = tmp & 0xffff; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C11_C10(display, pipe)); csc->coeff[3] = tmp & 0xffff; csc->coeff[4] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C12(display, pipe)); csc->coeff[5] = tmp & 0xffff; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C21_C20(display, pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C22(display, pipe)); csc->coeff[8] = tmp & 0xffff; } @@ -672,14 +692,14 @@ static void vlv_read_csc(struct intel_crtc_state *crtc_state) static void vlv_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, !crtc_state->wgc_enable); + drm_WARN_ON(display->drm, !crtc_state->wgc_enable); vlv_wgc_csc_convert_ctm(crtc_state, &crtc_state->csc); } else { - drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); + drm_WARN_ON(display->drm, crtc_state->wgc_enable); intel_csc_clear(&crtc_state->csc); } @@ -716,45 +736,45 @@ static const struct intel_csc_matrix chv_cgm_csc_matrix_identity = { static void chv_load_cgm_csc(struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF01(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF01(pipe), csc->coeff[1] << 16 | csc->coeff[0]); - intel_de_write_fw(i915, 
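A detail the mirrored shift/mask pairs above make easy to miss: the VLV WGC (and CHV CGM, below) registers pack the even coefficient of each pair into the low half, the opposite of the ILK pipe CSC layout:

	/* ILK pipe CSC: even coefficient in bits 31:16 */
	u32 ilk_pair = csc->coeff[0] << 16 | csc->coeff[1];

	/* VLV WGC / CHV CGM: even coefficient in bits 15:0 */
	u32 wgc_pair = csc->coeff[1] << 16 | csc->coeff[0];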
CGM_PIPE_CSC_COEFF23(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF23(pipe), csc->coeff[3] << 16 | csc->coeff[2]); - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF45(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF45(pipe), csc->coeff[5] << 16 | csc->coeff[4]); - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF67(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF67(pipe), csc->coeff[7] << 16 | csc->coeff[6]); - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF8(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF8(pipe), csc->coeff[8]); } static void chv_read_cgm_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF01(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF01(pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF23(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF23(pipe)); csc->coeff[2] = tmp & 0xffff; csc->coeff[3] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF45(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF45(pipe)); csc->coeff[4] = tmp & 0xffff; csc->coeff[5] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF67(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF67(pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF8(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF8(pipe)); csc->coeff[8] = tmp & 0xffff; } @@ -768,16 +788,16 @@ static void chv_read_csc(struct intel_crtc_state *crtc_state) static void chv_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); + drm_WARN_ON(display->drm, crtc_state->wgc_enable); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); + drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc); } else { - drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); + drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); crtc_state->csc = chv_cgm_csc_matrix_identity; } @@ -953,7 +973,8 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_LDW_MASK, ldw); } -static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) +static void icl_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { /* * Despite Wa_1406463849, ICL no longer suffers from the SKL @@ -963,10 +984,11 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) * * On TGL+ all CSC arming issues have been properly fixed. */ - icl_load_csc_matrix(crtc_state); + icl_load_csc_matrix(dsb, crtc_state); } -static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) +static void skl_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { /* * Possibly related to display WA #1184, SKL CSC loses the latched @@ -979,72 +1001,76 @@ static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) * which is called after PSR exit. 
*/ if (!crtc_state->has_psr) - ilk_load_csc_matrix(crtc_state); + ilk_load_csc_matrix(dsb, crtc_state); } -static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) +static void ilk_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { - ilk_load_csc_matrix(crtc_state); + ilk_load_csc_matrix(dsb, crtc_state); } -static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void i9xx_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { /* update TRANSCONF GAMMA_MODE */ i9xx_set_pipeconf(crtc_state); } -static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void ilk_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); /* update TRANSCONF GAMMA_MODE */ ilk_set_pipeconf(crtc_state); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), + intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } -static void hsw_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void hsw_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - intel_de_write(i915, GAMMA_MODE(crtc->pipe), + intel_de_write(display, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), + intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static u32 hsw_read_gamma_mode(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - return intel_de_read(i915, GAMMA_MODE(crtc->pipe)); + return intel_de_read(display, GAMMA_MODE(crtc->pipe)); } static u32 ilk_read_csc_mode(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - return intel_de_read(i915, PIPE_CSC_MODE(crtc->pipe)); + return intel_de_read(display, PIPE_CSC_MODE(crtc->pipe)); } static void i9xx_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 tmp; - tmp = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane)); + tmp = intel_de_read(display, DSPCNTR(display, i9xx_plane)); if (tmp & DISP_PIPE_GAMMA_ENABLE) crtc_state->gamma_enable = true; - if (!HAS_GMCH(dev_priv) && tmp & DISP_PIPE_CSC_ENABLE) + if (!HAS_GMCH(display) && tmp & DISP_PIPE_CSC_ENABLE) crtc_state->csc_enable = true; } @@ -1060,14 +1086,14 @@ static void hsw_get_config(struct intel_crtc_state *crtc_state) static void skl_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 tmp; crtc_state->gamma_mode = hsw_read_gamma_mode(crtc); crtc_state->csc_mode = ilk_read_csc_mode(crtc); - tmp = 
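Every commit hook now takes the DSB as its first parameter. Passing NULL selects the immediate-MMIO path all the way down (intel_color_modeset() further below does exactly that for full modesets), while the atomic commit path is assumed to hand in its vblank DSB so the same writes get batched:

	/* immediate programming, e.g. during a full modeset */
	intel_color_commit_noarm(NULL, crtc_state);
	intel_color_commit_arm(NULL, crtc_state);

	/* deferred programming, replayed by the DSB engine */
	intel_color_commit_noarm(dsb, crtc_state);
	intel_color_commit_arm(dsb, crtc_state);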
intel_de_read(i915, SKL_BOTTOM_COLOR(crtc->pipe)); + tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe)); if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) crtc_state->gamma_enable = true; @@ -1076,15 +1102,16 @@ static void skl_get_config(struct intel_crtc_state *crtc_state) crtc_state->csc_enable = true; } -static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void skl_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val = 0; if (crtc_state->has_psr) - ilk_load_csc_matrix(crtc_state); + ilk_load_csc_matrix(dsb, crtc_state); /* * We don't (yet) allow userspace to control the pipe background color, @@ -1095,38 +1122,35 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; if (crtc_state->csc_enable) val |= SKL_BOTTOM_COLOR_CSC_ENABLE; - intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), val); + intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), val); - intel_de_write(i915, GAMMA_MODE(crtc->pipe), - crtc_state->gamma_mode); + intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), - crtc_state->csc_mode); + intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } -static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void icl_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * We don't (yet) allow userspace to control the pipe background color, * so force it to black. */ - intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0); + intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), 0); - intel_de_write(i915, GAMMA_MODE(crtc->pipe), - crtc_state->gamma_mode); + intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), - crtc_state->csc_mode); + intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static void icl_color_post_update(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* * Despite Wa_1406463849, ICL CSC is no longer disarmed by @@ -1142,17 +1166,17 @@ static void icl_color_post_update(const struct intel_crtc_state *crtc_state) * * TGL+ no longer need this workaround. 
*/ - intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe)); + intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(crtc->pipe)); } static struct drm_property_blob * -create_linear_lut(struct drm_i915_private *i915, int lut_size) +create_linear_lut(struct intel_display *display, int lut_size) { struct drm_property_blob *blob; struct drm_color_lut *lut; int i; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -1180,7 +1204,7 @@ static u16 lut_limited_range(unsigned int value) } static struct drm_property_blob * -create_resized_lut(struct drm_i915_private *i915, +create_resized_lut(struct intel_display *display, const struct drm_property_blob *blob_in, int lut_out_size, bool limited_color_range) { @@ -1189,7 +1213,7 @@ create_resized_lut(struct drm_i915_private *i915, const struct drm_color_lut *lut_in; struct drm_color_lut *lut_out; - blob_out = drm_property_create_blob(&i915->drm, + blob_out = drm_property_create_blob(display->drm, sizeof(lut_out[0]) * lut_out_size, NULL); if (IS_ERR(blob_out)) @@ -1217,7 +1241,7 @@ create_resized_lut(struct drm_i915_private *i915, static void i9xx_load_lut_8(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut; enum pipe pipe = crtc->pipe; int i; @@ -1228,24 +1252,24 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i), + intel_de_write_fw(display, PALETTE(display, pipe, i), i9xx_lut_8(&lut[i])); } static void i9xx_load_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 0), i9xx_lut_10_ldw(&lut[i])); - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 1), i9xx_lut_10_udw(&lut[i])); } } @@ -1271,23 +1295,23 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) static void i965_load_lut_10p6(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 0), i965_lut_10p6_ldw(&lut[i])); - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 1), i965_lut_10p6_udw(&lut[i])); } - intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 0), lut[i].red); - intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 1), lut[i].green); - intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 2), lut[i].blue); + intel_de_write_fw(display, PIPEGCMAX(display, pipe, 0), lut[i].red); + intel_de_write_fw(display, PIPEGCMAX(display, pipe, 
1), lut[i].green); + intel_de_write_fw(display, PIPEGCMAX(display, pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) @@ -1311,12 +1335,12 @@ static void i965_load_luts(const struct intel_crtc_state *crtc_state) static void ilk_lut_write(const struct intel_crtc_state *crtc_state, i915_reg_t reg, u32 val) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->dsb_color_vblank) intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val); else - intel_de_write_fw(i915, reg, val); + intel_de_write_fw(display, reg, val); } static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state, @@ -1523,9 +1547,9 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) } } -static int glk_degamma_lut_size(struct drm_i915_private *i915) +static int glk_degamma_lut_size(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(display) >= 13) return 131; else return 35; @@ -1557,8 +1581,8 @@ static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val) static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; @@ -1589,14 +1613,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, * as compared to just 16 to achieve this. */ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - DISPLAY_VER(i915) >= 14 ? + DISPLAY_VER(display) >= 14 ? mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i])); } /* Clamp values > 1.0. */ - while (i++ < glk_degamma_lut_size(i915)) + while (i++ < glk_degamma_lut_size(display)) ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - DISPLAY_VER(i915) >= 14 ? + DISPLAY_VER(display) >= 14 ? 
1 << 24 : 1 << 16); ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0); @@ -1797,15 +1821,15 @@ static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) static void chv_load_cgm_degamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 0), + intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0), chv_cgm_degamma_ldw(&lut[i])); - intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 1), + intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1), chv_cgm_degamma_udw(&lut[i])); } } @@ -1831,23 +1855,23 @@ static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) static void chv_load_cgm_gamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0), + intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 0), chv_cgm_gamma_ldw(&lut[i])); - intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1), + intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 1), chv_cgm_gamma_udw(&lut[i])); } } static void chv_load_luts(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; @@ -1862,50 +1886,66 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) else i965_load_luts(crtc_state); - intel_de_write_fw(i915, CGM_PIPE_MODE(crtc->pipe), + intel_de_write_fw(display, CGM_PIPE_MODE(crtc->pipe), crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->dsb_color_vblank) return; - i915->display.funcs.color->load_luts(crtc_state); + display->funcs.color->load_luts(crtc_state); } -void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state) +void intel_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (i915->display.funcs.color->color_commit_noarm) - i915->display.funcs.color->color_commit_noarm(crtc_state); + if (display->funcs.color->color_commit_noarm) + display->funcs.color->color_commit_noarm(dsb, crtc_state); } -void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) +void intel_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - - i915->display.funcs.color->color_commit_arm(crtc_state); + struct intel_display *display = to_intel_display(crtc_state); - if (crtc_state->dsb_color_commit) - 
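chv_load_luts() finishes by writing cgm_mode, which switches the individual CGM pipeline stages on and off. The helper that derives it is outside this hunk; from the drm_WARN_ON()s in chv_assign_csc() (note both branches demand the CSC bit, with an identity matrix standing in when there is no CTM) it plausibly looks like the following, where the DEGAMMA/GAMMA bit names are assumed:

	/* illustrative only; bit names beyond CGM_PIPE_MODE_CSC assumed */
	u32 cgm_mode = CGM_PIPE_MODE_CSC;	/* always on, identity if no CTM */

	if (crtc_state->hw.degamma_lut)
		cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
	if (crtc_state->hw.gamma_lut &&
	    !lut_is_legacy(crtc_state->hw.gamma_lut))
		cgm_mode |= CGM_PIPE_MODE_GAMMA;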
intel_dsb_commit(crtc_state->dsb_color_commit, false); + display->funcs.color->color_commit_arm(dsb, crtc_state); } void intel_color_post_update(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + + if (display->funcs.color->color_post_update) + display->funcs.color->color_post_update(crtc_state); +} + +void intel_color_modeset(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + intel_color_load_luts(crtc_state); + intel_color_commit_noarm(NULL, crtc_state); + intel_color_commit_arm(NULL, crtc_state); - if (i915->display.funcs.color->color_post_update) - i915->display.funcs.color->color_post_update(crtc_state); + if (DISPLAY_VER(display) < 9) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + /* update DSPCNTR to configure gamma/csc for pipe bottom color */ + plane->disable_arm(NULL, plane, crtc_state); + } } void intel_color_prepare_commit(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1923,30 +1963,16 @@ void intel_color_prepare_commit(struct intel_atomic_state *state, if (!crtc_state->dsb_color_vblank) return; - i915->display.funcs.color->load_luts(crtc_state); - - intel_dsb_finish(crtc_state->dsb_color_vblank); + display->funcs.color->load_luts(crtc_state); - crtc_state->dsb_color_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16); - if (!crtc_state->dsb_color_commit) { - intel_dsb_cleanup(crtc_state->dsb_color_vblank); - crtc_state->dsb_color_vblank = NULL; - return; - } + intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank); + intel_dsb_interrupt(crtc_state->dsb_color_vblank); - intel_dsb_chain(state, crtc_state->dsb_color_commit, - crtc_state->dsb_color_vblank, true); - - intel_dsb_finish(crtc_state->dsb_color_commit); + intel_dsb_finish(crtc_state->dsb_color_vblank); } void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state) { - if (crtc_state->dsb_color_commit) { - intel_dsb_cleanup(crtc_state->dsb_color_commit); - crtc_state->dsb_color_commit = NULL; - } - if (crtc_state->dsb_color_vblank) { intel_dsb_cleanup(crtc_state->dsb_color_vblank); crtc_state->dsb_color_vblank = NULL; @@ -1955,8 +1981,6 @@ void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state) void intel_color_wait_commit(const struct intel_crtc_state *crtc_state) { - if (crtc_state->dsb_color_commit) - intel_dsb_wait(crtc_state->dsb_color_commit); if (crtc_state->dsb_color_vblank) intel_dsb_wait(crtc_state->dsb_color_vblank); } @@ -2008,7 +2032,7 @@ static bool chv_can_preload_luts(struct intel_atomic_state *state, int intel_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -2024,20 +2048,19 @@ int intel_color_check(struct intel_atomic_state *state, if (!intel_crtc_needs_color_update(new_crtc_state)) return 0; - return i915->display.funcs.color->color_check(state, crtc); + return 
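The rework above collapses the two-DSB scheme (a small dsb_color_commit chained in front of dsb_color_vblank) into a single self-contained batch: the vblank DSB now carries its own delay and completion interrupt, so intel_color_wait_commit() and intel_color_cleanup_commit() shrink accordingly. Condensed, the prepare-time sequence is:

	/* condensed from intel_color_prepare_commit() above;
	 * dsb abbreviates crtc_state->dsb_color_vblank */
	display->funcs.color->load_luts(crtc_state);	/* LUT writes into the DSB */

	intel_dsb_wait_vblank_delay(state, dsb);	/* stall until vblank delay */
	intel_dsb_interrupt(dsb);			/* signal completion */
	intel_dsb_finish(dsb);				/* close the batch */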
display->funcs.color->color_check(state, crtc); } void intel_color_get_config(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (i915->display.funcs.color->get_config) - i915->display.funcs.color->get_config(crtc_state); + display->funcs.color->get_config(crtc_state); - i915->display.funcs.color->read_luts(crtc_state); + display->funcs.color->read_luts(crtc_state); - if (i915->display.funcs.color->read_csc) - i915->display.funcs.color->read_csc(crtc_state); + if (display->funcs.color->read_csc) + display->funcs.color->read_csc(crtc_state); } bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, @@ -2045,7 +2068,7 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); /* * FIXME c8_planes readout missing thus @@ -2054,14 +2077,14 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, if (!is_pre_csc_lut && crtc_state->c8_planes) return true; - return i915->display.funcs.color->lut_equal(crtc_state, blob1, blob2, - is_pre_csc_lut); + return display->funcs.color->lut_equal(crtc_state, blob1, blob2, + is_pre_csc_lut); } static bool need_plane_update(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane); /* * On pre-SKL the pipe gamma enable and pipe csc enable for @@ -2069,15 +2092,14 @@ static bool need_plane_update(struct intel_plane *plane, * We have to reconfigure that even if the plane is inactive. 
*/ return crtc_state->active_planes & BIT(plane->id) || - (DISPLAY_VER(i915) < 9 && - plane->id == PLANE_PRIMARY); + (DISPLAY_VER(display) < 9 && plane->id == PLANE_PRIMARY); } static int intel_color_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -2092,7 +2114,7 @@ intel_color_add_affected_planes(struct intel_atomic_state *state, new_crtc_state->csc_enable == old_crtc_state->csc_enable) return 0; - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { struct intel_plane_state *plane_state; if (!need_plane_update(plane, new_crtc_state)) @@ -2107,7 +2129,7 @@ intel_color_add_affected_planes(struct intel_atomic_state *state, new_crtc_state->do_async_flip = false; /* plane control register changes blocked by CxSR */ - if (HAS_GMCH(i915)) + if (HAS_GMCH(display)) new_crtc_state->disable_cxsr = true; } @@ -2116,43 +2138,44 @@ intel_color_add_affected_planes(struct intel_atomic_state *state, static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (lut_is_legacy(gamma_lut)) return 0; - return DISPLAY_INFO(i915)->color.gamma_lut_tests; + return DISPLAY_INFO(display)->color.gamma_lut_tests; } static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - return DISPLAY_INFO(i915)->color.degamma_lut_tests; + return DISPLAY_INFO(display)->color.degamma_lut_tests; } static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (lut_is_legacy(gamma_lut)) return LEGACY_LUT_LENGTH; - return DISPLAY_INFO(i915)->color.gamma_lut_size; + return DISPLAY_INFO(display)->color.gamma_lut_size; } static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - return DISPLAY_INFO(i915)->color.degamma_lut_size; + return DISPLAY_INFO(display)->color.degamma_lut_size; } -static int check_lut_size(struct drm_i915_private *i915, +static int check_lut_size(struct intel_crtc *crtc, const char *lut_name, const struct drm_property_blob *lut, int expected) { + struct intel_display *display = to_intel_display(crtc); int len; if (!lut) @@ -2160,8 +2183,9 @@ static int check_lut_size(struct drm_i915_private *i915, len = drm_color_lut_size(lut); if (len != expected) { - drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n", - len, expected); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] Invalid %s LUT size; got %d, expected %d\n", + crtc->base.base.id, crtc->base.name, lut_name, len, expected); return -EINVAL; } @@ -2171,23 +2195,25 @@ static int check_lut_size(struct drm_i915_private *i915, static int _check_luts(const struct 
intel_crtc_state *crtc_state, u32 degamma_tests, u32 gamma_tests) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; int gamma_length, degamma_length; /* C8 relies on its palette being stored in the legacy LUT */ if (crtc_state->c8_planes && !lut_is_legacy(crtc_state->hw.gamma_lut)) { - drm_dbg_kms(&i915->drm, - "C8 pixelformat requires the legacy LUT\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] C8 pixelformat requires the legacy LUT\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } degamma_length = intel_degamma_lut_size(crtc_state); gamma_length = intel_gamma_lut_size(crtc_state); - if (check_lut_size(i915, degamma_lut, degamma_length) || - check_lut_size(i915, gamma_lut, gamma_length)) + if (check_lut_size(crtc, "degamma", degamma_lut, degamma_length) || + check_lut_size(crtc, "gamma", gamma_lut, gamma_length)) return -EINVAL; if (drm_color_lut_check(degamma_lut, degamma_tests) || @@ -2219,9 +2245,10 @@ static int i9xx_lut_10_diff(u16 a, u16 b) drm_color_lut_extract(b, 10); } -static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, +static int i9xx_check_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int lut_size = drm_color_lut_size(blob); const struct drm_color_lut *a = &lut[lut_size - 2]; @@ -2230,7 +2257,9 @@ static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, if (i9xx_lut_10_diff(b->red, a->red) > 0x7f || i9xx_lut_10_diff(b->green, a->green) > 0x7f || i9xx_lut_10_diff(b->blue, a->blue) > 0x7f) { - drm_dbg_kms(&dev_priv->drm, "Last gamma LUT entry exceeds max slope\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] Last gamma LUT entry exceeds max slope\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2239,28 +2268,28 @@ static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, void intel_color_assert_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); /* make sure {pre,post}_csc_lut were correctly assigned */ - if (DISPLAY_VER(i915) >= 11 || HAS_GMCH(i915)) { - drm_WARN_ON(&i915->drm, + if (DISPLAY_VER(display) >= 11 || HAS_GMCH(display)) { + drm_WARN_ON(display->drm, crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); - } else if (DISPLAY_VER(i915) == 10) { - drm_WARN_ON(&i915->drm, + } else if (DISPLAY_VER(display) == 10) { + drm_WARN_ON(display->drm, crtc_state->post_csc_lut == crtc_state->hw.gamma_lut && crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut && - crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut); - drm_WARN_ON(&i915->drm, + crtc_state->pre_csc_lut != display->color.glk_linear_degamma_lut); + drm_WARN_ON(display->drm, !ilk_lut_limited_range(crtc_state) && crtc_state->post_csc_lut != NULL && crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); } else if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut && 
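i9xx_check_lut_10() enforces a hardware limit flagged by its debug message: in the pre-gen4 10-bit gamma mode the last two LUT entries may differ by at most 0x7f per channel after truncation to 10 bits (the "max slope" the message refers to). One channel checked by hand:

	/* illustrative: last-entry slope check, red channel only */
	int diff = drm_color_lut_extract(lut[lut_size - 1].red, 10) -
		   drm_color_lut_extract(lut[lut_size - 2].red, 10);

	if (diff > 0x7f)
		return -EINVAL;	/* slope exceeds what the hardware encodes */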
crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, !ilk_lut_limited_range(crtc_state) && crtc_state->post_csc_lut != crtc_state->hw.degamma_lut && crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); @@ -2278,7 +2307,7 @@ static void intel_assign_luts(struct intel_crtc_state *crtc_state) static int i9xx_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ -2293,9 +2322,9 @@ static int i9xx_color_check(struct intel_atomic_state *state, crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); - if (DISPLAY_VER(i915) < 4 && + if (DISPLAY_VER(display) < 4 && crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) { - ret = i9xx_check_lut_10(i915, crtc_state->hw.gamma_lut); + ret = i9xx_check_lut_10(crtc, crtc_state->hw.gamma_lut); if (ret) return ret; } @@ -2462,12 +2491,12 @@ static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state) static int ilk_assign_luts(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (ilk_lut_limited_range(crtc_state)) { struct drm_property_blob *gamma_lut; - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, drm_color_lut_size(crtc_state->hw.gamma_lut), true); if (IS_ERR(gamma_lut)) @@ -2501,7 +2530,7 @@ static int ilk_assign_luts(struct intel_crtc_state *crtc_state) static int ilk_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ -2511,15 +2540,17 @@ static int ilk_color_check(struct intel_atomic_state *state, return ret; if (crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { - drm_dbg_kms(&i915->drm, - "Degamma and gamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] Degamma and gamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { - drm_dbg_kms(&i915->drm, - "YCbCr and CTM together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and CTM together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2572,21 +2603,21 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state) static int ivb_assign_luts(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct drm_property_blob *degamma_lut, *gamma_lut; if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) return ilk_assign_luts(crtc_state); - drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024); - drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024); + drm_WARN_ON(display->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024); + drm_WARN_ON(display->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024); - degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512, + degamma_lut = 
create_resized_lut(display, crtc_state->hw.degamma_lut, 512, false); if (IS_ERR(degamma_lut)) return PTR_ERR(degamma_lut); - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, 512, ilk_lut_limited_range(crtc_state)); if (IS_ERR(gamma_lut)) { drm_property_blob_put(degamma_lut); @@ -2605,7 +2636,7 @@ static int ivb_assign_luts(struct intel_crtc_state *crtc_state) static int ivb_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ -2615,22 +2646,25 @@ static int ivb_color_check(struct intel_atomic_state *state, return ret; if (crtc_state->c8_planes && crtc_state->hw.degamma_lut) { - drm_dbg_kms(&i915->drm, - "C8 pixelformat and degamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] C8 pixelformat and degamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { - drm_dbg_kms(&i915->drm, - "YCbCr and CTM together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and CTM together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { - drm_dbg_kms(&i915->drm, - "YCbCr and degamma+gamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and degamma+gamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2675,13 +2709,13 @@ static bool glk_use_pre_csc_lut_for_gamma(const struct intel_crtc_state *crtc_st static int glk_assign_luts(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (glk_use_pre_csc_lut_for_gamma(crtc_state)) { struct drm_property_blob *gamma_lut; - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, - DISPLAY_INFO(i915)->color.degamma_lut_size, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, + DISPLAY_INFO(display)->color.degamma_lut_size, false); if (IS_ERR(gamma_lut)) return PTR_ERR(gamma_lut); @@ -2697,7 +2731,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state) if (ilk_lut_limited_range(crtc_state)) { struct drm_property_blob *gamma_lut; - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, drm_color_lut_size(crtc_state->hw.gamma_lut), true); if (IS_ERR(gamma_lut)) @@ -2720,7 +2754,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state) */ if (crtc_state->csc_enable && !crtc_state->pre_csc_lut) drm_property_replace_blob(&crtc_state->pre_csc_lut, - i915->display.color.glk_linear_degamma_lut); + display->color.glk_linear_degamma_lut); return 0; } @@ -2739,7 +2773,7 @@ static int glk_check_luts(const struct intel_crtc_state *crtc_state) static int glk_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ 
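ivb_assign_luts() handles split-gamma mode by shrinking both 1024-entry UAPI LUTs to the 512 entries each hardware half consumes, via create_resized_lut(). That helper's body is not part of this diff; a minimal sketch of its core, assuming nearest-entry resampling with optional range compression:

	/* a sketch, not the in-tree implementation */
	int lut_in_size = drm_color_lut_size(blob_in);

	for (i = 0; i < lut_out_size; i++) {
		const struct drm_color_lut *entry =
			&lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)];

		if (limited_color_range) {
			lut_out[i].red = lut_limited_range(entry->red);
			lut_out[i].green = lut_limited_range(entry->green);
			lut_out[i].blue = lut_limited_range(entry->blue);
		} else {
			lut_out[i] = *entry;
		}
	}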
-2750,15 +2784,17 @@ static int glk_color_check(struct intel_atomic_state *state, if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { - drm_dbg_kms(&i915->drm, - "YCbCr and CTM together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and CTM together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { - drm_dbg_kms(&i915->drm, - "YCbCr and degamma+gamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and degamma+gamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2795,8 +2831,7 @@ static int glk_color_check(struct intel_atomic_state *state, static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); u32 gamma_mode = 0; if (crtc_state->hw.degamma_lut) @@ -2814,7 +2849,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) * ToDo: Extend to Logarithmic Gamma once the new UAPI * is accepted and implemented by a userspace consumer */ - else if (DISPLAY_VER(i915) >= 13) + else if (DISPLAY_VER(display) >= 13) gamma_mode |= GAMMA_MODE_MODE_10BIT; else gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEG; @@ -3195,13 +3230,13 @@ static bool icl_lut_equal(const struct intel_crtc_state *crtc_state, static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; int i; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * LEGACY_LUT_LENGTH, NULL); if (IS_ERR(blob)) @@ -3210,8 +3245,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, i)); + u32 val = intel_de_read_fw(display, + PALETTE(display, pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -3221,15 +3256,15 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + u32 lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; u32 ldw, udw; int i; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, lut_size * sizeof(lut[0]), NULL); if (IS_ERR(blob)) return NULL; @@ -3237,10 +3272,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - ldw = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0)); - udw = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1)); + ldw = intel_de_read_fw(display, + PALETTE(display, pipe, 2 * i + 0)); + udw = intel_de_read_fw(display, + PALETTE(display, pipe, 2 * i + 1)); i9xx_lut_10_pack(&lut[i], ldw, udw); } @@ 
@@ -3272,13 +3307,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int i, lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size;
+	struct intel_display *display = to_intel_display(crtc);
+	int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&dev_priv->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3287,17 +3322,17 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
 	lut = blob->data;
 
 	for (i = 0; i < lut_size - 1; i++) {
-		u32 ldw = intel_de_read_fw(dev_priv,
-					   PALETTE(dev_priv, pipe, 2 * i + 0));
-		u32 udw = intel_de_read_fw(dev_priv,
-					   PALETTE(dev_priv, pipe, 2 * i + 1));
+		u32 ldw = intel_de_read_fw(display,
+					   PALETTE(display, pipe, 2 * i + 0));
+		u32 udw = intel_de_read_fw(display,
+					   PALETTE(display, pipe, 2 * i + 1));
 
 		i965_lut_10p6_pack(&lut[i], ldw, udw);
 	}
 
-	lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 0)));
-	lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 1)));
-	lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 2)));
+	lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 0)));
+	lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 1)));
+	lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 2)));
 
 	return blob;
 }
@@ -3324,13 +3359,13 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size;
+	struct intel_display *display = to_intel_display(crtc);
+	int i, lut_size = DISPLAY_INFO(display)->color.degamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&dev_priv->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3339,8 +3374,8 @@ static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
 	lut = blob->data;
 
 	for (i = 0; i < lut_size; i++) {
-		u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0));
-		u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1));
+		u32 ldw = intel_de_read_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0));
+		u32 udw = intel_de_read_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1));
 
 		chv_cgm_degamma_pack(&lut[i], ldw, udw);
 	}
@@ -3350,13 +3385,13 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
+	struct intel_display *display = to_intel_display(crtc);
+	int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&i915->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3365,8 +3400,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
 	lut = blob->data;
 
 	for (i = 0; i < lut_size; i++) {
-		u32 ldw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0));
-		u32 udw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1));
+		u32 ldw = intel_de_read_fw(display, CGM_PIPE_GAMMA(pipe, i, 0));
+		u32 udw = intel_de_read_fw(display, CGM_PIPE_GAMMA(pipe, i, 1));
 
 		chv_cgm_gamma_pack(&lut[i], ldw, udw);
 	}
@@ -3376,10 +3411,10 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
 
 static void chv_get_config(struct intel_crtc_state *crtc_state)
 {
+	struct intel_display *display = to_intel_display(crtc_state);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 
-	crtc_state->cgm_mode = intel_de_read(i915, CGM_PIPE_MODE(crtc->pipe));
+	crtc_state->cgm_mode = intel_de_read(display, CGM_PIPE_MODE(crtc->pipe));
 
 	i9xx_get_config(crtc_state);
 }
@@ -3399,13 +3434,13 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 	int i;
 
-	blob = drm_property_create_blob(&i915->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * LEGACY_LUT_LENGTH,
 					NULL);
 	if (IS_ERR(blob))
@@ -3414,7 +3449,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
 	lut = blob->data;
 
 	for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
-		u32 val = intel_de_read_fw(i915, LGC_PALETTE(pipe, i));
+		u32 val = intel_de_read_fw(display, LGC_PALETTE(pipe, i));
 
 		i9xx_lut_8_pack(&lut[i], val);
 	}
@@ -3424,13 +3459,13 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
+	struct intel_display *display = to_intel_display(crtc);
+	int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&i915->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3439,7 +3474,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
 	lut = blob->data;
 
 	for (i = 0; i < lut_size; i++) {
-		u32 val = intel_de_read_fw(i915, PREC_PALETTE(pipe, i));
+		u32 val = intel_de_read_fw(display, PREC_PALETTE(pipe, i));
 
 		ilk_lut_10_pack(&lut[i], val);
 	}
@@ -3487,13 +3522,13 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc,
 						 u32 prec_index)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	int i, lut_size = ivb_lut_10_size(prec_index);
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&dev_priv->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3504,14 +3539,14 @@ static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc,
 	for (i = 0; i < lut_size; i++) {
 		u32 val;
 
-		intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+		intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
 				  prec_index + i);
-		val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
+		val = intel_de_read_fw(display, PREC_PAL_DATA(pipe));
 
 		ilk_lut_10_pack(&lut[i], val);
 	}
 
-	intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
 			  PAL_PREC_INDEX_VALUE(0));
 
 	return blob;
@@ -3552,13 +3587,13 @@ static void ivb_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
 						 u32 prec_index)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	int i, lut_size = ivb_lut_10_size(prec_index);
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&i915->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3566,19 +3601,19 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
 
 	lut = blob->data;
 
-	intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
 			  prec_index);
-	intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
 			  PAL_PREC_AUTO_INCREMENT |
 			  prec_index);
 
 	for (i = 0; i < lut_size; i++) {
-		u32 val = intel_de_read_fw(i915, PREC_PAL_DATA(pipe));
+		u32 val = intel_de_read_fw(display, PREC_PAL_DATA(pipe));
 
 		ilk_lut_10_pack(&lut[i], val);
 	}
 
-	intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
 			  PAL_PREC_INDEX_VALUE(0));
 
 	return blob;
@@ -3617,13 +3652,13 @@ static void bdw_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size;
+	struct intel_display *display = to_intel_display(crtc);
+	int i, lut_size = DISPLAY_INFO(display)->color.degamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&dev_priv->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3636,22 +3671,22 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
 	 * ignore the index bits, so we need to reset it to index 0
 	 * separately.
 	 */
-	intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+	intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe),
 			  PRE_CSC_GAMC_INDEX_VALUE(0));
-	intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+	intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe),
 			  PRE_CSC_GAMC_AUTO_INCREMENT |
 			  PRE_CSC_GAMC_INDEX_VALUE(0));
 
 	for (i = 0; i < lut_size; i++) {
-		u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
+		u32 val = intel_de_read_fw(display, PRE_CSC_GAMC_DATA(pipe));
 
-		if (DISPLAY_VER(dev_priv) >= 14)
+		if (DISPLAY_VER(display) >= 14)
 			mtl_degamma_lut_pack(&lut[i], val);
 		else
 			glk_degamma_lut_pack(&lut[i], val);
 	}
 
-	intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+	intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe),
 			  PRE_CSC_GAMC_INDEX_VALUE(0));
 
 	return blob;
@@ -3683,13 +3718,13 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
 
 static struct drm_property_blob *
 icl_read_lut_multi_segment(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
+	struct intel_display *display = to_intel_display(crtc);
+	int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
 	struct drm_color_lut *lut;
 
-	blob = drm_property_create_blob(&i915->drm,
+	blob = drm_property_create_blob(display->drm,
 					sizeof(lut[0]) * lut_size,
 					NULL);
 	if (IS_ERR(blob))
@@ -3697,20 +3732,20 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
 
 	lut = blob->data;
 
-	intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe),
 			  PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
-	intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe),
 			  PAL_PREC_MULTI_SEG_AUTO_INCREMENT |
 			  PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
 
 	for (i = 0; i < 9; i++) {
-		u32 ldw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe));
-		u32 udw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe));
+		u32 ldw = intel_de_read_fw(display, PREC_PAL_MULTI_SEG_DATA(pipe));
+		u32 udw = intel_de_read_fw(display, PREC_PAL_MULTI_SEG_DATA(pipe));
 
 		ilk_lut_12p4_pack(&lut[i], ldw, udw);
 	}
 
-	intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+	intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe),
 			  PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
 
 	/*
@@ -3877,15 +3912,15 @@ static const struct intel_color_funcs ilk_color_funcs = {
 
 void intel_color_crtc_init(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	int degamma_lut_size, gamma_lut_size;
 	bool has_ctm;
 
 	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
-	gamma_lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
-	degamma_lut_size = DISPLAY_INFO(i915)->color.degamma_lut_size;
-	has_ctm = DISPLAY_VER(i915) >= 5;
+	gamma_lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
+	degamma_lut_size = DISPLAY_INFO(display)->color.degamma_lut_size;
+	has_ctm = DISPLAY_VER(display) >= 5;
 
 	/*
 	 * "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the
@@ -3895,57 +3930,59 @@ void intel_color_crtc_init(struct intel_crtc *crtc)
 	 * Confirmed on alv,cst,pnv. Mobile gen2 parts (alm,mgm)
 	 * are confirmed not to suffer from this restriction.
 	 */
-	if (DISPLAY_VER(i915) == 3 && crtc->pipe == PIPE_A)
+	if (DISPLAY_VER(display) == 3 && crtc->pipe == PIPE_A)
 		gamma_lut_size = 256;
 
 	drm_crtc_enable_color_mgmt(&crtc->base, degamma_lut_size,
 				   has_ctm, gamma_lut_size);
 }
 
-int intel_color_init(struct drm_i915_private *i915)
+int intel_color_init(struct intel_display *display)
 {
 	struct drm_property_blob *blob;
 
-	if (DISPLAY_VER(i915) != 10)
+	if (DISPLAY_VER(display) != 10)
 		return 0;
 
-	blob = create_linear_lut(i915,
-				 DISPLAY_INFO(i915)->color.degamma_lut_size);
+	blob = create_linear_lut(display,
+				 DISPLAY_INFO(display)->color.degamma_lut_size);
 	if (IS_ERR(blob))
 		return PTR_ERR(blob);
 
-	i915->display.color.glk_linear_degamma_lut = blob;
+	display->color.glk_linear_degamma_lut = blob;
 
 	return 0;
 }
 
-void intel_color_init_hooks(struct drm_i915_private *i915)
+void intel_color_init_hooks(struct intel_display *display)
 {
-	if (HAS_GMCH(i915)) {
+	struct drm_i915_private *i915 = to_i915(display->drm);
+
+	if (HAS_GMCH(display)) {
 		if (IS_CHERRYVIEW(i915))
-			i915->display.funcs.color = &chv_color_funcs;
+			display->funcs.color = &chv_color_funcs;
 		else if (IS_VALLEYVIEW(i915))
-			i915->display.funcs.color = &vlv_color_funcs;
-		else if (DISPLAY_VER(i915) >= 4)
-			i915->display.funcs.color = &i965_color_funcs;
+			display->funcs.color = &vlv_color_funcs;
+		else if (DISPLAY_VER(display) >= 4)
+			display->funcs.color = &i965_color_funcs;
 		else
-			i915->display.funcs.color = &i9xx_color_funcs;
+			display->funcs.color = &i9xx_color_funcs;
 	} else {
-		if (DISPLAY_VER(i915) >= 12)
-			i915->display.funcs.color = &tgl_color_funcs;
-		else if (DISPLAY_VER(i915) == 11)
-			i915->display.funcs.color = &icl_color_funcs;
-		else if (DISPLAY_VER(i915) == 10)
-			i915->display.funcs.color = &glk_color_funcs;
-		else if (DISPLAY_VER(i915) == 9)
-			i915->display.funcs.color = &skl_color_funcs;
-		else if (DISPLAY_VER(i915) == 8)
-			i915->display.funcs.color = &bdw_color_funcs;
+		if (DISPLAY_VER(display) >= 12)
+			display->funcs.color = &tgl_color_funcs;
+		else if (DISPLAY_VER(display) == 11)
+			display->funcs.color = &icl_color_funcs;
+		else if (DISPLAY_VER(display) == 10)
+			display->funcs.color = &glk_color_funcs;
+		else if (DISPLAY_VER(display) == 9)
+			display->funcs.color = &skl_color_funcs;
+		else if (DISPLAY_VER(display) == 8)
+			display->funcs.color = &bdw_color_funcs;
 		else if (IS_HASWELL(i915))
-			i915->display.funcs.color = &hsw_color_funcs;
-		else if (DISPLAY_VER(i915) == 7)
-			i915->display.funcs.color = &ivb_color_funcs;
+			display->funcs.color = &hsw_color_funcs;
+		else if (DISPLAY_VER(display) == 7)
+			display->funcs.color = &ivb_color_funcs;
 		else
-			i915->display.funcs.color = &ilk_color_funcs;
+			display->funcs.color = &ilk_color_funcs;
 	}
 }
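Every readout helper in intel_color.c above follows the same shape: create a blob sized for the hardware LUT, walk the palette registers, and pack each raw word into a color LUT entry. A minimal user-space sketch of that shape, with the register access stubbed out (read_palette_word() and the 8-bit packing below are placeholders for demonstration, not the driver's actual helpers):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct color_lut { uint16_t red, green, blue; };

/* Stub for intel_de_read_fw(display, PALETTE(...)): returns a fake
 * packed 8:8:8 palette word so the sketch runs standalone. */
static uint32_t read_palette_word(int pipe, int i)
{
	return (uint32_t)(i & 0xff) * 0x010101;
}

/* Expand an 8-bit-per-channel word to 16-bit LUT entries, the same
 * 0x101 replication the driver's 8-bit pack helper effectively does. */
static void lut_8_pack(struct color_lut *e, uint32_t val)
{
	e->red   = ((val >> 16) & 0xff) * 0x101;
	e->green = ((val >> 8) & 0xff) * 0x101;
	e->blue  = (val & 0xff) * 0x101;
}

static struct color_lut *read_lut_8(int pipe, int lut_size)
{
	struct color_lut *lut = calloc(lut_size, sizeof(*lut));

	if (!lut)
		return NULL;
	for (int i = 0; i < lut_size; i++)
		lut_8_pack(&lut[i], read_palette_word(pipe, i));
	return lut;
}

int main(void)
{
	struct color_lut *lut = read_lut_8(0, 256);

	printf("entry 255: %04x %04x %04x\n",
	       lut[255].red, lut[255].green, lut[255].blue);
	free(lut);
	return 0;
}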
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 79f230a1709a..9d66457c1e89 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -11,11 +11,12 @@
 struct intel_atomic_state;
 struct intel_crtc_state;
 struct intel_crtc;
-struct drm_i915_private;
+struct intel_display;
+struct intel_dsb;
 struct drm_property_blob;
 
-void intel_color_init_hooks(struct drm_i915_private *i915);
-int intel_color_init(struct drm_i915_private *i915);
+void intel_color_init_hooks(struct intel_display *display);
+int intel_color_init(struct intel_display *display);
 void intel_color_crtc_init(struct intel_crtc *crtc);
 int intel_color_check(struct intel_atomic_state *state,
 		      struct intel_crtc *crtc);
@@ -24,10 +25,13 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
 void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
 bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
 void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);
-void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
-void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_commit_noarm(struct intel_dsb *dsb,
+			      const struct intel_crtc_state *crtc_state);
+void intel_color_commit_arm(struct intel_dsb *dsb,
+			    const struct intel_crtc_state *crtc_state);
 void intel_color_post_update(const struct intel_crtc_state *crtc_state);
 void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_modeset(const struct intel_crtc_state *crtc_state);
 void intel_color_get_config(struct intel_crtc_state *crtc_state);
 bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
 			   const struct drm_property_blob *blob1,
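intel_color_init_hooks() above is a plain version ladder: pick one static ops table per display generation and stash the pointer, so every later call dispatches through display->funcs.color without re-checking the platform. The same pattern in miniature, standalone, with invented version numbers and ops (the chv/i965/ilk names here are only labels, not the driver's tables):

#include <stdio.h>

struct color_funcs {
	const char *name;
	void (*load_luts)(void);
};

static void noop_load_luts(void) { }

static const struct color_funcs chv_funcs  = { "chv",  noop_load_luts };
static const struct color_funcs i965_funcs = { "i965", noop_load_luts };
static const struct color_funcs ilk_funcs  = { "ilk",  noop_load_luts };

/* Mirror of the if/else ladder: most specific platform checks first,
 * most generic fallback last, so each device lands on exactly one table. */
static const struct color_funcs *pick_color_funcs(int ver, int is_chv)
{
	if (is_chv)
		return &chv_funcs;
	if (ver >= 4)
		return &i965_funcs;
	return &ilk_funcs;
}

int main(void)
{
	printf("ver 4 -> %s\n", pick_color_funcs(4, 0)->name);
	printf("chv   -> %s\n", pick_color_funcs(4, 1)->name);
	return 0;
}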
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 835c8b844494..74c1983fe07e 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -81,12 +81,13 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
 	return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
 
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_crt_port_enabled(struct intel_display *display,
 			    i915_reg_t adpa_reg, enum pipe *pipe)
 {
+	struct drm_i915_private *dev_priv = to_i915(display->drm);
 	u32 val;
 
-	val = intel_de_read(dev_priv, adpa_reg);
+	val = intel_de_read(display, adpa_reg);
 
 	/* asserts want to know the pipe even if the port is disabled */
 	if (HAS_PCH_CPT(dev_priv))
@@ -100,6 +101,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
 static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe)
 {
+	struct intel_display *display = to_intel_display(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	intel_wakeref_t wakeref;
@@ -110,7 +112,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 	if (!wakeref)
 		return false;
 
-	ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
+	ret = intel_crt_port_enabled(display, crt->adpa_reg, pipe);
 
 	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
 
@@ -119,11 +121,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 
 static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_display *display = to_intel_display(encoder);
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	u32 tmp, flags = 0;
 
-	tmp = intel_de_read(dev_priv, crt->adpa_reg);
+	tmp = intel_de_read(display, crt->adpa_reg);
 
 	if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -168,13 +170,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *crtc_state,
 			       int mode)
 {
+	struct intel_display *display = to_intel_display(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 	u32 adpa;
 
-	if (DISPLAY_VER(dev_priv) >= 5)
+	if (DISPLAY_VER(display) >= 5)
 		adpa = ADPA_HOTPLUG_BITS;
 	else
 		adpa = 0;
@@ -193,7 +196,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 		adpa |= ADPA_PIPE_SEL(crtc->pipe);
 
 	if (!HAS_PCH_SPLIT(dev_priv))
-		intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0);
+		intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -210,7 +213,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 		break;
 	}
 
-	intel_de_write(dev_priv, crt->adpa_reg, adpa);
+	intel_de_write(display, crt->adpa_reg, adpa);
 }
 
 static void intel_disable_crt(struct intel_atomic_state *state,
@@ -241,9 +244,10 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
 			    const struct intel_crtc_state *old_crtc_state,
 			    const struct drm_connector_state *old_conn_state)
 {
+	struct intel_display *display = to_intel_display(state);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
+	drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 }
@@ -253,6 +257,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
 				 const struct intel_crtc_state *old_crtc_state,
 				 const struct drm_connector_state *old_conn_state)
 {
+	struct intel_display *display = to_intel_display(state);
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
@@ -272,7 +277,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
 
 	hsw_fdi_disable(encoder);
 
-	drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
+	drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
@@ -282,9 +287,10 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
 				   const struct intel_crtc_state *crtc_state,
 				   const struct drm_connector_state *conn_state)
 {
+	struct intel_display *display = to_intel_display(state);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+	drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 }
@@ -294,11 +300,12 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state)
 {
+	struct intel_display *display = to_intel_display(state);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 
-	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+	drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
@@ -312,11 +319,12 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
 			   const struct intel_crtc_state *crtc_state,
 			   const struct drm_connector_state *conn_state)
 {
+	struct intel_display *display = to_intel_display(state);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 
-	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+	drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
 
 	intel_ddi_enable_transcoder_func(encoder, crtc_state);
 
@@ -346,9 +354,10 @@ static enum drm_mode_status
 intel_crt_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
 {
+	struct intel_display *display = to_intel_display(connector->dev);
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
+	int max_dotclk = display->cdclk.max_dotclk_freq;
 	enum drm_mode_status status;
 	int max_clock;
 
@@ -367,7 +376,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
 		 * DAC limit supposedly 355 MHz.
 		 */
 		max_clock = 270000;
-	else if (IS_DISPLAY_VER(dev_priv, 3, 4))
+	else if (IS_DISPLAY_VER(display, 3, 4))
 		max_clock = 400000;
 	else
 		max_clock = 350000;
@@ -428,6 +437,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config,
 				  struct drm_connector_state *conn_state)
 {
+	struct intel_display *display = to_intel_display(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode =
 		&pipe_config->hw.adjusted_mode;
@@ -450,7 +460,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 	if (HAS_PCH_LPT(dev_priv)) {
 		/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
 		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "LPT only supports 24bpp\n");
 			return -EINVAL;
 		}
@@ -470,6 +480,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 
 static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
 {
+	struct intel_display *display = to_intel_display(connector->dev);
 	struct drm_device *dev = connector->dev;
 	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -483,36 +494,36 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
 
 		crt->force_hotplug_required = false;
 
-		save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
-		drm_dbg_kms(&dev_priv->drm,
+		save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
+		drm_dbg_kms(display->drm,
 			    "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
 		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 		if (turn_off_dac)
 			adpa &= ~ADPA_DAC_ENABLE;
 
-		intel_de_write(dev_priv, crt->adpa_reg, adpa);
+		intel_de_write(display, crt->adpa_reg, adpa);
 
-		if (intel_de_wait_for_clear(dev_priv,
+		if (intel_de_wait_for_clear(display,
 					    crt->adpa_reg,
 					    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000))
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "timed out waiting for FORCE_TRIGGER");
 
 		if (turn_off_dac) {
-			intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
-			intel_de_posting_read(dev_priv, crt->adpa_reg);
+			intel_de_write(display, crt->adpa_reg, save_adpa);
+			intel_de_posting_read(display, crt->adpa_reg);
 		}
 	}
 
 	/* Check the status to see if both blue and green are on now */
-	adpa = intel_de_read(dev_priv, crt->adpa_reg);
+	adpa = intel_de_read(display, crt->adpa_reg);
 	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
 		ret = true;
 	else
 		ret = false;
-	drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+	drm_dbg_kms(display->drm, "ironlake hotplug adpa=0x%x, result %d\n",
 		    adpa, ret);
 
 	return ret;
@@ -520,6 +531,7 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
 
 static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 {
+	struct intel_display *display = to_intel_display(connector->dev);
 	struct drm_device *dev = connector->dev;
 	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -542,29 +554,29 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 	 */
 	reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
 
-	save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
-	drm_dbg_kms(&dev_priv->drm,
+	save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
+	drm_dbg_kms(display->drm,
 		    "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
 	adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 
-	intel_de_write(dev_priv, crt->adpa_reg, adpa);
+	intel_de_write(display, crt->adpa_reg, adpa);
 
-	if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
+	if (intel_de_wait_for_clear(display, crt->adpa_reg,
 				    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(display->drm,
 			    "timed out waiting for FORCE_TRIGGER");
-		intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+		intel_de_write(display, crt->adpa_reg, save_adpa);
 	}
 
 	/* Check the status to see if both blue and green are on now */
-	adpa = intel_de_read(dev_priv, crt->adpa_reg);
+	adpa = intel_de_read(display, crt->adpa_reg);
 	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
 		ret = true;
 	else
 		ret = false;
 
-	drm_dbg_kms(&dev_priv->drm,
+	drm_dbg_kms(display->drm,
 		    "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
 	if (reenable_hpd)
@@ -575,6 +587,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
 static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
+	struct intel_display *display = to_intel_display(connector->dev);
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 stat;
@@ -603,18 +616,18 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 				      CRT_HOTPLUG_FORCE_DETECT,
 				      CRT_HOTPLUG_FORCE_DETECT);
 		/* wait for FORCE_DETECT to go off */
-		if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN(dev_priv),
+		if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display),
 					    CRT_HOTPLUG_FORCE_DETECT, 1000))
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "timed out waiting for FORCE_DETECT to go off");
 	}
 
-	stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv));
+	stat = intel_de_read(display, PORT_HOTPLUG_STAT(display));
 	if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
 		ret = true;
 
 	/* clear the interrupt we just generated, if any */
-	intel_de_write(dev_priv, PORT_HOTPLUG_STAT(dev_priv),
+	intel_de_write(display, PORT_HOTPLUG_STAT(display),
 		       CRT_HOTPLUG_INT_STATUS);
 
 	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
@@ -660,8 +673,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
-	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
-	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
+	struct intel_display *display = to_intel_display(connector->dev);
 	const struct drm_edid *drm_edid;
 	bool ret = false;
 
@@ -674,15 +686,15 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 		 * have to check the EDID input spec of the attached device.
 		 */
 		if (drm_edid_is_digital(drm_edid)) {
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 		} else {
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "CRT detected via DDC:0x50 [EDID]\n");
 			ret = true;
 		}
 	} else {
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(display->drm,
 			    "CRT not detected via DDC:0x50 [no valid EDID found]\n");
 	}
 
@@ -694,8 +706,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 static enum drm_connector_status
 intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 {
-	struct drm_device *dev = crt->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_display *display = to_intel_display(&crt->base);
 	enum transcoder cpu_transcoder = (enum transcoder)pipe;
 	u32 save_bclrpat;
 	u32 save_vtotal;
@@ -706,14 +717,14 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 	u8 st00;
 	enum drm_connector_status status;
 
-	drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
+	drm_dbg_kms(display->drm, "starting load-detect on CRT\n");
 
-	save_bclrpat = intel_de_read(dev_priv,
-				     BCLRPAT(dev_priv, cpu_transcoder));
-	save_vtotal = intel_de_read(dev_priv,
-				    TRANS_VTOTAL(dev_priv, cpu_transcoder));
-	vblank = intel_de_read(dev_priv,
-			       TRANS_VBLANK(dev_priv, cpu_transcoder));
+	save_bclrpat = intel_de_read(display,
+				     BCLRPAT(display, cpu_transcoder));
+	save_vtotal = intel_de_read(display,
+				    TRANS_VTOTAL(display, cpu_transcoder));
+	vblank = intel_de_read(display,
+			       TRANS_VBLANK(display, cpu_transcoder));
 
 	vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1;
 	vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1;
@@ -722,25 +733,25 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 	vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1;
 
 	/* Set the border color to purple. */
-	intel_de_write(dev_priv, BCLRPAT(dev_priv, cpu_transcoder), 0x500050);
+	intel_de_write(display, BCLRPAT(display, cpu_transcoder), 0x500050);
 
-	if (DISPLAY_VER(dev_priv) != 2) {
-		u32 transconf = intel_de_read(dev_priv,
-					      TRANSCONF(dev_priv, cpu_transcoder));
+	if (DISPLAY_VER(display) != 2) {
+		u32 transconf = intel_de_read(display,
+					      TRANSCONF(display, cpu_transcoder));
 
-		intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+		intel_de_write(display, TRANSCONF(display, cpu_transcoder),
 			       transconf | TRANSCONF_FORCE_BORDER);
-		intel_de_posting_read(dev_priv,
-				      TRANSCONF(dev_priv, cpu_transcoder));
+		intel_de_posting_read(display,
+				      TRANSCONF(display, cpu_transcoder));
 		/* Wait for next Vblank to substitue
 		 * border color for Color info */
-		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
-		st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
+		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
+		st00 = intel_de_read8(display, _VGA_MSR_WRITE);
 		status = ((st00 & (1 << 4)) != 0) ?
 			connector_status_connected :
 			connector_status_disconnected;
 
-		intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+		intel_de_write(display, TRANSCONF(display, cpu_transcoder),
 			       transconf);
 	} else {
 		bool restore_vblank = false;
@@ -751,13 +762,13 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 		 * Yes, this will flicker
 		 */
 		if (vblank_start <= vactive && vblank_end >= vtotal) {
-			u32 vsync = intel_de_read(dev_priv,
-						  TRANS_VSYNC(dev_priv, cpu_transcoder));
+			u32 vsync = intel_de_read(display,
+						  TRANS_VSYNC(display, cpu_transcoder));
 			u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1;
 
 			vblank_start = vsync_start;
-			intel_de_write(dev_priv,
-				       TRANS_VBLANK(dev_priv, cpu_transcoder),
+			intel_de_write(display,
+				       TRANS_VBLANK(display, cpu_transcoder),
 				       VBLANK_START(vblank_start - 1) |
 				       VBLANK_END(vblank_end - 1));
 			restore_vblank = true;
@@ -771,9 +782,9 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 		/*
 		 * Wait for the border to be displayed
 		 */
-		while (intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe)) >= vactive)
+		while (intel_de_read(display, PIPEDSL(display, pipe)) >= vactive)
 			;
-		while ((dsl = intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe))) <= vsample)
+		while ((dsl = intel_de_read(display, PIPEDSL(display, pipe))) <= vsample)
 			;
 		/*
 		 * Watch ST00 for an entire scanline
@@ -783,15 +794,15 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 		do {
 			count++;
 			/* Read the ST00 VGA status register */
-			st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
+			st00 = intel_de_read8(display, _VGA_MSR_WRITE);
 			if (st00 & (1 << 4))
 				detect++;
-		} while ((intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe)) == dsl));
+		} while ((intel_de_read(display, PIPEDSL(display, pipe)) == dsl));
 
 		/* restore vblank if necessary */
 		if (restore_vblank)
-			intel_de_write(dev_priv,
-				       TRANS_VBLANK(dev_priv, cpu_transcoder),
+			intel_de_write(display,
+				       TRANS_VBLANK(display, cpu_transcoder),
 				       vblank);
 		/*
 		 * If more than 3/4 of the scanline detected a monitor,
@@ -805,7 +816,7 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
 	}
 
 	/* Restore previous settings */
-	intel_de_write(dev_priv, BCLRPAT(dev_priv, cpu_transcoder),
+	intel_de_write(display, BCLRPAT(display, cpu_transcoder),
 		       save_bclrpat);
 
 	return status;
@@ -842,6 +853,7 @@ intel_crt_detect(struct drm_connector *connector,
 		 struct drm_modeset_acquire_ctx *ctx,
 		 bool force)
 {
+	struct intel_display *display = to_intel_display(connector->dev);
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct intel_encoder *intel_encoder = &crt->base;
@@ -849,7 +861,7 @@ intel_crt_detect(struct drm_connector *connector,
 	intel_wakeref_t wakeref;
 	int status;
 
-	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
 		    connector->base.id, connector->name,
 		    force);
 
@@ -859,7 +871,7 @@ intel_crt_detect(struct drm_connector *connector,
 	if (!intel_display_driver_check_access(dev_priv))
 		return connector->status;
 
-	if (dev_priv->display.params.load_detect_test) {
+	if (display->params.load_detect_test) {
 		wakeref = intel_display_power_get(dev_priv,
 						  intel_encoder->power_domain);
 		goto load_detect;
@@ -872,18 +884,18 @@ intel_crt_detect(struct drm_connector *connector,
 	wakeref = intel_display_power_get(dev_priv,
 					  intel_encoder->power_domain);
 
-	if (I915_HAS_HOTPLUG(dev_priv)) {
+	if (I915_HAS_HOTPLUG(display)) {
 		/* We can not rely on the HPD pin always being correctly wired
 		 * up, for example many KVM do not pass it through, and so
 		 * only trust an assertion that the monitor is connected.
 		 */
 		if (intel_crt_detect_hotplug(connector)) {
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "CRT detected via hotplug\n");
 			status = connector_status_connected;
 			goto out;
 		} else
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
 				    "CRT not detected via hotplug\n");
 	}
 
@@ -896,7 +908,7 @@
 	 * broken monitor (without edid) to work behind a broken kvm (that fails
 	 * to have the right resistors for HP detection) needs to fix this up.
	 * For now just bail out. */
-	if (I915_HAS_HOTPLUG(dev_priv)) {
+	if (I915_HAS_HOTPLUG(display)) {
 		status = connector_status_disconnected;
 		goto out;
 	}
@@ -916,10 +928,10 @@ load_detect:
 	} else {
 		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
-		else if (DISPLAY_VER(dev_priv) < 4)
+		else if (DISPLAY_VER(display) < 4)
 			status = intel_crt_load_detect(crt,
 						       to_intel_crtc(connector->state->crtc)->pipe);
-		else if (dev_priv->display.params.load_detect_test)
+		else if (display->params.load_detect_test)
 			status = connector_status_disconnected;
 		else
 			status = connector_status_unknown;
@@ -934,6 +946,7 @@ out:
 
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
+	struct intel_display *display = to_intel_display(connector->dev);
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
@@ -953,7 +966,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 		goto out;
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	ddc = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
+	ddc = intel_gmbus_get_adapter(display, GMBUS_PIN_DPB);
 	ret = intel_crt_ddc_get_modes(connector, ddc);
 
 out:
@@ -964,19 +977,19 @@ out:
 
 void intel_crt_reset(struct drm_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct intel_display *display = to_intel_display(encoder->dev);
 	struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
 
-	if (DISPLAY_VER(dev_priv) >= 5) {
+	if (DISPLAY_VER(display) >= 5) {
 		u32 adpa;
 
-		adpa = intel_de_read(dev_priv, crt->adpa_reg);
+		adpa = intel_de_read(display, crt->adpa_reg);
 		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
 		adpa |= ADPA_HOTPLUG_BITS;
-		intel_de_write(dev_priv, crt->adpa_reg, adpa);
-		intel_de_posting_read(dev_priv, crt->adpa_reg);
+		intel_de_write(display, crt->adpa_reg, adpa);
+		intel_de_posting_read(display, crt->adpa_reg);
 
-		drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
+		drm_dbg_kms(display->drm, "crt adpa set to 0x%x\n", adpa);
 		crt->force_hotplug_required = true;
 	}
 
@@ -1006,8 +1019,9 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 	.destroy = intel_encoder_destroy,
 };
 
-void intel_crt_init(struct drm_i915_private *dev_priv)
+void intel_crt_init(struct intel_display *display)
 {
+	struct drm_i915_private *dev_priv = to_i915(display->drm);
 	struct drm_connector *connector;
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
@@ -1022,7 +1036,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	else
 		adpa_reg = ADPA;
 
-	adpa = intel_de_read(dev_priv, adpa_reg);
+	adpa = intel_de_read(display, adpa_reg);
 	if ((adpa & ADPA_DAC_ENABLE) == 0) {
 		/*
 		 * On some machines (some IVB at least) CRT can be
@@ -1032,11 +1046,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		 * take. So the only way to tell is attempt to enable
 		 * it and see what happens.
 		 */
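Both hotplug paths above set ADPA_CRT_HOTPLUG_FORCE_TRIGGER and then poll until the hardware clears the bit, with a timeout as the only escape. A self-contained sketch of that poll-until-clear idiom (fake_mmio_read() and the iteration budget are stand-ins for real register access and the 1000 ms wall-clock timeout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HOTPLUG_FORCE_TRIGGER (1u << 3)

static uint32_t fake_reg = HOTPLUG_FORCE_TRIGGER;

/* Stub: pretend the hardware clears the trigger bit after a few reads. */
static uint32_t fake_mmio_read(void)
{
	static int reads;

	if (++reads >= 3)
		fake_reg &= ~HOTPLUG_FORCE_TRIGGER;
	return fake_reg;
}

/* Poll until (reg & mask) == 0 or the budget runs out; returns true on
 * timeout, mirroring intel_de_wait_for_clear()'s non-zero-on-timeout
 * convention. */
static bool wait_for_clear(uint32_t mask, int budget)
{
	while (budget--) {
		if (!(fake_mmio_read() & mask))
			return false;
	}
	return true;
}

int main(void)
{
	if (wait_for_clear(HOTPLUG_FORCE_TRIGGER, 1000))
		printf("timed out waiting for FORCE_TRIGGER\n");
	else
		printf("hotplug detect cycle completed\n");
	return 0;
}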
-		intel_de_write(dev_priv, adpa_reg,
+		intel_de_write(display, adpa_reg,
 			       adpa | ADPA_DAC_ENABLE |
 			       ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
-		if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
+		if ((intel_de_read(display, adpa_reg) & ADPA_DAC_ENABLE) == 0)
 			return;
-		intel_de_write(dev_priv, adpa_reg, adpa);
+		intel_de_write(display, adpa_reg, adpa);
 	}
 
 	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
@@ -1049,16 +1063,16 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		return;
 	}
 
-	ddc_pin = dev_priv->display.vbt.crt_ddc_pin;
+	ddc_pin = display->vbt.crt_ddc_pin;
 
 	connector = &intel_connector->base;
 	crt->connector = intel_connector;
-	drm_connector_init_with_ddc(&dev_priv->drm, connector,
+	drm_connector_init_with_ddc(display->drm, connector,
 				    &intel_crt_connector_funcs,
 				    DRM_MODE_CONNECTOR_VGA,
-				    intel_gmbus_get_adapter(dev_priv, ddc_pin));
+				    intel_gmbus_get_adapter(display, ddc_pin));
 
-	drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
+	drm_encoder_init(display->drm, &crt->base.base, &intel_crt_enc_funcs,
 			 DRM_MODE_ENCODER_DAC, "CRT");
 
 	intel_connector_attach_encoder(intel_connector, &crt->base);
@@ -1070,14 +1084,14 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	else
 		crt->base.pipe_mask = ~0;
 
-	if (DISPLAY_VER(dev_priv) != 2)
+	if (DISPLAY_VER(display) != 2)
 		connector->interlace_allowed = true;
 
 	crt->adpa_reg = adpa_reg;
 
 	crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
 
-	if (I915_HAS_HOTPLUG(dev_priv) &&
+	if (I915_HAS_HOTPLUG(display) &&
 	    !dmi_check_system(intel_spurious_crt_detect)) {
 		crt->base.hpd_pin = HPD_CRT;
 		crt->base.hotplug = intel_encoder_hotplug;
@@ -1087,7 +1101,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	}
 	intel_connector->base.polled = intel_connector->polled;
 
-	if (HAS_DDI(dev_priv)) {
+	if (HAS_DDI(display)) {
 		assert_port_valid(dev_priv, PORT_E);
 
 		crt->base.port = PORT_E;
@@ -1131,8 +1145,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
 				 FDI_RX_LINK_REVERSAL_OVERRIDE;
 
-		dev_priv->display.fdi.rx_config = intel_de_read(dev_priv,
-								FDI_RX_CTL(PIPE_A)) & fdi_config;
+		display->fdi.rx_config = intel_de_read(display,
+						       FDI_RX_CTL(PIPE_A)) & fdi_config;
 	}
 
 	intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h
index fe7690c2b948..e0abfe96a3d2 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.h
+++ b/drivers/gpu/drm/i915/display/intel_crt.h
@@ -10,20 +10,20 @@
 
 enum pipe;
 struct drm_encoder;
-struct drm_i915_private;
+struct intel_display;
 
 #ifdef I915
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_crt_port_enabled(struct intel_display *display,
 			    i915_reg_t adpa_reg, enum pipe *pipe);
-void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_init(struct intel_display *display);
 void intel_crt_reset(struct drm_encoder *encoder);
 #else
-static inline bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_crt_port_enabled(struct intel_display *display,
 					  i915_reg_t adpa_reg, enum pipe *pipe)
 {
 	return false;
}
-static inline void intel_crt_init(struct drm_i915_private *dev_priv)
+static inline void intel_crt_init(struct intel_display *display)
 {
 }
 static inline void intel_crt_reset(struct drm_encoder *encoder)
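intel_crt.h keeps the same shape on both sides of the conversion: real prototypes when the CRT code is built in, static inline no-ops otherwise, so callers never need their own #ifdefs. The pattern reduced to a compilable toy (MY_FEATURE and the function names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* #define MY_FEATURE */

#ifdef MY_FEATURE
bool my_port_enabled(int reg, int *pipe);	/* real implementation elsewhere */
void my_init(void);
#else
/* Stubs: identical signatures, trivially inlined away, so call sites
 * compile unchanged when the feature is configured out. */
static inline bool my_port_enabled(int reg, int *pipe)
{
	return false;
}
static inline void my_init(void)
{
}
#endif

int main(void)
{
	int pipe;

	my_init();
	printf("enabled: %d\n", my_port_enabled(0, &pipe));
	return 0;
}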
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 1b578cad2813..a2c528d707f4 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -9,6 +9,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
 #include <drm/drm_vblank_work.h>
 
 #include "i915_vgpu.h"
@@ -35,11 +36,11 @@
 
 static void assert_vblank_disabled(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->dev);
+	struct intel_display *display = to_intel_display(crtc->dev);
 
-	if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0,
-			    "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
-			    crtc->base.id, crtc->name))
+	if (INTEL_DISPLAY_STATE_WARN(display, drm_crtc_vblank_get(crtc) == 0,
+				     "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
+				     crtc->base.id, crtc->name))
 		drm_crtc_vblank_put(crtc);
 }
 
@@ -48,12 +49,12 @@ struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
 	return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
 }
 
-struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
 				       enum pipe pipe)
 {
 	struct intel_crtc *crtc;
 
-	for_each_intel_crtc(&i915->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		if (crtc->pipe == pipe)
 			return crtc;
 	}
@@ -69,7 +70,8 @@ void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
 void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
 				     enum pipe pipe)
 {
-	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+	struct intel_display *display = &i915->display;
+	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
 
 	if (crtc->active)
 		intel_crtc_wait_for_next_vblank(crtc);
@@ -122,6 +124,8 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
+	crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+
 	assert_vblank_disabled(&crtc->base);
 	drm_crtc_set_max_vblank_count(&crtc->base,
 				      intel_crtc_max_vblank_count(crtc_state));
@@ -138,6 +142,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_display *display = to_intel_display(crtc);
 
 	/*
 	 * Should really happen exactly when we disable the pipe
@@ -148,6 +153,10 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
 	drm_crtc_vblank_off(&crtc->base);
 	assert_vblank_disabled(&crtc->base);
+
+	crtc->block_dc_for_vblank = false;
+
+	flush_work(&display->irq.vblank_dc_work);
 }
 
 struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -387,13 +396,31 @@ fail:
 	return ret;
 }
 
+int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+					   struct drm_file *file)
+{
+	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+	struct drm_crtc *drm_crtc;
+	struct intel_crtc *crtc;
+
+	drm_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
+	if (!drm_crtc)
+		return -ENOENT;
+
+	crtc = to_intel_crtc(drm_crtc);
+	pipe_from_crtc_id->pipe = crtc->pipe;
+
+	return 0;
+}
+
 static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
 {
 	return crtc_state->hw.active &&
-		!intel_crtc_needs_modeset(crtc_state) &&
 		!crtc_state->preload_luts &&
+		!intel_crtc_needs_modeset(crtc_state) &&
 		intel_crtc_needs_color_update(crtc_state) &&
-		!intel_color_uses_dsb(crtc_state);
+		!intel_color_uses_dsb(crtc_state) &&
+		!crtc_state->use_dsb;
 }
 
 static void intel_crtc_vblank_work(struct kthread_work *base)
@@ -457,6 +484,17 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			    1000 * adjusted_mode->crtc_htotal);
 }
 
+int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
+			     int scanlines)
+{
+	/* paranoia */
+	if (!adjusted_mode->crtc_clock)
+		return 1;
+
+	return DIV_ROUND_UP_ULL(mul_u32_u32(scanlines, adjusted_mode->crtc_htotal * 1000),
+				adjusted_mode->crtc_clock);
+}
+
 /**
  * intel_pipe_update_start() - start update of a set of display registers
  * @state: the atomic state
@@ -484,12 +522,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
 	intel_psr_lock(new_crtc_state);
 
 	if (new_crtc_state->do_async_flip) {
-		spin_lock_irq(&crtc->base.dev->event_lock);
-		/* arm the event for the flip done irq handler */
-		crtc->flip_done_event = new_crtc_state->uapi.event;
-		spin_unlock_irq(&crtc->base.dev->event_lock);
-
-		new_crtc_state->uapi.event = NULL;
+		intel_crtc_prepare_vblank_event(new_crtc_state,
+						&crtc->flip_done_event);
 		return;
 	}
 
@@ -589,6 +623,19 @@ void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state)
 	crtc_state->uapi.event = NULL;
 }
 
+void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
+				     struct drm_pending_vblank_event **event)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
+	*event = crtc_state->uapi.event;
+	spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);
+
+	crtc_state->uapi.event = NULL;
+}
+
 /**
  * intel_pipe_update_end() - end update of a set of display registers
  * @state: the atomic state
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index b615b7ab5ccd..de54ae1deedf 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -10,11 +10,15 @@
 
 enum i9xx_plane_id;
 enum pipe;
+struct drm_device;
 struct drm_display_mode;
+struct drm_file;
 struct drm_i915_private;
+struct drm_pending_vblank_event;
 struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
+struct intel_display;
 
 /*
  * FIXME: We should instead only take spinlocks once for the entire update
@@ -28,9 +32,15 @@ struct intel_crtc_state;
 
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs);
+int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
+			     int scanlines);
 void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state);
+void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
+				     struct drm_pending_vblank_event **event);
 u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
 int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+					   struct drm_file *file_priv);
 struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
 void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
 			    struct intel_crtc *crtc);
@@ -43,7 +53,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
 			   struct intel_crtc *crtc);
 void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
 struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
-struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
 				       enum pipe pipe);
 void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
 				     enum pipe pipe);
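The new intel_scanlines_to_usecs() above is just the inverse of intel_usecs_to_scanlines(): one scanline lasts crtc_htotal pixels at crtc_clock kHz, and both directions round up so callers never undershoot a deadline. The arithmetic checked standalone with a 1080p-like timing (htotal 2200 and pixel clock 148500 kHz are sample numbers, not values from the patch):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* usecs -> scanlines, rounding up (mirrors intel_usecs_to_scanlines()) */
static int usecs_to_scanlines(int clock_khz, int htotal, int usecs)
{
	return DIV_ROUND_UP((uint64_t)usecs * clock_khz, 1000ull * htotal);
}

/* scanlines -> usecs, rounding up (mirrors the new intel_scanlines_to_usecs()) */
static int scanlines_to_usecs(int clock_khz, int htotal, int scanlines)
{
	if (!clock_khz)	/* paranoia, as in the patch */
		return 1;
	return DIV_ROUND_UP((uint64_t)scanlines * htotal * 1000, clock_khz);
}

int main(void)
{
	int clock = 148500, htotal = 2200;	/* sample 1080p-like timing */

	/* 45 blanking lines take ~667 us; converting back rounds up to >= 45 */
	printf("45 scanlines = %d us\n", scanlines_to_usecs(clock, htotal, 45));
	printf("667 us = %d scanlines\n", usecs_to_scanlines(clock, htotal, 667));
	return 0;
}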
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 9ad53e1cbbd0..9ba77970dab7 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,6 +9,7 @@
 #include <drm/drm_blend.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
 
 #include "i915_reg.h"
 #include "intel_atomic.h"
@@ -26,8 +27,6 @@
 #include "intel_vblank.h"
 #include "skl_watermark.h"
 
-#include "gem/i915_gem_object.h"
-
 /* Cursor formats */
 static const u32 intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
@@ -275,7 +274,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 }
 
 /* TODO: split into noarm+arm pair */
-static void i845_cursor_update_arm(struct intel_plane *plane,
+static void i845_cursor_update_arm(struct intel_dsb *dsb,
+				   struct intel_plane *plane,
 				   const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
@@ -315,10 +315,11 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
 	}
 }
 
-static void i845_cursor_disable_arm(struct intel_plane *plane,
+static void i845_cursor_disable_arm(struct intel_dsb *dsb,
+				    struct intel_plane *plane,
 				    const struct intel_crtc_state *crtc_state)
 {
-	i845_cursor_update_arm(plane, crtc_state, NULL);
+	i845_cursor_update_arm(dsb, plane, crtc_state, NULL);
 }
 
 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -527,22 +528,25 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 	return 0;
 }
 
-static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
+static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb,
+					      struct intel_plane *plane,
 					      const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	struct intel_display *display = to_intel_display(plane->base.dev);
 	enum pipe pipe = plane->pipe;
 
 	if (!crtc_state->enable_psr2_sel_fetch)
 		return;
 
-	intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), 0);
+	intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), 0);
 }
 
-static void wa_16021440873(struct intel_plane *plane,
+static void wa_16021440873(struct intel_dsb *dsb,
+			   struct intel_plane *plane,
 			   const struct intel_crtc_state *crtc_state,
 			   const struct intel_plane_state *plane_state)
 {
+	struct intel_display *display = to_intel_display(plane->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	u32 ctl = plane_state->ctl;
 	int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
@@ -551,16 +555,18 @@ static void wa_16021440873(struct intel_plane *plane,
 	ctl &= ~MCURSOR_MODE_MASK;
 	ctl |= MCURSOR_MODE_64_2B;
 
-	intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), ctl);
+	intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl);
 
-	intel_de_write(dev_priv, CURPOS_ERLY_TPT(dev_priv, pipe),
-		       CURSOR_POS_Y(et_y_position));
+	intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe),
+			   CURSOR_POS_Y(et_y_position));
 }
 
-static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
+static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
+					     struct intel_plane *plane,
 					     const struct intel_crtc_state *crtc_state,
 					     const struct intel_plane_state *plane_state)
 {
+	struct intel_display *display = to_intel_display(plane->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
 
@@ -571,19 +577,17 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
 		if (crtc_state->enable_psr2_su_region_et) {
 			u32 val = intel_cursor_position(crtc_state, plane_state,
 							true);
-			intel_de_write_fw(dev_priv,
-					  CURPOS_ERLY_TPT(dev_priv, pipe),
-					  val);
+
+			intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val);
 		}
 
-		intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe),
-				  plane_state->ctl);
+		intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl);
 	} else {
 		/* Wa_16021440873 */
 		if (crtc_state->enable_psr2_su_region_et)
-			wa_16021440873(plane, crtc_state, plane_state);
+			wa_16021440873(dsb, plane, crtc_state, plane_state);
 		else
-			i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+			i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state);
 	}
 }
 
@@ -610,9 +614,11 @@ static u32 skl_cursor_wm_reg_val(const struct skl_wm_level *level)
 	return val;
 }
 
-static void skl_write_cursor_wm(struct intel_plane *plane,
+static void skl_write_cursor_wm(struct intel_dsb *dsb,
+				struct intel_plane *plane,
 				const struct intel_crtc_state *crtc_state)
 {
+	struct intel_display *display = to_intel_display(plane->base.dev);
 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe = plane->pipe;
@@ -622,30 +628,32 @@ static void skl_write_cursor_wm(struct intel_plane *plane,
 	int level;
 
 	for (level = 0; level < i915->display.wm.num_levels; level++)
-		intel_de_write_fw(i915, CUR_WM(pipe, level),
-				  skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
+		intel_de_write_dsb(display, dsb, CUR_WM(pipe, level),
+				   skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
 
-	intel_de_write_fw(i915, CUR_WM_TRANS(pipe),
-			  skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
+	intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe),
+			   skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
 
 	if (HAS_HW_SAGV_WM(i915)) {
 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
 
-		intel_de_write_fw(i915, CUR_WM_SAGV(pipe),
-				  skl_cursor_wm_reg_val(&wm->sagv.wm0));
-		intel_de_write_fw(i915, CUR_WM_SAGV_TRANS(pipe),
-				  skl_cursor_wm_reg_val(&wm->sagv.trans_wm));
+		intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe),
+				   skl_cursor_wm_reg_val(&wm->sagv.wm0));
+		intel_de_write_dsb(display, dsb, CUR_WM_SAGV_TRANS(pipe),
+				   skl_cursor_wm_reg_val(&wm->sagv.trans_wm));
 	}
 
-	intel_de_write_fw(i915, CUR_BUF_CFG(pipe),
-			  skl_cursor_ddb_reg_val(ddb));
+	intel_de_write_dsb(display, dsb, CUR_BUF_CFG(pipe),
+			   skl_cursor_ddb_reg_val(ddb));
 }
 
 /* TODO: split into noarm+arm pair */
-static void i9xx_cursor_update_arm(struct intel_plane *plane,
+static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
+				   struct intel_plane *plane,
 				   const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
+	struct intel_display *display = to_intel_display(plane->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
 	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
@@ -685,38 +693,36 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
 	 */
 
 	if (DISPLAY_VER(dev_priv) >= 9)
-		skl_write_cursor_wm(plane, crtc_state);
+		skl_write_cursor_wm(dsb, plane, crtc_state);
 
 	if (plane_state)
-		i9xx_cursor_update_sel_fetch_arm(plane, crtc_state,
-						 plane_state);
+		i9xx_cursor_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
 	else
-		i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+		i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state);
 
 	if (plane->cursor.base != base ||
 	    plane->cursor.size != fbc_ctl ||
 	    plane->cursor.cntl != cntl) {
 		if (HAS_CUR_FBC(dev_priv))
-			intel_de_write_fw(dev_priv,
-					  CUR_FBC_CTL(dev_priv, pipe),
-					  fbc_ctl);
-		intel_de_write_fw(dev_priv, CURCNTR(dev_priv, pipe), cntl);
-		intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos);
-		intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base);
+			intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl);
+		intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl);
+		intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
+		intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
 
 		plane->cursor.base = base;
 		plane->cursor.size = fbc_ctl;
 		plane->cursor.cntl = cntl;
 	} else {
-		intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos);
-		intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base);
+		intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
+		intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
 	}
 }
 
-static void i9xx_cursor_disable_arm(struct intel_plane *plane,
+static void i9xx_cursor_disable_arm(struct intel_dsb *dsb,
+				    struct intel_plane *plane,
 				    const struct intel_crtc_state *crtc_state)
 {
-	i9xx_cursor_update_arm(plane, crtc_state, NULL);
+	i9xx_cursor_update_arm(dsb, plane, crtc_state, NULL);
 }
 
 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -905,10 +911,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	}
 
 	if (new_plane_state->uapi.visible) {
-		intel_plane_update_noarm(plane, crtc_state, new_plane_state);
-		intel_plane_update_arm(plane, crtc_state, new_plane_state);
+		intel_plane_update_noarm(NULL, plane, crtc_state, new_plane_state);
+		intel_plane_update_arm(NULL, plane, crtc_state, new_plane_state);
 	} else {
-		intel_plane_disable_arm(plane, crtc_state);
+		intel_plane_disable_arm(NULL, plane, crtc_state);
 	}
 
 	local_irq_enable();
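The cursor path now threads a struct intel_dsb * everywhere and funnels register writes through intel_de_write_dsb(); intel_legacy_cursor_update() passes NULL, which suggests NULL selects an immediate MMIO write while a non-NULL context queues the write into a DSB batch. That is an inference from the call sites shown here (the helper's definition is not in this diff), sketched as a toy dispatcher with the assumed semantics:

#include <stdint.h>
#include <stdio.h>

struct batch {
	uint32_t reg[8], val[8];
	int n;
};

static uint32_t mmio[16];

/* Assumed semantics: with a batch context, queue the (reg, val) pair for
 * the hardware to replay later; with NULL, write the register immediately. */
static void write_reg(struct batch *b, uint32_t reg, uint32_t val)
{
	if (b && b->n < 8) {
		b->reg[b->n] = reg;
		b->val[b->n] = val;
		b->n++;
	} else {
		mmio[reg] = val;
	}
}

int main(void)
{
	struct batch b = { .n = 0 };

	write_reg(&b, 2, 0xdead);	/* queued, like the DSB path */
	write_reg(NULL, 3, 0xbeef);	/* immediate, like the legacy cursor ioctl */
	printf("queued %d write(s); mmio[3]=0x%x mmio[2]=0x%x\n",
	       b.n, mmio[3], mmio[2]);
	return 0;
}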
(HAS_CUR_FBC(dev_priv)) - intel_de_write_fw(dev_priv, - CUR_FBC_CTL(dev_priv, pipe), - fbc_ctl); - intel_de_write_fw(dev_priv, CURCNTR(dev_priv, pipe), cntl); - intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base); + intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl); + intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl); + intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos); + intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base); plane->cursor.base = base; plane->cursor.size = fbc_ctl; plane->cursor.cntl = cntl; } else { - intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base); + intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos); + intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base); } } -static void i9xx_cursor_disable_arm(struct intel_plane *plane, +static void i9xx_cursor_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - i9xx_cursor_update_arm(plane, crtc_state, NULL); + i9xx_cursor_update_arm(dsb, plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, @@ -905,10 +911,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane, } if (new_plane_state->uapi.visible) { - intel_plane_update_noarm(plane, crtc_state, new_plane_state); - intel_plane_update_arm(plane, crtc_state, new_plane_state); + intel_plane_update_noarm(NULL, plane, crtc_state, new_plane_state); + intel_plane_update_arm(NULL, plane, crtc_state, new_plane_state); } else { - intel_plane_disable_arm(plane, crtc_state); + intel_plane_disable_arm(NULL, plane, crtc_state); } local_irq_enable(); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 4a6c3040ca15..71dc659228ab 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -34,6 +34,9 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder) struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_encoder_to_phy(encoder); + if (IS_PANTHERLAKE(i915) && phy == PHY_A) + return true; + if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C) return true; @@ -65,22 +68,23 @@ static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder) } static void -assert_dc_off(struct drm_i915_private *i915) +assert_dc_off(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); bool enabled; enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF); - drm_WARN_ON(&i915->drm, !enabled); + drm_WARN_ON(display->drm, !enabled); } static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); int lane; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane) - intel_de_rmw(i915, - XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane), + intel_de_rmw(display, + XELPDP_PORT_MSGBUS_TIMER(display, encoder->port, lane), XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, XELPDP_PORT_MSGBUS_TIMER_VAL); } @@ -119,25 +123,28 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w static void intel_clear_response_ready_flag(struct intel_encoder *encoder, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = 
to_intel_display(encoder); - intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane), + intel_de_rmw(display, + XELPDP_PORT_P2M_MSGBUS_STATUS(display, encoder->port, lane), 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); } static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy)); + drm_err_once(display->drm, + "Failed to bring PHY %c to idle.\n", + phy_name(phy)); return; } @@ -147,22 +154,23 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, int command, int lane, u32 *val) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - if (intel_de_wait_custom(i915, - XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane), + if (intel_de_wait_custom(display, + XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_MSGBUS_TIMEOUT_FAST_US, XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { - drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", + drm_dbg_kms(display->drm, + "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", phy_name(phy), *val); - if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) & + if (!(intel_de_read(display, XELPDP_PORT_MSGBUS_TIMER(display, port, lane)) & XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT)) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Hardware did not detect a timeout\n", phy_name(phy)); @@ -171,14 +179,18 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, } if (*val & XELPDP_PORT_P2M_ERROR_SET) { - drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy), + drm_dbg_kms(display->drm, + "PHY %c Error occurred during %s command. Status: 0x%x\n", + phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); intel_cx0_bus_reset(encoder, lane); return -EINVAL; } if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) { - drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy), + drm_dbg_kms(display->drm, + "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", + phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? 
"read" : "write", *val); intel_cx0_bus_reset(encoder, lane); return -EINVAL; @@ -190,22 +202,22 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, static int __intel_cx0_read_once(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | XELPDP_PORT_M2P_COMMAND_READ | XELPDP_PORT_M2P_ADDRESS(addr)); @@ -221,7 +233,8 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(encoder, lane); + if (DISPLAY_VER(display) < 30) + intel_cx0_bus_reset(encoder, lane); return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); } @@ -229,11 +242,11 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, static u8 __intel_cx0_read(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); int i, status; - assert_dc_off(i915); + assert_dc_off(display); /* 3 tries is assumed to be enough to read successfully */ for (i = 0; i < 3; i++) { @@ -243,7 +256,8 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder, return status; } - drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n", + drm_err_once(display->drm, + "PHY %c Read %04x failed after %d retries.\n", phy_name(phy), addr, i); return 0; @@ -260,32 +274,32 @@ static u8 intel_cx0_read(struct intel_encoder *encoder, static int __intel_cx0_write_once(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | (committed ? 
XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED : XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) | XELPDP_PORT_M2P_DATA(data) | XELPDP_PORT_M2P_ADDRESS(addr)); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; @@ -295,9 +309,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); if (ack < 0) return ack; - } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) & + } else if ((intel_de_read(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane)) & XELPDP_PORT_P2M_ERROR_SET)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Error occurred during write command.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -EINVAL; @@ -310,7 +324,8 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, * down and let the message bus end up * in a known state */ - intel_cx0_bus_reset(encoder, lane); + if (DISPLAY_VER(display) < 30) + intel_cx0_bus_reset(encoder, lane); return 0; } @@ -318,11 +333,11 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, static void __intel_cx0_write(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); int i, status; - assert_dc_off(i915); + assert_dc_off(display); /* 3 tries is assumed to be enough to write successfully */ for (i = 0; i < 3; i++) { @@ -332,7 +347,7 @@ static void __intel_cx0_write(struct intel_encoder *encoder, return; } - drm_err_once(&i915->drm, + drm_err_once(display->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); } @@ -348,9 +363,9 @@ static void intel_cx0_write(struct intel_encoder *encoder, static void intel_c20_sram_write(struct intel_encoder *encoder, int lane, u16 addr, u16 data) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - assert_dc_off(i915); + assert_dc_off(display); intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); @@ -362,10 +377,10 @@ static void intel_c20_sram_write(struct intel_encoder *encoder, static u16 intel_c20_sram_read(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u16 val; - assert_dc_off(i915); + assert_dc_off(display); intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); @@ -429,7 +444,7 @@ static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state) void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; u8 owned_lane_mask; intel_wakeref_t wakeref;
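The message-bus read and write paths above share one shape: a single transaction attempt wrapped in a small fixed retry loop ("3 tries is assumed to be enough" in both helpers). A minimal sketch of that pattern in plain C, where try_once() is a hypothetical stand-in for __intel_cx0_read_once()/__intel_cx0_write_once(), which reset the bus and return a negative errno on failure:

#include <errno.h>

/* Bounded retry around one message-bus transaction attempt. */
static int cx0_bounded_retry(void *ctx, int (*try_once)(void *ctx))
{
	int i, status = -EIO;

	/* three tries, matching the driver's "assumed to be enough" loop */
	for (i = 0; i < 3; i++) {
		status = try_once(ctx);
		if (status >= 0)	/* success: data byte or 0 */
			return status;
	}

	return status;	/* last failure */
}

Note that after the third failure the driver itself logs drm_err_once() and falls back to a benign value (0 in the read path) rather than propagating the errno to its callers.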
@@ -444,7 +459,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, wakeref = intel_cx0_phy_transaction_begin(encoder); trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&i915->drm, !trans)) { + if (drm_WARN_ON_ONCE(display->drm, !trans)) { intel_cx0_phy_transaction_end(encoder, wakeref); return; } @@ -923,10 +938,10 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = { }, .mplla = { 0x3104, /* mplla cfg0 */ 0xd105, /* mplla cfg1 */ - 0xc025, /* mplla cfg2 */ - 0xc025, /* mplla cfg3 */ - 0xa6ab, /* mplla cfg4 */ - 0x8c00, /* mplla cfg5 */ + 0x9217, /* mplla cfg2 */ + 0x9217, /* mplla cfg3 */ + 0x8c00, /* mplla cfg4 */ + 0x759a, /* mplla cfg5 */ 0x4000, /* mplla cfg6 */ 0x0003, /* mplla cfg7 */ 0x3555, /* mplla cfg8 */ @@ -1122,6 +1137,22 @@ static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = { NULL, }; +static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = { + &mtl_c20_dp_rbr, + &xe2hpd_c20_edp_r216, + &xe2hpd_c20_edp_r243, + &mtl_c20_dp_hbr1, + &xe2hpd_c20_edp_r324, + &xe2hpd_c20_edp_r432, + &mtl_c20_dp_hbr2, + &xe2hpd_c20_edp_r675, + &mtl_c20_dp_hbr3, + &mtl_c20_dp_uhbr10, + &xe2hpd_c20_dp_uhbr13_5, + &mtl_c20_dp_uhbr20, + NULL, +}; + /* * HDMI link rates with 38.4 MHz reference clock. */ @@ -2003,12 +2034,12 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state, static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll; int i; if (intel_crtc_has_dp_encoder(crtc_state)) { - if (intel_panel_use_ssc(i915)) { + if (intel_panel_use_ssc(display)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); pll_state->ssc_enabled = @@ -2019,7 +2050,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, if (pll_state->ssc_enabled) return; - drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); + drm_WARN_ON(display->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); for (i = 4; i < 9; i++) pll_state->c10.pll[i] = 0; } @@ -2073,7 +2104,7 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } -static void intel_c10_pll_program(struct drm_i915_private *i915, +static void intel_c10_pll_program(struct intel_display *display, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { @@ -2106,7 +2137,7 @@ static void intel_c10_pll_program(struct drm_i915_private *i915, MB_WRITE_COMMITTED); } -static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, +static void intel_c10pll_dump_hw_state(struct intel_display *display, const struct intel_c10pll_state *hw_state) { bool fracen; @@ -2115,35 +2146,39 @@ static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, unsigned int multiplier, tx_clk_div; fracen = hw_state->pll[0] & C10_PLL0_FRACEN; - drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ", + drm_dbg_kms(display->drm, "c10pll_hw_state: fracen: %s, ", str_yes_no(fracen)); if (fracen) { frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11]; frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13]; frac_den = hw_state->pll[10] << 8 | hw_state->pll[9]; - drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n", + drm_dbg_kms(display->drm, "quot: %u, rem: %u, den: %u,\n", frac_quot, frac_rem, frac_den); } multiplier = 
(REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 | hw_state->pll[2]) / 2 + 16; tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div); - drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:"); - drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn); + drm_dbg_kms(display->drm, "c10pll_rawhw_state:"); + drm_dbg_kms(display->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, + hw_state->cmn); BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4); for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4) - drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", + drm_dbg_kms(display->drm, + "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", i, hw_state->pll[i], i + 1, hw_state->pll[i + 1], i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]); } -static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state) +static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); + struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20; u64 datarate; u64 mpll_tx_clk_div; u64 vco_freq_shift; @@ -2152,13 +2187,14 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ u64 mpll_multiplier; u64 mpll_fracn_quot; u64 mpll_fracn_rem; + u16 tx_misc; u8 mpllb_ana_freq_vco; u8 mpll_div_multiplier; - if (pixel_clock < 25175 || pixel_clock > 600000) + if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000) return -EINVAL; - datarate = ((u64)pixel_clock * 1000) * 10; + datarate = ((u64)crtc_state->port_clock * 1000) * 10; mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate)); vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate)); vco_freq = (datarate << vco_freq_shift) >> 8; @@ -2171,6 +2207,11 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)), datarate), 255); + if (DISPLAY_VER(display) >= 20) + tx_misc = 0x5; + else + tx_misc = 0x0; + if (vco_freq <= DATARATE_3000000000) mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3; else if (vco_freq <= DATARATE_3500000000) @@ -2180,9 +2221,9 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ else mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; - pll_state->clock = pixel_clock; + pll_state->clock = crtc_state->port_clock; pll_state->tx[0] = 0xbe88; - pll_state->tx[1] = 0x9800; + pll_state->tx[1] = 0x9800 | C20_PHY_TX_MISC(tx_misc); pll_state->tx[2] = 0x0000; pll_state->cmn[0] = 0x0500; pll_state->cmn[1] = 0x0005; @@ -2239,13 +2280,19 @@ static const struct intel_c20pll_state * const * intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(crtc_state); if (intel_crtc_has_dp_encoder(crtc_state)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return xe2hpd_c20_edp_tables; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { + if (DISPLAY_RUNTIME_INFO(display)->edp_typec_support) + return xe3lpd_c20_dp_edp_tables; + if (DISPLAY_VERx100(display) == 1401) + return xe2hpd_c20_edp_tables; + } - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) + if (DISPLAY_VER(display) 
>= 30) + return xe3lpd_c20_dp_edp_tables; + else if (DISPLAY_VERx100(display) == 1401) return xe2hpd_c20_dp_tables; else return mtl_c20_dp_tables; @@ -2266,8 +2313,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, /* try computed C20 HDMI tables before using consolidated tables */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock, - &crtc_state->dpll_hw_state.cx0pll.c20) == 0) + if (intel_c20_compute_hdmi_tmds_pll(crtc_state) == 0) return 0; } @@ -2347,10 +2393,10 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c20pll_state *pll_state) { + struct intel_display *display = to_intel_display(encoder); bool cntx; intel_wakeref_t wakeref; int i; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -2362,11 +2408,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_TX_CNTX_CFG(i915, i)); + PHY_C20_B_TX_CNTX_CFG(display, i)); else pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_TX_CNTX_CFG(i915, i)); + PHY_C20_A_TX_CNTX_CFG(display, i)); } /* Read common configuration */ @@ -2374,11 +2420,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_CMN_CNTX_CFG(i915, i)); + PHY_C20_B_CMN_CNTX_CFG(display, i)); else pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_CMN_CNTX_CFG(i915, i)); + PHY_C20_A_CMN_CNTX_CFG(display, i)); } if (intel_c20phy_use_mpllb(pll_state)) { @@ -2387,11 +2433,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLB_CNTX_CFG(i915, i)); + PHY_C20_B_MPLLB_CNTX_CFG(display, i)); else pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLB_CNTX_CFG(i915, i)); + PHY_C20_A_MPLLB_CNTX_CFG(display, i)); } } else { /* MPLLA configuration */ @@ -2399,11 +2445,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLA_CNTX_CFG(i915, i)); + PHY_C20_B_MPLLA_CNTX_CFG(display, i)); else pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLA_CNTX_CFG(i915, i)); + PHY_C20_A_MPLLA_CNTX_CFG(display, i)); } } @@ -2412,33 +2458,37 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } -static void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, +static void intel_c20pll_dump_hw_state(struct intel_display *display, const struct intel_c20pll_state *hw_state) { int i; - drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n"); - drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", + drm_dbg_kms(display->drm, "c20pll_hw_state:\n"); + drm_dbg_kms(display->drm, + "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]); - drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", + drm_dbg_kms(display->drm, + "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", hw_state->cmn[0], 
hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]); if (intel_c20phy_use_mpllb(hw_state)) { for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++) - drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); + drm_dbg_kms(display->drm, "mpllb[%d] = 0x%.4x\n", i, + hw_state->mpllb[i]); } else { for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++) - drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]); + drm_dbg_kms(display->drm, "mplla[%d] = 0x%.4x\n", i, + hw_state->mplla[i]); } } -void intel_cx0pll_dump_hw_state(struct drm_i915_private *i915, +void intel_cx0pll_dump_hw_state(struct intel_display *display, const struct intel_cx0pll_state *hw_state) { if (hw_state->use_c10) - intel_c10pll_dump_hw_state(i915, &hw_state->c10); + intel_c10pll_dump_hw_state(display, &hw_state->c10); else - intel_c20pll_dump_hw_state(i915, &hw_state->c20); + intel_c20pll_dump_hw_state(display, &hw_state->c20); } static u8 intel_c20_get_dp_rate(u32 clock) @@ -2538,7 +2588,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp) return 0; } -static void intel_c20_pll_program(struct drm_i915_private *i915, +static void intel_c20_pll_program(struct intel_display *display, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { @@ -2571,11 +2621,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_TX_CNTX_CFG(i915, i), + PHY_C20_A_TX_CNTX_CFG(display, i), pll_state->tx[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_TX_CNTX_CFG(i915, i), + PHY_C20_B_TX_CNTX_CFG(display, i), pll_state->tx[i]); } @@ -2583,11 +2633,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_CMN_CNTX_CFG(i915, i), + PHY_C20_A_CMN_CNTX_CFG(display, i), pll_state->cmn[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_CMN_CNTX_CFG(i915, i), + PHY_C20_B_CMN_CNTX_CFG(display, i), pll_state->cmn[i]); } @@ -2596,22 +2646,22 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLB_CNTX_CFG(i915, i), + PHY_C20_A_MPLLB_CNTX_CFG(display, i), pll_state->mpllb[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLB_CNTX_CFG(i915, i), + PHY_C20_B_MPLLB_CNTX_CFG(display, i), pll_state->mpllb[i]); } } else { for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLA_CNTX_CFG(i915, i), + PHY_C20_A_MPLLA_CNTX_CFG(display, i), pll_state->mplla[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLA_CNTX_CFG(i915, i), + PHY_C20_B_MPLLA_CNTX_CFG(display, i), pll_state->mplla[i]); } } @@ -2678,10 +2728,10 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, bool lane_reversal) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 val = 0; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), XELPDP_PORT_REVERSAL, lane_reversal ? 
XELPDP_PORT_REVERSAL : 0); @@ -2703,7 +2753,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, else val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA | XELPDP_SSC_ENABLE_PLLB, val); @@ -2734,48 +2784,49 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, u8 lane_mask, u8 state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port); + i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(display, port); int lane; - intel_de_rmw(i915, buf_ctl2_reg, + intel_de_rmw(display, buf_ctl2_reg, intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK), intel_cx0_get_powerdown_state(lane_mask, state)); /* Wait for pending transactions.*/ for_each_cx0_lane_in_mask(lane_mask, lane) - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); } - intel_de_rmw(i915, buf_ctl2_reg, + intel_de_rmw(display, buf_ctl2_reg, intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES), intel_cx0_get_powerdown_update(lane_mask)); /* Update Timeout Value */ - if (intel_de_wait_custom(i915, buf_ctl2_reg, + if (intel_de_wait_custom(display, buf_ctl2_reg, intel_cx0_get_powerdown_update(lane_mask), 0, XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); } static void intel_cx0_setup_powerdown(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), XELPDP_POWER_STATE_READY_MASK, XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port), XELPDP_POWER_STATE_ACTIVE_MASK | XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) | @@ -2807,7 +2858,7 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, bool lane_reversal) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); @@ -2820,48 +2871,51 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, 
XELPDP_LANE_PHY_CURRENT_STATUS(1)) : XELPDP_LANE_PHY_CURRENT_STATUS(0); - if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port), XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of SOC reset after %dus.\n", phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, lane_pipe_reset); - if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_current_status, lane_phy_current_status, XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_request(owned_lane_mask), intel_cx0_get_pclk_refclk_request(lane_mask)); - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_ack(owned_lane_mask), intel_cx0_get_pclk_refclk_ack(lane_mask), XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to request refclk after %dus.\n", phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_RESET); intel_cx0_setup_powerdown(encoder); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0); + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port), + if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_current_status, XELPDP_PORT_RESET_END_TIMEOUT)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dms.\n", phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT); } -static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, - struct intel_encoder *encoder, int lane_count, +static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count, bool lane_reversal) { int i; @@ -2930,7 +2984,7 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask) static void intel_cx0pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; @@ -2962,15 +3016,15 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, /* 5. Program PHY internal PLL internal registers. 
*/ if (intel_encoder_is_c10phy(encoder)) - intel_c10_pll_program(i915, crtc_state, encoder); + intel_c10_pll_program(display, crtc_state, encoder); else - intel_c20_pll_program(i915, crtc_state, encoder); + intel_c20_pll_program(display, crtc_state, encoder); /* * 6. Program the enabled and disabled owned PHY lane * transmitters over message bus */ - intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal); + intel_cx0_program_phy_lane(encoder, crtc_state->lane_count, lane_reversal); /* * 7. Follow the Display Voltage Frequency Switching - Sequence @@ -2981,23 +3035,23 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, * 8. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); /* * 9. Set PORT_CLOCK_CTL register PCLK PLL Request * LN<Lane for maxPCLK> to "1" to enable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_request(maxpclk_lane)); /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_ack(maxpclk_lane), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n", + drm_warn(display->drm, "Port %c PLL not locked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); /* @@ -3011,15 +3065,16 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - u32 clock; - u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); + struct intel_display *display = to_intel_display(encoder); + u32 clock, val; + + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); - drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); - drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); - drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK)); + drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); + drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); + drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_ACK)); switch (clock) { case XELPDP_DDI_CLOCK_SELECT_TBT_162: @@ -3036,7 +3091,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) } } -static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) +static int intel_mtl_tbt_clock_select(int clock) { switch (clock) { case 162000: @@ -3056,7 +3111,7 @@ static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); u32 val = 0; @@ -3064,13 +3119,13 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 1. 
Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ - val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock)); + val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(crtc_state->port_clock)); val |= XELPDP_FORWARD_CLOCK_UNGATE; - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val); /* 2. Read back PORT_CLOCK_CTL REGISTER */ - val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); /* * 3. Follow the Display Voltage Frequency Switching - Sequence @@ -3081,14 +3136,15 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL. */ val |= XELPDP_TBT_CLOCK_REQUEST; - intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val); + intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val); /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_ACK, XELPDP_TBT_CLOCK_ACK, 100, 0, NULL)) - drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", + drm_warn(display->drm, + "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3100,7 +3156,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 7. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); } @@ -3117,12 +3173,14 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder, static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (intel_encoder_is_c10phy(encoder)) return CX0_P2PG_STATE_DISABLE; - if (IS_BATTLEMAGE(i915) && encoder->port == PORT_A) + if ((IS_BATTLEMAGE(i915) && encoder->port == PORT_A) || + (DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP)) return CX0_P2PG_STATE_DISABLE; return CX0_P4PG_STATE_DISABLE; @@ -3130,7 +3188,7 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) static void intel_cx0pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -3147,21 +3205,22 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK> * to "0" to disable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0); /* 4. Program DDI_CLK_VALFREQ to 0. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); /* * 5. 
Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n", + drm_warn(display->drm, + "Port %c PLL not unlocked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); /* @@ -3170,9 +3229,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) */ /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK, 0); - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_FORWARD_CLOCK_UNGATE, 0); intel_cx0_phy_transaction_end(encoder, wakeref); @@ -3180,7 +3239,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); /* @@ -3191,13 +3250,14 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) /* * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_REQUEST, 0); /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) - drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", + drm_warn(display->drm, + "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3208,12 +3268,12 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) /* * 5. Program PORT CLOCK CTRL register to disable and gate clocks */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, 0); /* 6. Program DDI_CLK_VALFREQ to 0. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); } void intel_mtl_pll_disable(struct intel_encoder *encoder) @@ -3230,13 +3290,15 @@ enum icl_port_dpll_id intel_mtl_port_pll_type(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); + u32 val, clock; + /* * TODO: Determine the PLL type from the SW state, once MTL PLL * handling is done via the standard shared DPLL framework. 
*/ - u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); - u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); + clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK || clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK) @@ -3250,28 +3312,28 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state, struct intel_encoder *encoder, struct intel_c10pll_state *mpllb_hw_state) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10; int i; for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { u8 expected = mpllb_sw_state->pll[i]; - I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, i, - expected, mpllb_hw_state->pll[i]); + INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->pll[i] != expected, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, i, + expected, mpllb_hw_state->pll[i]); } - I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, - mpllb_sw_state->tx, mpllb_hw_state->tx); + INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->tx != mpllb_sw_state->tx, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->tx, mpllb_hw_state->tx); - I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, - mpllb_sw_state->cmn, mpllb_hw_state->cmn); + INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->cmn != mpllb_sw_state->cmn, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->cmn, mpllb_hw_state->cmn); } void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, @@ -3357,64 +3419,64 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state, struct intel_encoder *encoder, struct intel_c20pll_state *mpll_hw_state) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20; bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state); bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state); int clock = intel_c20pll_calc_port_clock(encoder, mpll_sw_state); int i; - I915_STATE_WARN(i915, mpll_hw_state->clock != clock, - "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)", - crtc->base.base.id, crtc->base.name, - mpll_sw_state->clock, mpll_hw_state->clock); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->clock != clock, + "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)", + crtc->base.base.id, crtc->base.name, + mpll_sw_state->clock, mpll_hw_state->clock); - I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb, - "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)", - crtc->base.base.id, crtc->base.name, - sw_use_mpllb, 
hw_use_mpllb); + INTEL_DISPLAY_STATE_WARN(display, sw_use_mpllb != hw_use_mpllb, + "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)", + crtc->base.base.id, crtc->base.name, + sw_use_mpllb, hw_use_mpllb); if (hw_use_mpllb) { for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) { - I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], - "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], + "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); } } else { for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) { - I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], - "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], + "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); } } for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) { - I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i], - "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->tx[i], mpll_hw_state->tx[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->tx[i] != mpll_sw_state->tx[i], + "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->tx[i], mpll_hw_state->tx[i]); } for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) { - I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i], - "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i], + "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]); } } void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; struct intel_cx0pll_state mpll_hw_state = {}; - if (DISPLAY_VER(i915) < 14) + if (DISPLAY_VER(display) < 14) return; if (!new_crtc_state->hw.active) diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index 9004b99bb51f..711168882684 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -7,17 +7,15 @@ #define __INTEL_CX0_PHY_H__ #include <linux/types.h> -#include <linux/bitfield.h> -#include <linux/bits.h> enum icl_port_dpll_id; -struct drm_i915_private; struct intel_atomic_state; struct intel_c10pll_state; struct intel_c20pll_state; -struct 
intel_cx0pll_state; struct intel_crtc; struct intel_crtc_state; +struct intel_cx0pll_state; +struct intel_display; struct intel_encoder; struct intel_hdmi; @@ -35,7 +33,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, const struct intel_cx0pll_state *pll_state); -void intel_cx0pll_dump_hw_state(struct drm_i915_private *dev_priv, +void intel_cx0pll_dump_hw_state(struct intel_display *display, const struct intel_cx0pll_state *hw_state); void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h index ab3ae110b68f..f0e5c196eae4 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h @@ -273,13 +273,15 @@ #define _XE2HPD_C20_A_MPLLB_CFG 0xCCC2 #define _XE2HPD_C20_B_MPLLB_CFG 0xCCB6 -#define _IS_XE2HPD_C20(i915) (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) +#define _IS_XE2HPD_C20(i915) (DISPLAY_VERx100(i915) == 1401) #define PHY_C20_A_TX_CNTX_CFG(i915, idx) \ ((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_TX_CNTX_CFG : _MTL_C20_A_TX_CNTX_CFG) - (idx)) #define PHY_C20_B_TX_CNTX_CFG(i915, idx) \ ((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_B_TX_CNTX_CFG : _MTL_C20_B_TX_CNTX_CFG) - (idx)) #define C20_PHY_TX_RATE REG_GENMASK(2, 0) +#define C20_PHY_TX_MISC_MASK REG_GENMASK16(7, 0) +#define C20_PHY_TX_MISC(val) REG_FIELD_PREP16(C20_PHY_TX_MISC_MASK, (val)) #define PHY_C20_A_CMN_CNTX_CFG(i915, idx) \ ((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_CMN_CNTX_CFG : _MTL_C20_A_CMN_CNTX_CFG) - (idx)) @@ -363,4 +365,7 @@ #define HDMI_DIV_MASK REG_GENMASK16(2, 0) #define HDMI_DIV(val) REG_FIELD_PREP16(HDMI_DIV_MASK, val) +#define PICA_PHY_CONFIG_CONTROL _MMIO(0x16FE68) +#define EDP_ON_TYPEC REG_BIT(31) + #endif /* __INTEL_CX0_REG_DEFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index b1c294236cc8..49b5cc01ce40 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -54,6 +54,7 @@ #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" @@ -2235,7 +2236,7 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, enable ? DP_FEC_READY : 0) <= 0) drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n", - enable ? "enabled" : "disabled"); + str_enabled_disabled(enable)); if (enable && drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS, @@ -2255,9 +2256,9 @@ static int read_fec_detected_status(struct drm_dp_aux *aux) return status; } -static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) +static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) { - struct drm_i915_private *i915 = to_i915(aux->drm_dev); + struct intel_display *display = to_intel_display(aux->drm_dev); int mask = enabled ? 
DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED; int status; int err; @@ -2266,57 +2267,92 @@ static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) status & mask || status < 0, 10000, 200000); - if (!err && status >= 0) - return; + if (err || status < 0) { + drm_dbg_kms(display->drm, + "Failed waiting for FEC %s to get detected: %d (status %d)\n", + str_enabled_disabled(enabled), err, status); + return err ? err : status; + } - if (err == -ETIMEDOUT) - drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n", - str_enabled_disabled(enabled)); - else - drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status); + return 0; } -void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - bool enabled) +int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool enabled) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int ret; if (!crtc_state->fec_enable) - return; + return 0; if (enabled) - ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state), + ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_FEC_ENABLE_LIVE, 1); else - ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state), + ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_FEC_ENABLE_LIVE, 1); - if (ret) - drm_err(&i915->drm, + if (ret) { + drm_err(display->drm, "Timeout waiting for FEC live state to get %s\n", str_enabled_disabled(enabled)); - + return ret; + } /* * At least the Synaptics MST hub doesn't set the detected flag for * FEC decoding disabling, so skip waiting for that.
*/ - if (enabled) - wait_for_fec_detected(&intel_dp->aux, enabled); + if (enabled) { + ret = wait_for_fec_detected(&intel_dp->aux, enabled); + if (ret) + return ret; + } + + return 0; } static void intel_ddi_enable_fec(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); + int i; + int ret; if (!crtc_state->fec_enable) return; - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), 0, DP_TP_CTL_FEC_ENABLE); + + if (DISPLAY_VER(display) < 30) + return; + + ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, true); + if (!ret) + return; + + for (i = 0; i < 3; i++) { + drm_dbg_kms(display->drm, "Retry FEC enabling\n"); + + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_FEC_ENABLE, 0); + + ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, false); + if (ret) + continue; + + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + 0, DP_TP_CTL_FEC_ENABLE); + + ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, true); + if (!ret) + return; + } + + drm_err(display->drm, "Failed to enable FEC after retries\n"); } static void intel_ddi_disable_fec(struct intel_encoder *encoder, @@ -3115,11 +3151,12 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *pipe_crtc; + int i; - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -3130,8 +3167,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, intel_ddi_disable_transcoder_func(old_crtc_state); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -3382,8 +3418,9 @@ static void intel_enable_ddi(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *pipe_crtc; + int i; intel_ddi_enable_transcoder_func(encoder, crtc_state); @@ -3394,8 +3431,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, intel_ddi_wait_for_fec_status(encoder, crtc_state, true); - for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -3477,6 +3513,13 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, drm_connector_update_privacy_screen(conn_state); } +static void intel_ddi_update_pipe_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct 
drm_connector_state *conn_state) +{ + intel_hdmi_fastset_infoframes(encoder, crtc_state, conn_state); +} + void intel_ddi_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -3488,6 +3531,10 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state, intel_ddi_update_pipe_dp(state, encoder, crtc_state, conn_state); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + intel_ddi_update_pipe_hdmi(encoder, crtc_state, + conn_state); + intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state); } @@ -4391,6 +4438,7 @@ static void intel_ddi_encoder_reset(struct drm_encoder *encoder) struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); intel_dp->reset_link_params = true; + intel_dp_invalidate_source_oui(intel_dp); intel_pps_encoder_reset(intel_dp); @@ -4550,12 +4598,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder, enum intel_hotplug_state state; int ret; - if (intel_dp->compliance.test_active && - intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { - intel_dp_phy_test(encoder); - /* just do the PHY test and nothing else */ + if (intel_dp_test_phy(intel_dp)) return INTEL_HOTPLUG_UNCHANGED; - } state = intel_encoder_hotplug(encoder, connector); @@ -4888,7 +4932,7 @@ void intel_ddi_init(struct intel_display *display, if (!assert_has_icl_dsi(dev_priv)) return; - icl_dsi_init(dev_priv, devdata); + icl_dsi_init(display, devdata); return; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 6d85422bdefe..640851d46b1b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -63,9 +63,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); -void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - bool enabled); +int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool enabled); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index e881bfeafb47..bb51f974e9e2 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "i915_trace.h" +#include "intel_dsb.h" #include "intel_uncore.h" static inline struct intel_uncore *__to_uncore(struct intel_display *display) @@ -31,7 +32,7 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg) #define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__) static inline u8 -__intel_de_read8(struct intel_display *display, i915_reg_t reg) +intel_de_read8(struct intel_display *display, i915_reg_t reg) { u8 val; @@ -43,11 +44,10 @@ __intel_de_read8(struct intel_display *display, i915_reg_t reg) return val; } -#define intel_de_read8(p,...) 
__intel_de_read8(__to_intel_display(p), __VA_ARGS__) static inline u64 -__intel_de_read64_2x32(struct intel_display *display, - i915_reg_t lower_reg, i915_reg_t upper_reg) +intel_de_read64_2x32(struct intel_display *display, + i915_reg_t lower_reg, i915_reg_t upper_reg) { u64 val; @@ -62,7 +62,6 @@ __intel_de_read64_2x32(struct intel_display *display, return val; } -#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__) static inline void __intel_de_posting_read(struct intel_display *display, i915_reg_t reg) @@ -87,12 +86,11 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val) #define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__) static inline u32 -____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg, - u32 clear, u32 set) +__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg, + u32 clear, u32 set) { return intel_uncore_rmw(__to_uncore(display), reg, clear, set); } -#define __intel_de_rmw_nowl(p,...) ____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__) static inline u32 __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, @@ -111,18 +109,17 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, #define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__) static inline int -____intel_de_wait_for_register_nowl(struct intel_display *display, - i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +__intel_de_wait_for_register_nowl(struct intel_display *display, + i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { return intel_wait_for_register(__to_uncore(display), reg, mask, value, timeout); } -#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +intel_de_wait(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { int ret; @@ -135,11 +132,10 @@ __intel_de_wait(struct intel_display *display, i915_reg_t reg, return ret; } -#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { int ret; @@ -152,13 +148,12 @@ __intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, return ret; } -#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, - unsigned int fast_timeout_us, - unsigned int slow_timeout_ms, u32 *out_value) +intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, u32 *out_value) { int ret; @@ -172,7 +167,6 @@ __intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, return ret; } -#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__) static inline int __intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg, @@ -219,18 +213,25 @@ __intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val) #define intel_de_write_fw(p,...) 
__intel_de_write_fw(__to_intel_display(p), __VA_ARGS__) static inline u32 -__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) +intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) { return intel_uncore_read_notrace(__to_uncore(display), reg); } -#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__) static inline void -__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, - u32 val) +intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, u32 val) { intel_uncore_write_notrace(__to_uncore(display), reg, val); } -#define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__) + +static __always_inline void +intel_de_write_dsb(struct intel_display *display, struct intel_dsb *dsb, + i915_reg_t reg, u32 val) +{ + if (dsb) + intel_dsb_reg_write(dsb, reg, val); + else + intel_de_write_fw(display, reg, val); +} #endif /* __INTEL_DE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b4ef4d59da1a..863927f429aa 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -43,9 +43,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> - -#include "gem/i915_gem_lmem.h" -#include "gem/i915_gem_object.h" +#include <drm/drm_vblank.h> #include "g4x_dp.h" #include "g4x_hdmi.h" @@ -60,6 +58,7 @@ #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" +#include "intel_bo.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" @@ -135,7 +134,8 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); -static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); +static void bdw_set_pipe_misc(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) @@ -253,6 +253,108 @@ static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state) return ffs(crtc_state->joiner_pipes) - 1; } +/* + * The following helper functions, despite being named for bigjoiner, + * are applicable to both bigjoiner and uncompressed joiner configurations. 
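+ *
+ * Editor's worked example of the mask arithmetic used below: with
+ * four joined pipes A-D, joiner_pipes = 0b1111 and
+ * joiner_primary_pipe() = A (shift 0), so
+ *
+ *	bigjoiner_primary_pipes()   = 0b1111 & (0b01010101 << 0) = 0b0101 (A, C)
+ *	bigjoiner_secondary_pipes() = 0b1111 & (0b10101010 << 0) = 0b1010 (B, D)
+ *
+ * i.e. an ultrajoiner configuration decomposes into two consecutive
+ * primary/secondary pairs, which is why these helpers can serve the
+ * wider joiner cases as well.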
+ */ +static bool is_bigjoiner(const struct intel_crtc_state *crtc_state) +{ + return hweight8(crtc_state->joiner_pipes) >= 2; +} + +static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!is_bigjoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state)); +} + +static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!is_bigjoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state)); +} + +bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!is_bigjoiner(crtc_state)) + return false; + + return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state); +} + +bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!is_bigjoiner(crtc_state)) + return false; + + return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state); +} + +u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!is_bigjoiner(crtc_state)) + return BIT(crtc->pipe); + + return bigjoiner_primary_pipes(crtc_state); +} + +u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state) +{ + return bigjoiner_secondary_pipes(crtc_state); +} + +bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state) +{ + return intel_crtc_num_joined_pipes(crtc_state) >= 4; +} + +static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!intel_crtc_is_ultrajoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b00010001 << joiner_primary_pipe(crtc_state)); +} + +bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return intel_crtc_is_ultrajoiner(crtc_state) && + BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state); +} + +/* + * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or + * any other logic, so lets just add helper function to + * at least hide this hassle.. 
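+ *
+ * Editor's worked example: for pipes A-D joined with primary A,
+ *
+ *	ultrajoiner_enable_pipes() = 0b1111 & (0b01110111 << 0) = 0b0111
+ *
+ * so pipes A, B and C need the enable bit while D, the last pipe in
+ * the set of four, is left out - exactly the hardware quirk this
+ * comment describes.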
+ */ +static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!intel_crtc_is_ultrajoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state)); +} + +bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return intel_crtc_is_ultrajoiner(crtc_state) && + BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state); +} + u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state) { if (crtc_state->joiner_pipes) @@ -277,9 +379,9 @@ bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state) crtc->pipe == joiner_primary_pipe(crtc_state); } -static int intel_joiner_num_pipes(const struct intel_crtc_state *crtc_state) +int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state) { - return hweight8(crtc_state->joiner_pipes); + return hweight8(intel_crtc_joined_pipe_mask(crtc_state)); } u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state) @@ -291,10 +393,10 @@ u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state) struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (intel_crtc_is_joiner_secondary(crtc_state)) - return intel_crtc_for_pipe(i915, joiner_primary_pipe(crtc_state)); + return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state)); else return to_intel_crtc(crtc_state->uapi.crtc); } @@ -320,6 +422,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) void assert_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; @@ -340,24 +443,24 @@ void assert_transcoder(struct drm_i915_private *dev_priv, cur_state = false; } - I915_STATE_WARN(dev_priv, cur_state != state, - "transcoder %s assertion failure (expected %s, current %s)\n", - transcoder_name(cpu_transcoder), str_on_off(state), - str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "transcoder %s assertion failure (expected %s, current %s)\n", + transcoder_name(cpu_transcoder), str_on_off(state), + str_on_off(cur_state)); } static void assert_plane(struct intel_plane *plane, bool state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe; bool cur_state; cur_state = plane->get_hw_state(plane, &pipe); - I915_STATE_WARN(i915, cur_state != state, - "%s assertion failure (expected %s, current %s)\n", - plane->base.name, str_on_off(state), - str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "%s assertion failure (expected %s, current %s)\n", + plane->base.name, str_on_off(state), + str_on_off(cur_state)); } #define assert_plane_enabled(p) assert_plane(p, true) @@ -372,7 +475,7 @@ static void assert_planes_disabled(struct intel_crtc *crtc) assert_plane_disabled(plane); } -void vlv_wait_port_ready(struct drm_i915_private *dev_priv, +void vlv_wait_port_ready(struct intel_display *display, struct intel_digital_port *dig_port, unsigned int expected_mask) { @@ -385,11 +488,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, fallthrough; case 
PORT_B: port_mask = DPLL_PORTB_READY_MASK; - dpll_reg = DPLL(dev_priv, 0); + dpll_reg = DPLL(display, 0); break; case PORT_C: port_mask = DPLL_PORTC_READY_MASK; - dpll_reg = DPLL(dev_priv, 0); + dpll_reg = DPLL(display, 0); expected_mask <<= 4; break; case PORT_D: @@ -398,11 +501,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, break; } - if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) - drm_WARN(&dev_priv->drm, 1, + if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000)) + drm_WARN(display->drm, 1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", dig_port->base.base.base.id, dig_port->base.base.name, - intel_de_read(dev_priv, dpll_reg) & port_mask, + intel_de_read(display, dpll_reg) & port_mask, expected_mask); } @@ -715,7 +818,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - intel_plane_disable_arm(plane, crtc_state); + intel_plane_disable_arm(NULL, plane, crtc_state); intel_crtc_wait_for_next_vblank(crtc); } @@ -759,7 +862,7 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) */ if (IS_DG2(dev_priv)) tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; - else if (DISPLAY_VER(dev_priv) >= 13) + else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30)) tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; /* Wa_14010547955:dg2 */ @@ -1116,6 +1219,22 @@ static void intel_post_plane_update(struct intel_atomic_state *state, intel_encoders_audio_enable(state, crtc); } +static void intel_post_plane_update_after_readout(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ + hsw_ips_post_update(state, crtc); + + /* + * Activate DRRS after state readout to avoid + * dp_m_n vs. dp_m2_n2 confusion on BDW+. + */ + intel_drrs_activate(new_crtc_state); +} + static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -1172,8 +1291,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, * Apart from the async flip bit we want to * preserve the old state for the plane. 
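	 *
	 * (Editor's note: the NULL passed as the new first argument to
	 * intel_plane_async_flip() below is the "struct intel_dsb *"
	 * parameter this series threads through the plane hooks; NULL
	 * selects the immediate MMIO path, cf. intel_de_write_dsb()
	 * added in intel_de.h above.)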
*/ - intel_plane_async_flip(plane, old_crtc_state, - old_plane_state, false); + intel_plane_async_flip(NULL, plane, + old_crtc_state, old_plane_state, false); need_vbl_wait = true; } } @@ -1249,8 +1368,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * * WaCxSRDisabledForSpriteScaling:ivb */ - if (old_crtc_state->hw.active && - new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) + if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active && + new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv)) intel_crtc_wait_for_next_vblank(crtc); /* @@ -1315,7 +1434,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(NULL, plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; @@ -1502,14 +1621,6 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state, } } -static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - - plane->disable_arm(plane, crtc_state); -} - static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1575,11 +1686,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); @@ -1677,23 +1784,22 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta static void hsw_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; struct intel_crtc *pipe_crtc; + int i; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) - intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe); + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) + intel_dmc_enable_pipe(display, pipe_crtc->pipe); intel_encoders_pre_pll_enable(state, crtc); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -1703,27 +1809,25 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_enable(state, crtc); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, 
pipe_crtc); intel_dsc_enable(pipe_crtc_state); - if (DISPLAY_VER(dev_priv) >= 13) + if (HAS_UNCOMPRESSED_JOINER(dev_priv)) intel_uncompressed_joiner_enable(pipe_crtc_state); intel_set_pipe_src_size(pipe_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(pipe_crtc_state); + bdw_set_pipe_misc(NULL, pipe_crtc_state); } if (!transcoder_is_dsi(cpu_transcoder)) hsw_configure_cpu_transcoder(new_crtc_state); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -1741,12 +1845,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(pipe_crtc_state); - intel_color_commit_noarm(pipe_crtc_state); - intel_color_commit_arm(pipe_crtc_state); - /* update DSPCNTR to configure gamma/csc for pipe bottom color */ - if (DISPLAY_VER(dev_priv) < 9) - intel_disable_primary_plane(pipe_crtc_state); + intel_color_modeset(pipe_crtc_state); hsw_set_linetime_wm(pipe_crtc_state); @@ -1758,8 +1857,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_enable(state, crtc); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); enum pipe hsw_workaround_pipe; @@ -1776,7 +1874,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { struct intel_crtc *wa_crtc = - intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); + intel_crtc_for_pipe(display, hsw_workaround_pipe); intel_crtc_wait_for_next_vblank(wa_crtc); intel_crtc_wait_for_next_vblank(wa_crtc); @@ -1841,10 +1939,11 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, static void hsw_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc *pipe_crtc; + int i; /* * FIXME collapse everything to one hook. 
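	 *
	 * (Editor's note: the conversions in this area replace the
	 * open-coded for_each_intel_crtc_in_pipe_mask() /
	 * for_each_intel_crtc_in_pipe_mask_reverse() loops over
	 * intel_crtc_joined_pipe_mask() with the new
	 * for_each_pipe_crtc_modeset_disable()/_enable() helpers, which
	 * walk the same joined-pipe set but take the crtc state and an
	 * index variable directly.)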
@@ -1853,8 +1952,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_encoders_disable(state, crtc); intel_encoders_post_disable(state, crtc); - for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -1863,9 +1961,8 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_encoders_post_pll_disable(state, crtc); - for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) - intel_dmc_disable_pipe(i915, pipe_crtc->pipe); + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) + intel_dmc_disable_pipe(display, pipe_crtc->pipe); } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -2147,11 +2244,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); @@ -2187,11 +2280,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); if (!intel_initial_watermarks(state, crtc)) intel_update_watermarks(dev_priv); @@ -2224,9 +2313,10 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) static void i9xx_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* @@ -2265,7 +2355,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, /* clock the pipe down to 640x480@60 to potentially save power */ if (IS_I830(dev_priv)) - i830_enable_pipe(dev_priv, pipe); + i830_enable_pipe(display, pipe); } void intel_encoder_destroy(struct drm_encoder *encoder) @@ -2343,9 +2433,9 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state, struct drm_display_mode *mode) { - int num_pipes = intel_joiner_num_pipes(crtc_state); + int num_pipes = intel_crtc_num_joined_pipes(crtc_state); - if (num_pipes < 2) + if (num_pipes == 1) return; mode->crtc_clock /= num_pipes; @@ -2407,7 +2497,7 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state drm_mode_copy(mode, pipe_mode); intel_mode_from_crtc_timings(mode, mode); mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * - (intel_joiner_num_pipes(crtc_state) ?: 1); + intel_crtc_num_joined_pipes(crtc_state); mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); /* Derive per-pipe timings in case joiner is used */ @@ -2427,10 
+2517,10 @@ void intel_encoder_get_config(struct intel_encoder *encoder, static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state) { - int num_pipes = intel_joiner_num_pipes(crtc_state); + int num_pipes = intel_crtc_num_joined_pipes(crtc_state); int width, height; - if (num_pipes < 2) + if (num_pipes == 1) return; width = drm_rect_width(&crtc_state->pipe_src); @@ -2520,13 +2610,29 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) return 0; } +static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + return intel_vrr_possible(crtc_state) && crtc_state->has_psr && + adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay && + IS_DISPLAY_VER(display, 13, 14); +} + static int intel_crtc_compute_config(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; int ret; + /* Wa_14015401596 */ + if (intel_crtc_needs_wa_14015401596(crtc_state)) + adjusted_mode->crtc_vblank_start += 1; + ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; @@ -2887,11 +2993,11 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc, static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - int num_pipes = intel_joiner_num_pipes(crtc_state); + int num_pipes = intel_crtc_num_joined_pipes(crtc_state); enum pipe primary_pipe, pipe = crtc->pipe; int width; - if (num_pipes < 2) + if (num_pipes == 1) return; primary_pipe = joiner_primary_pipe(crtc_state); @@ -3246,9 +3352,11 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); } -static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) +static void bdw_set_pipe_misc(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_display *display = to_intel_display(crtc->base.dev); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = 0; @@ -3293,7 +3401,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) if (IS_BROADWELL(dev_priv)) val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; - intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); + intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); } int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) @@ -3534,23 +3642,57 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, return tmp & TRANS_DDI_FUNC_ENABLE; } -static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, - u8 *primary_pipes, u8 *secondary_pipes) +static void enabled_uncompressed_joiner_pipes(struct intel_display *display, + u8 *primary_pipes, u8 *secondary_pipes) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_crtc *crtc; + + *primary_pipes = 0; + *secondary_pipes = 0; + + if (!HAS_UNCOMPRESSED_JOINER(display)) + return; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, + joiner_pipes(i915)) { + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + power_domain = POWER_DOMAIN_PIPE(pipe); + 
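+		/*
+		 * Editor's note: with_intel_display_power_if_enabled()
+		 * runs its body only while the pipe's power well is
+		 * held up, so powered-down pipes simply stay out of the
+		 * primary/secondary masks instead of faulting on the
+		 * ICL_PIPE_DSS_CTL1 read.
+		 */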
with_intel_display_power_if_enabled(i915, power_domain, wakeref) { + u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); + + if (tmp & UNCOMPRESSED_JOINER_PRIMARY) + *primary_pipes |= BIT(pipe); + if (tmp & UNCOMPRESSED_JOINER_SECONDARY) + *secondary_pipes |= BIT(pipe); + } + } +} + +static void enabled_bigjoiner_pipes(struct intel_display *display, + u8 *primary_pipes, u8 *secondary_pipes) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_crtc *crtc; *primary_pipes = 0; *secondary_pipes = 0; - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, - joiner_pipes(dev_priv)) { + if (!HAS_BIGJOINER(display)) + return; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, + joiner_pipes(i915)) { enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; - power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); - with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { - u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); + with_intel_display_power_if_enabled(i915, power_domain, wakeref) { + u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); if (!(tmp & BIG_JOINER_ENABLE)) continue; @@ -3560,56 +3702,197 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, else *secondary_pipes |= BIT(pipe); } + } +} - if (DISPLAY_VER(dev_priv) < 13) - continue; +static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes) +{ + u8 secondary_pipes = 0; - power_domain = POWER_DOMAIN_PIPE(pipe); - with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { - u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + for (int i = 1; i < num_pipes; i++) + secondary_pipes |= primary_pipes << i; - if (tmp & UNCOMPRESSED_JOINER_PRIMARY) - *primary_pipes |= BIT(pipe); - if (tmp & UNCOMPRESSED_JOINER_SECONDARY) - *secondary_pipes |= BIT(pipe); - } - } + return secondary_pipes; +} - /* Joiner pipes should always be consecutive primary and secondary */ - drm_WARN(&dev_priv->drm, *secondary_pipes != *primary_pipes << 1, - "Joiner misconfigured (primary pipes 0x%x, secondary pipes 0x%x)\n", - *primary_pipes, *secondary_pipes); +static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes) +{ + return expected_secondary_pipes(uncompjoiner_primary_pipes, 2); } -static enum pipe get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes) +static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes) { - if ((secondary_pipes & BIT(pipe)) == 0) - return pipe; + return expected_secondary_pipes(bigjoiner_primary_pipes, 2); +} - /* ignore everything above our pipe */ - primary_pipes &= ~GENMASK(7, pipe); +static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes) +{ + primary_pipes &= GENMASK(pipe, 0); - /* highest remaining bit should be our primary pipe */ - return fls(primary_pipes) - 1; + return primary_pipes ? 
BIT(fls(primary_pipes) - 1) : 0; } -static u8 get_joiner_secondary_pipes(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes) +static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes) { - enum pipe primary_pipe, next_primary_pipe; + return expected_secondary_pipes(ultrajoiner_primary_pipes, 4); +} - primary_pipe = get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes); +static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes, + u8 ultrajoiner_secondary_pipes) +{ + return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3; +} - if ((primary_pipes & BIT(primary_pipe)) == 0) - return 0; +static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915, + u8 *primary_pipes, u8 *secondary_pipes) +{ + struct intel_crtc *crtc; + + *primary_pipes = 0; + *secondary_pipes = 0; + + if (!HAS_ULTRAJOINER(i915)) + return; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, + joiner_pipes(i915)) { + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); + with_intel_display_power_if_enabled(i915, power_domain, wakeref) { + u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe)); + + if (!(tmp & ULTRA_JOINER_ENABLE)) + continue; + + if (tmp & PRIMARY_ULTRA_JOINER_ENABLE) + *primary_pipes |= BIT(pipe); + else + *secondary_pipes |= BIT(pipe); + } + } +} + +static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, + enum pipe pipe, + u8 *primary_pipe, u8 *secondary_pipes) +{ + struct intel_display *display = to_intel_display(&dev_priv->drm); + u8 primary_ultrajoiner_pipes; + u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes; + u8 secondary_ultrajoiner_pipes; + u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes; + u8 ultrajoiner_pipes; + u8 uncompressed_joiner_pipes, bigjoiner_pipes; + + enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes, + &secondary_ultrajoiner_pipes); + /* + * For some strange reason the last pipe in the set of four + * shouldn't have ultrajoiner enable bit set in hardware. + * Set the bit anyway to make life easier. 
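+ *
+ * Editor's worked example: with primary = BIT(PIPE_A) = 0b0001 the
+ * hardware reports secondaries 0b0110 (pipes B and C only).
+ * expected_secondary_pipes(0b0001, 3) confirms that raw reading, and
+ * fixup_ultrajoiner_secondary_pipes() then ORs in 0b0001 << 3 =
+ * 0b1000, yielding 0b1110 - the full B/C/D secondary set that
+ * expected_ultrajoiner_secondary_pipes() and the rest of the readout
+ * code expect.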
+ */ + drm_WARN_ON(&dev_priv->drm, + expected_secondary_pipes(primary_ultrajoiner_pipes, 3) != + secondary_ultrajoiner_pipes); + secondary_ultrajoiner_pipes = + fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes, + secondary_ultrajoiner_pipes); + + drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0); + + enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes, + &secondary_uncompressed_joiner_pipes); + + drm_WARN_ON(display->drm, + (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0); + + enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes, + &secondary_bigjoiner_pipes); + + drm_WARN_ON(display->drm, + (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0); + + ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes; + uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes | + secondary_uncompressed_joiner_pipes; + bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes; + + drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes, + "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n", + ultrajoiner_pipes, bigjoiner_pipes); + + drm_WARN(display->drm, secondary_ultrajoiner_pipes != + expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), + "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n", + expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), + secondary_ultrajoiner_pipes); + + drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0, + "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n", + uncompressed_joiner_pipes, bigjoiner_pipes); + + drm_WARN(display->drm, secondary_bigjoiner_pipes != + expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), + "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n", + expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), + secondary_bigjoiner_pipes); + + drm_WARN(display->drm, secondary_uncompressed_joiner_pipes != + expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), + "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n", + expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), + secondary_uncompressed_joiner_pipes); + + *primary_pipe = 0; + *secondary_pipes = 0; + + if (ultrajoiner_pipes & BIT(pipe)) { + *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes); + *secondary_pipes = secondary_ultrajoiner_pipes & + expected_ultrajoiner_secondary_pipes(*primary_pipe); + + drm_WARN(display->drm, + expected_ultrajoiner_secondary_pipes(*primary_pipe) != + *secondary_pipes, + "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", + *primary_pipe, + expected_ultrajoiner_secondary_pipes(*primary_pipe), + *secondary_pipes); + return; + } + + if (uncompressed_joiner_pipes & BIT(pipe)) { + *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes); + *secondary_pipes = secondary_uncompressed_joiner_pipes & + expected_uncompressed_joiner_secondary_pipes(*primary_pipe); + + drm_WARN(display->drm, + expected_uncompressed_joiner_secondary_pipes(*primary_pipe) != + *secondary_pipes, + "Wrong uncompressed joiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", + *primary_pipe, + expected_uncompressed_joiner_secondary_pipes(*primary_pipe), + *secondary_pipes); + return; + } - /* ignore our primary pipe and everything below it */ - 
primary_pipes &= ~GENMASK(primary_pipe, 0); - /* make sure a high bit is set for the ffs() */ - primary_pipes |= BIT(7); - /* lowest remaining bit should be the next primary pipe */ - next_primary_pipe = ffs(primary_pipes) - 1; + if (bigjoiner_pipes & BIT(pipe)) { + *primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes); + *secondary_pipes = secondary_bigjoiner_pipes & + expected_bigjoiner_secondary_pipes(*primary_pipe); - return secondary_pipes & GENMASK(next_primary_pipe - 1, primary_pipe); + drm_WARN(display->drm, + expected_bigjoiner_secondary_pipes(*primary_pipe) != + *secondary_pipes, + "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", + *primary_pipe, + expected_bigjoiner_secondary_pipes(*primary_pipe), + *secondary_pipes); + return; + } } static u8 hsw_panel_transcoders(struct drm_i915_private *i915) @@ -3628,7 +3911,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); enum transcoder cpu_transcoder; - u8 primary_pipes, secondary_pipes; + u8 primary_pipe, secondary_pipes; u8 enabled_transcoders = 0; /* @@ -3681,10 +3964,9 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) enabled_transcoders |= BIT(cpu_transcoder); /* joiner secondary -> consider the primary pipe's transcoder as well */ - enabled_joiner_pipes(dev_priv, &primary_pipes, &secondary_pipes); + enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes); if (secondary_pipes & BIT(crtc->pipe)) { - cpu_transcoder = (enum transcoder) - get_joiner_primary_pipe(crtc->pipe, primary_pipes, secondary_pipes); + cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) enabled_transcoders |= BIT(cpu_transcoder); } @@ -3815,17 +4097,15 @@ static void intel_joiner_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - u8 primary_pipes, secondary_pipes; + u8 primary_pipe, secondary_pipes; enum pipe pipe = crtc->pipe; - enabled_joiner_pipes(i915, &primary_pipes, &secondary_pipes); + enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes); - if (((primary_pipes | secondary_pipes) & BIT(pipe)) == 0) + if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0) return; - crtc_state->joiner_pipes = - BIT(get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes)) | - get_joiner_secondary_pipes(pipe, primary_pipes, secondary_pipes); + crtc_state->joiner_pipes = primary_pipe | secondary_pipes; } static bool hsw_get_pipe_config(struct intel_crtc *crtc, @@ -3986,7 +4266,7 @@ int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc_state *crtc_state; struct drm_display_mode *mode; struct intel_crtc *crtc; @@ -3995,7 +4275,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) if (!encoder->get_hw_state(encoder, &pipe)) return NULL; - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -4285,22 +4565,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (ret) return ret; - ret = 
intel_compute_pipe_wm(state, crtc); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "Target pipe watermarks are invalid\n"); - return ret; - } - - /* - * Calculate 'intermediate' watermarks that satisfy both the - * old state and the new state. We can program these - * immediately. - */ - ret = intel_compute_intermediate_wm(state, crtc); + ret = intel_wm_compute(state, crtc); if (ret) { drm_dbg_kms(&dev_priv->drm, - "No valid intermediate pipe watermarks are possible\n"); + "[CRTC:%d:%s] watermarks are invalid\n", + crtc->base.base.id, crtc->base.name); return ret; } @@ -4798,6 +5067,8 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state, struct drm_connector *connector; int i; + intel_vrr_compute_config_late(crtc_state); + for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_encoder *encoder = @@ -5035,15 +5306,15 @@ pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset, const struct intel_cx0pll_state *a, const struct intel_cx0pll_state *b) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); char *chipname = a->use_c10 ? "C10" : "C20"; pipe_config_mismatch(p, fastset, crtc, name, chipname); drm_printf(p, "expected:\n"); - intel_cx0pll_dump_hw_state(i915, a); + intel_cx0pll_dump_hw_state(display, a); drm_printf(p, "found:\n"); - intel_cx0pll_dump_hw_state(i915, b); + intel_cx0pll_dump_hw_state(display, b); } bool @@ -5431,7 +5702,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_INFOFRAME(avi); PIPE_CONF_CHECK_INFOFRAME(spd); PIPE_CONF_CHECK_INFOFRAME(hdmi); - PIPE_CONF_CHECK_INFOFRAME(drm); + if (!fastset) + PIPE_CONF_CHECK_INFOFRAME(drm); PIPE_CONF_CHECK_DP_VSC_SDP(vsc); PIPE_CONF_CHECK_DP_AS_SDP(as_sdp); @@ -6732,17 +7004,12 @@ int intel_atomic_check(struct drm_device *dev, static int intel_atomic_prepare_commit(struct intel_atomic_state *state) { - struct intel_crtc_state __maybe_unused *crtc_state; - struct intel_crtc *crtc; - int i, ret; + int ret; ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); if (ret < 0) return ret; - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - intel_color_prepare_commit(state, crtc); - return 0; } @@ -6823,12 +7090,12 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state, * During modesets pipe configuration was programmed as the * CRTC was enabled. 
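	 *
	 * (Editor's note: the !use_dsb condition added below keeps this
	 * MMIO path for color commit, PIPE_MISC and fastset only when no
	 * DSB was built for the commit; when use_dsb is set,
	 * intel_atomic_dsb_finish() has already queued the equivalent
	 * writes into dsb_commit.)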
*/ - if (!modeset) { + if (!modeset && !new_crtc_state->use_dsb) { if (intel_crtc_needs_color_update(new_crtc_state)) - intel_color_commit_arm(new_crtc_state); + intel_color_commit_arm(NULL, new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(new_crtc_state); + bdw_set_pipe_misc(NULL, new_crtc_state); if (intel_crtc_needs_fastset(new_crtc_state)) intel_pipe_fastset(old_crtc_state, new_crtc_state); @@ -6925,10 +7192,12 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state, drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); if (!modeset && - intel_crtc_needs_color_update(new_crtc_state)) - intel_color_commit_noarm(new_crtc_state); + intel_crtc_needs_color_update(new_crtc_state) && + !new_crtc_state->use_dsb) + intel_color_commit_noarm(NULL, new_crtc_state); - intel_crtc_planes_update_noarm(state, crtc); + if (!new_crtc_state->use_dsb) + intel_crtc_planes_update_noarm(NULL, state, crtc); } static void intel_update_crtc(struct intel_atomic_state *state, @@ -6939,16 +7208,25 @@ static void intel_update_crtc(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - /* Perform vblank evasion around commit operation */ - intel_pipe_update_start(state, crtc); + if (new_crtc_state->use_dsb) { + intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event); - commit_pipe_pre_planes(state, crtc); + intel_dsb_commit(new_crtc_state->dsb_commit, false); + } else { + /* Perform vblank evasion around commit operation */ + intel_pipe_update_start(state, crtc); - intel_crtc_planes_update_arm(state, crtc); + if (new_crtc_state->dsb_commit) + intel_dsb_commit(new_crtc_state->dsb_commit, false); - commit_pipe_post_planes(state, crtc); + commit_pipe_pre_planes(state, crtc); - intel_pipe_update_end(state, crtc); + intel_crtc_planes_update_arm(NULL, state, crtc); + + commit_pipe_post_planes(state, crtc); + + intel_pipe_update_end(state, crtc); + } /* * VRR/Seamless M/N update may need to update frame timings. @@ -7273,6 +7551,24 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat } } +static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->dsb_commit) + intel_dsb_wait(crtc_state->dsb_commit); + + intel_color_wait_commit(crtc_state); +} + +static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->dsb_commit) { + intel_dsb_cleanup(crtc_state->dsb_commit); + crtc_state->dsb_commit = NULL; + } + + intel_color_cleanup_commit(crtc_state); +} + static void intel_atomic_cleanup_work(struct work_struct *work) { struct intel_atomic_state *state = @@ -7283,7 +7579,7 @@ static void intel_atomic_cleanup_work(struct work_struct *work) int i; for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) - intel_color_cleanup_commit(old_crtc_state); + intel_atomic_dsb_cleanup(old_crtc_state); drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); drm_atomic_helper_commit_cleanup_done(&state->base); @@ -7324,15 +7620,93 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s * caller made sure that the object is synced wrt. the related color clear value * GPU write on it. 
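	 *
	 * (Editor's note: the switch below from
	 * i915_gem_object_read_from_page() to intel_bo_read_from_page()
	 * with intel_fb_bo() goes through the buffer-object layer whose
	 * intel_bo.h include was added to intel_display.c above,
	 * presumably so the clear-color read no longer depends on i915
	 * GEM directly. The clear color value itself still sits 16 bytes
	 * into the CC plane, as the offset shows.)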
*/ - ret = i915_gem_object_read_from_page(intel_fb_obj(fb), - fb->offsets[cc_plane] + 16, - &plane_state->ccval, - sizeof(plane_state->ccval)); + ret = intel_bo_read_from_page(intel_fb_bo(fb), + fb->offsets[cc_plane] + 16, + &plane_state->ccval, + sizeof(plane_state->ccval)); /* The above could only fail if the FB obj has an unexpected backing store type. */ drm_WARN_ON(&i915->drm, ret); } } +static void intel_atomic_dsb_prepare(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + intel_color_prepare_commit(state, crtc); +} + +static void intel_atomic_dsb_finish(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!new_crtc_state->hw.active) + return; + + if (state->base.legacy_cursor_update) + return; + + /* FIXME deal with everything */ + new_crtc_state->use_dsb = + new_crtc_state->update_planes && + !new_crtc_state->vrr.enable && + !new_crtc_state->do_async_flip && + !new_crtc_state->has_psr && + !new_crtc_state->scaler_state.scaler_users && + !old_crtc_state->scaler_state.scaler_users && + !intel_crtc_needs_modeset(new_crtc_state) && + !intel_crtc_needs_fastset(new_crtc_state); + + if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank) + return; + + /* + * Rough estimate: + * ~64 registers per each plane * 8 planes = 512 + * Double that for pipe stuff and other overhead. + */ + new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, + new_crtc_state->use_dsb ? 1024 : 16); + if (!new_crtc_state->dsb_commit) { + new_crtc_state->use_dsb = false; + intel_color_cleanup_commit(new_crtc_state); + return; + } + + if (new_crtc_state->use_dsb) { + if (intel_crtc_needs_color_update(new_crtc_state)) + intel_color_commit_noarm(new_crtc_state->dsb_commit, + new_crtc_state); + intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit, + state, crtc); + + intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); + + if (intel_crtc_needs_color_update(new_crtc_state)) + intel_color_commit_arm(new_crtc_state->dsb_commit, + new_crtc_state); + bdw_set_pipe_misc(new_crtc_state->dsb_commit, + new_crtc_state); + intel_crtc_planes_update_arm(new_crtc_state->dsb_commit, + state, crtc); + + if (!new_crtc_state->dsb_color_vblank) { + intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1); + intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit); + intel_dsb_interrupt(new_crtc_state->dsb_commit); + } + } + + if (new_crtc_state->dsb_color_vblank) + intel_dsb_chain(state, new_crtc_state->dsb_commit, + new_crtc_state->dsb_color_vblank, true); + + intel_dsb_finish(new_crtc_state->dsb_commit); +} + static void intel_atomic_commit_tail(struct intel_atomic_state *state) { struct drm_device *dev = state->base.dev; @@ -7340,13 +7714,21 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; int i; + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + intel_atomic_dsb_prepare(state, crtc); + intel_atomic_commit_fence_wait(state); intel_td_flush(dev_priv); + intel_atomic_prepare_plane_clear_colors(state); + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + intel_atomic_dsb_finish(state, crtc); + 
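	/*
	 * Editor's summary (annotation, not part of the patch): the
	 * commit tail now builds the DSB up front -
	 * intel_atomic_dsb_prepare() runs before the fence wait, and
	 * intel_atomic_dsb_finish() once the clear colors have been
	 * read. Per the conditions above, use_dsb is only set for plain
	 * plane updates: no VRR, no async flip, no PSR, no scaler users
	 * on either state, and neither a modeset nor a fastset; anything
	 * else falls back to the vblank-evasion MMIO path in
	 * intel_update_crtc().
	 */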
drm_atomic_helper_wait_for_dependencies(&state->base); drm_dp_mst_atomic_wait_for_dependencies(&state->base); intel_atomic_global_state_wait_for_dependencies(state); @@ -7380,8 +7762,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) */ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); - intel_atomic_prepare_plane_clear_colors(state); - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state) || @@ -7462,7 +7842,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (new_crtc_state->do_async_flip) intel_crtc_disable_flip_done(state, crtc); - intel_color_wait_commit(new_crtc_state); + intel_atomic_dsb_wait_commit(new_crtc_state); } /* @@ -7497,14 +7877,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_modeset_verify_crtc(state, crtc); - /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ - hsw_ips_post_update(state, crtc); - - /* - * Activate DRRS after state readout to avoid - * dp_m_n vs. dp_m2_n2 confusion on BDW+. - */ - intel_drrs_activate(new_crtc_state); + intel_post_plane_update_after_readout(state, crtc); /* * DSB cleanup is done in cleanup_work aligning with framebuffer @@ -7514,7 +7887,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * FIXME get rid of this funny new->old swapping */ old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank); - old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit); + old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit); } /* Underruns don't always raise interrupts, so check manually */ @@ -7661,13 +8034,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, ret = intel_atomic_swap_state(state); if (ret) { - struct intel_crtc_state *new_crtc_state; - struct intel_crtc *crtc; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) - intel_color_cleanup_commit(new_crtc_state); - drm_atomic_helper_unprepare_planes(dev, &state->base); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; @@ -7702,23 +8068,6 @@ void intel_plane_destroy(struct drm_plane *plane) kfree(to_intel_plane(plane)); } -int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; - struct drm_crtc *drmmode_crtc; - struct intel_crtc *crtc; - - drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); - if (!drmmode_crtc) - return -ENOENT; - - crtc = to_intel_crtc(drmmode_crtc); - pipe_from_crtc_id->pipe = crtc->pipe; - - return 0; -} - static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -7800,7 +8149,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) if (HAS_DDI(dev_priv)) { if (intel_ddi_crt_present(dev_priv)) - intel_crt_init(dev_priv); + intel_crt_init(display); intel_bios_for_each_encoder(display, intel_ddi_init); @@ -7815,7 +8164,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) * incorrect sharing of the PPS. 
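	 *
	 * (Editor's note: the intel_crt_init() calls below now take the
	 * struct intel_display pointer instead of dev_priv, part of the
	 * wider i915/display decoupling visible throughout this patch;
	 * i830_enable_pipe()/i830_disable_pipe() get the same treatment
	 * further down.)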
*/ intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); @@ -7846,7 +8195,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) bool has_edp, has_port; if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) - intel_crt_init(dev_priv); + intel_crt_init(display); /* * The DP_DETECTED bit is the latched state of the DDC @@ -7892,14 +8241,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) vlv_dsi_init(dev_priv); } else if (IS_PINEVIEW(dev_priv)) { intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { bool found = false; if (IS_MOBILE(dev_priv)) intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); @@ -7941,7 +8290,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_I85X(dev_priv)) intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); intel_dvo_init(dev_priv); } @@ -7961,8 +8310,9 @@ static int max_dotclock(struct drm_i915_private *i915) { int max_dotclock = i915->display.cdclk.max_dotclk_freq; - /* icl+ might use joiner */ - if (DISPLAY_VER(i915) >= 11) + if (HAS_ULTRAJOINER(i915)) + max_dotclock *= 4; + else if (HAS_UNCOMPRESSED_JOINER(i915) || HAS_BIGJOINER(i915)) max_dotclock *= 2; return max_dotclock; @@ -8086,7 +8436,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, const struct drm_display_mode *mode, - bool joiner) + int num_joined_pipes) { int plane_width_max, plane_height_max; @@ -8102,8 +8452,11 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, * plane so let's not advertize modes that are * too big for that. 
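	 *
	 * Editor's worked example of the new limits below: on display
	 * version 30+ the maximum advertised plane width is
	 * 6144 * num_joined_pipes (24576 with four joined pipes), and
	 * version 11+ gets 5120 * num_joined_pipes, whereas the old
	 * "5120 << joiner" with a bool could at most double it to 10240.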
*/ - if (DISPLAY_VER(dev_priv) >= 11) { - plane_width_max = 5120 << joiner; + if (DISPLAY_VER(dev_priv) >= 30) { + plane_width_max = 6144 * num_joined_pipes; + plane_height_max = 4800; + } else if (DISPLAY_VER(dev_priv) >= 11) { + plane_width_max = 5120 * num_joined_pipes; plane_height_max = 4320; } else { plane_width_max = 5120; @@ -8255,9 +8608,9 @@ out: return ret; } -void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +void i830_enable_pipe(struct intel_display *display, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); enum transcoder cpu_transcoder = (enum transcoder)pipe; /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { @@ -8270,10 +8623,10 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) u32 dpll, fp; int i; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, i9xx_calc_dpll_params(48000, &clock) != 25154); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", pipe_name(pipe), clock.vco, clock.dot); @@ -8285,35 +8638,35 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder), HACTIVE(640 - 1) | HTOTAL(800 - 1)); - intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder), HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); - intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder), HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); - intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder), VACTIVE(480 - 1) | VTOTAL(525 - 1)); - intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder), VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); - intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder), VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); - intel_de_write(dev_priv, PIPESRC(dev_priv, pipe), + intel_de_write(display, PIPESRC(display, pipe), PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); - intel_de_write(dev_priv, FP0(pipe), fp); - intel_de_write(dev_priv, FP1(pipe), fp); + intel_de_write(display, FP0(pipe), fp); + intel_de_write(display, FP1(pipe), fp); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), + intel_de_write(display, DPLL(display, pipe), dpll & ~DPLL_VGA_MODE_DIS); - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll); + intel_de_write(display, DPLL(display, pipe), dpll); /* Wait for the clocks to stabilize. */ - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); /* The pixel multiplier can only be updated once the @@ -8321,46 +8674,46 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) * * So write it again. 
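/*
 * [Editor's sketch] Companion to the intel_mode_valid_max_plane_size()
 * hunk above: changing the bool parameter into num_joined_pipes turns
 * the old "5120 << joiner" doubling into a linear scale, which is what
 * makes 4-pipe ultrajoiner widths expressible and lets display ver 30
 * use a 6144-per-pipe limit. The pre-ver-11 height below is an
 * assumption, since the hunk cuts off before the else branch completes.
 */
static void max_plane_size(int display_ver, int num_joined_pipes,
			   int *width_max, int *height_max)
{
	if (display_ver >= 30) {
		*width_max = 6144 * num_joined_pipes;
		*height_max = 4800;
	} else if (display_ver >= 11) {
		*width_max = 5120 * num_joined_pipes;
		*height_max = 4320;
	} else {
		*width_max = 5120;
		*height_max = 4096;	/* assumption, not shown in the hunk */
	}
}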
*/ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll); + intel_de_write(display, DPLL(display, pipe), dpll); /* We do this three times for luck */ for (i = 0; i < 3 ; i++) { - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll); - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_write(display, DPLL(display, pipe), dpll); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); /* wait for warmup */ } - intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), TRANSCONF_ENABLE); - intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe)); + intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE); + intel_de_posting_read(display, TRANSCONF(display, pipe)); intel_wait_for_pipe_scanline_moving(crtc); } -void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +void i830_disable_pipe(struct intel_display *display, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); - drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", + drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n", pipe_name(pipe)); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_A)) & DISP_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_B)) & DISP_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_C)) & DISP_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & MCURSOR_MODE_MASK); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_B)) & MCURSOR_MODE_MASK); + drm_WARN_ON(display->drm, + intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK); + drm_WARN_ON(display->drm, + intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK); - intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), 0); - intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe)); + intel_de_write(display, TRANSCONF(display, pipe), 0); + intel_de_posting_read(display, TRANSCONF(display, pipe)); intel_wait_for_pipe_scanline_stopped(crtc); - intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS); - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS); + intel_de_posting_read(display, DPLL(display, pipe)); } void intel_hpd_poll_fini(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index b21d9578d5db..caef04f655c5 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -40,7 +40,6 @@ struct drm_encoder; struct drm_file; struct drm_format_info; struct drm_framebuffer; -struct drm_i915_gem_object; struct drm_i915_private; struct drm_mode_fb_cmd2; struct drm_modeset_acquire_ctx; @@ -52,6 +51,7 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_digital_port; +struct intel_display; struct intel_dp; struct intel_encoder; struct intel_initial_plane_config; @@ -94,16 +94,6 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder) return transcoder == 
TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C; } -/* - * Global legacy plane identifier. Valid only for primary/sprite - * planes on pre-g4x, and only for primary planes on g4x-bdw. - */ -enum i9xx_plane_id { - PLANE_A, - PLANE_B, - PLANE_C, -}; - #define plane_name(p) ((p) + 'A') #define for_each_plane_id_on_crtc(__crtc, __p) \ @@ -401,6 +391,30 @@ enum phy_fia { ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) +#define for_each_crtc_in_masks(display, crtc, first_pipes, second_pipes, i) \ + for ((i) = 0; \ + (i) < (I915_MAX_PIPES * 2) && ((crtc) = intel_crtc_for_pipe(display, (i) % I915_MAX_PIPES), 1); \ + (i)++) \ + for_each_if((crtc) && ((first_pipes) | ((second_pipes) << I915_MAX_PIPES)) & BIT(i)) + +#define for_each_crtc_in_masks_reverse(display, crtc, first_pipes, second_pipes, i) \ + for ((i) = (I915_MAX_PIPES * 2 - 1); \ + (i) >= 0 && ((crtc) = intel_crtc_for_pipe(display, (i) % I915_MAX_PIPES), 1); \ + (i)--) \ + for_each_if((crtc) && ((first_pipes) | ((second_pipes) << I915_MAX_PIPES)) & BIT(i)) + +#define for_each_pipe_crtc_modeset_disable(display, crtc, crtc_state, i) \ + for_each_crtc_in_masks(display, crtc, \ + _intel_modeset_primary_pipes(crtc_state), \ + _intel_modeset_secondary_pipes(crtc_state), \ + i) + +#define for_each_pipe_crtc_modeset_enable(display, crtc, crtc_state, i) \ + for_each_crtc_in_masks_reverse(display, crtc, \ + _intel_modeset_primary_pipes(crtc_state), \ + _intel_modeset_secondary_pipes(crtc_state), \ + i) + int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int intel_atomic_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -415,7 +429,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, const struct drm_display_mode *mode, - bool joiner); + int num_joined_pipes); enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915, const struct drm_display_mode *mode); @@ -425,7 +439,14 @@ bool is_trans_port_sync_master(const struct intel_crtc_state *state); u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state); u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state); +u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state); +u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state); struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state); bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state); bool intel_pipe_config_compare(const struct intel_crtc_state *current_config, @@ -437,8 +458,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); void intel_enable_transcoder(const struct 
intel_crtc_state *new_crtc_state); void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state); -void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); -void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); +void i830_enable_pipe(struct intel_display *display, enum pipe pipe); +void i830_disable_pipe(struct intel_display *display, enum pipe pipe); int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); @@ -470,16 +491,10 @@ bool intel_encoder_is_snps(struct intel_encoder *encoder); bool intel_encoder_is_tc(struct intel_encoder *encoder); enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder); -int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); -void vlv_wait_port_ready(struct drm_i915_private *dev_priv, +void vlv_wait_port_ready(struct intel_display *display, struct intel_digital_port *dig_port, unsigned int expected_mask); -struct drm_framebuffer * -intel_framebuffer_create(struct drm_i915_gem_object *obj, - struct drm_mode_fb_cmd2 *mode_cmd); bool intel_fuzzy_clock_check(int clock1, int clock2); @@ -570,21 +585,21 @@ void assert_transcoder(struct drm_i915_private *dev_priv, bool assert_port_valid(struct drm_i915_private *i915, enum port port); /* - * Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity - * checks to check for unexpected conditions which may not necessarily be a user - * visible problem. This will either WARN() or DRM_ERROR() depending on the - * verbose_state_checks module param, to enable distros and users to tailor - * their preferred amount of i915 abrt spam. + * Use INTEL_DISPLAY_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw + * state sanity checks to check for unexpected conditions which may not + * necessarily be a user visible problem. This will either drm_WARN() or + * drm_err() depending on the verbose_state_checks module param, to enable + * distros and users to tailor their preferred amount of i915 abrt spam. */ -#define I915_STATE_WARN(__i915, condition, format...) ({ \ - struct drm_device *drm = &(__i915)->drm; \ +#define INTEL_DISPLAY_STATE_WARN(__display, condition, format...) 
({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \ - drm_err(drm, format); \ + if (!drm_WARN((__display)->drm, (__display)->params.verbose_state_checks, format)) \ + drm_err((__display)->drm, format); \ unlikely(__ret_warn_on); \ }) bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915); +int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state); #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 0a711114ff2b..45b7c6900adc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -81,10 +81,8 @@ struct intel_display_funcs { struct intel_wm_funcs { /* update_wm is for legacy wm management */ void (*update_wm)(struct drm_i915_private *dev_priv); - int (*compute_pipe_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - int (*compute_intermediate_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); + int (*compute_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); void (*initial_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*atomic_update_watermarks)(struct intel_atomic_state *state, @@ -286,6 +284,9 @@ struct intel_display { /* drm device backpointer */ struct drm_device *drm; + /* Platform (and subplatform, if any) identification */ + struct intel_display_platforms platform; + /* Display functions */ struct { /* Top level crtc-ish functions */ @@ -457,6 +458,10 @@ struct intel_display { /* For i915gm/i945gm vblank irq workaround */ u8 vblank_enabled; + int vblank_wa_num_pipes; + + struct work_struct vblank_dc_work; + u32 de_irq_mask[I915_MAX_PIPES]; u32 pipestat_irq_mask[I915_MAX_PIPES]; } irq; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index f5f618199d39..11aff485d8fa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -3,6 +3,7 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/debugfs.h> #include <linux/string_helpers.h> #include <drm/drm_debugfs.h> @@ -10,13 +11,13 @@ #include <drm/drm_fourcc.h> #include "hsw_ips.h" -#include "i915_debugfs.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_alpm.h" +#include "intel_bo.h" #include "intel_crtc.h" -#include "intel_de.h" #include "intel_crtc_state_dump.h" +#include "intel_de.h" #include "intel_display_debugfs.h" #include "intel_display_debugfs_params.h" #include "intel_display_power.h" @@ -26,7 +27,9 @@ #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_drrs.h" +#include "intel_fb.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_hdcp.h" @@ -39,11 +42,28 @@ #include "intel_vdsc.h" #include "intel_wm.h" +static struct intel_display *node_to_intel_display(struct drm_info_node *node) +{ + return to_intel_display(node->minor->dev); +} + static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { return to_i915(node->minor->dev); } +static int intel_display_caps(struct seq_file *m, void *data) +{ + struct intel_display *display = node_to_intel_display(m->private); + struct drm_printer p = drm_seq_file_printer(m); + + intel_display_device_info_print(DISPLAY_INFO(display), + DISPLAY_RUNTIME_INFO(display), &p); + 
intel_display_params_dump(&display->params, display->drm->driver->name, &p); + + return 0; +} + static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -106,7 +126,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fbdev_fb->base.format->cpp[0] * 8, fbdev_fb->base.modifier, drm_framebuffer_read_refcount(&fbdev_fb->base)); - i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base)); + intel_bo_describe(m, intel_fb_bo(&fbdev_fb->base)); seq_putc(m, '\n'); } #endif @@ -124,7 +144,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.format->cpp[0] * 8, fb->base.modifier, drm_framebuffer_read_refcount(&fb->base)); - i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base)); + intel_bo_describe(m, intel_fb_bo(&fb->base)); seq_putc(m, '\n'); } mutex_unlock(&dev_priv->drm.mode_config.fb_lock); @@ -424,7 +444,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) int num_scalers = crtc->num_scalers; int i; - /* Not all platformas have a scaler */ + /* Not all platforms have a scaler */ if (num_scalers) { seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d", num_scalers, @@ -773,198 +793,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) return 0; } -static ssize_t i915_displayport_test_active_write(struct file *file, - const char __user *ubuf, - size_t len, loff_t *offp) -{ - char *input_buffer; - int status = 0; - struct drm_device *dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - int val = 0; - - dev = ((struct seq_file *)file->private_data)->private; - - if (len == 0) - return 0; - - input_buffer = memdup_user_nul(ubuf, len); - if (IS_ERR(input_buffer)) - return PTR_ERR(input_buffer); - - drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len); - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - status = kstrtoint(input_buffer, 10, &val); - if (status < 0) - break; - drm_dbg(dev, "Got %d for test active\n", val); - /* To prevent erroneous activation of the compliance - * testing code, only accept an actual value of 1 here - */ - if (val == 1) - intel_dp->compliance.test_active = true; - else - intel_dp->compliance.test_active = false; - } - } - drm_connector_list_iter_end(&conn_iter); - kfree(input_buffer); - if (status < 0) - return status; - - *offp += len; - return len; -} - -static int i915_displayport_test_active_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - 
intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->compliance.test_active) - seq_puts(m, "1"); - else - seq_puts(m, "0"); - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} - -static int i915_displayport_test_active_open(struct inode *inode, - struct file *file) -{ - return single_open(file, i915_displayport_test_active_show, - inode->i_private); -} - -static const struct file_operations i915_displayport_test_active_fops = { - .owner = THIS_MODULE, - .open = i915_displayport_test_active_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_displayport_test_active_write -}; - -static int i915_displayport_test_data_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->compliance.test_type == - DP_TEST_LINK_EDID_READ) - seq_printf(m, "%lx", - intel_dp->compliance.test_data.edid); - else if (intel_dp->compliance.test_type == - DP_TEST_LINK_VIDEO_PATTERN) { - seq_printf(m, "hdisplay: %d\n", - intel_dp->compliance.test_data.hdisplay); - seq_printf(m, "vdisplay: %d\n", - intel_dp->compliance.test_data.vdisplay); - seq_printf(m, "bpc: %u\n", - intel_dp->compliance.test_data.bpc); - } else if (intel_dp->compliance.test_type == - DP_TEST_LINK_PHY_TEST_PATTERN) { - seq_printf(m, "pattern: %d\n", - intel_dp->compliance.test_data.phytest.phy_pattern); - seq_printf(m, "Number of lanes: %d\n", - intel_dp->compliance.test_data.phytest.num_lanes); - seq_printf(m, "Link Rate: %d\n", - intel_dp->compliance.test_data.phytest.link_rate); - seq_printf(m, "level: %02x\n", - intel_dp->train_set[0]); - } - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); - -static int i915_displayport_test_type_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); - static ssize_t i915_fifo_underrun_reset_write(struct file *filp, const char __user *ubuf, @@ -1025,6 +853,7 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { }; static const struct drm_info_list intel_display_debugfs_list[] = { + {"intel_display_caps", 
intel_display_caps, 0}, {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, @@ -1037,37 +866,22 @@ static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_lpsp_status", i915_lpsp_status, 0}, }; -static const struct { - const char *name; - const struct file_operations *fops; -} intel_display_debugfs_files[] = { - {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, - {"i915_dp_test_data", &i915_displayport_test_data_fops}, - {"i915_dp_test_type", &i915_displayport_test_type_fops}, - {"i915_dp_test_active", &i915_displayport_test_active_fops}, -}; - void intel_display_debugfs_register(struct drm_i915_private *i915) { struct intel_display *display = &i915->display; struct drm_minor *minor = i915->drm.primary; - int i; - for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { - debugfs_create_file(intel_display_debugfs_files[i].name, - 0644, - minor->debugfs_root, - to_i915(minor->dev), - intel_display_debugfs_files[i].fops); - } + debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root, + to_i915(minor->dev), &i915_fifo_underrun_reset_ops); drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); intel_bios_debugfs_register(display); - intel_cdclk_debugfs_register(i915); - intel_dmc_debugfs_register(i915); + intel_cdclk_debugfs_register(display); + intel_dmc_debugfs_register(display); + intel_dp_test_debugfs_register(display); intel_fbc_debugfs_register(display); intel_hpd_debugfs_register(i915); intel_opregion_debugfs_register(display); @@ -1502,6 +1316,68 @@ static int intel_crtc_pipe_show(struct seq_file *m, void *unused) } DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe); +static int i915_joiner_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + + seq_printf(m, "%d\n", connector->force_joined_pipes); + + return 0; +} + +static ssize_t i915_joiner_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + int force_joined_pipes = 0; + int ret; + + if (len == 0) + return 0; + + ret = kstrtoint_from_user(ubuf, len, 0, &force_joined_pipes); + if (ret < 0) + return ret; + + switch (force_joined_pipes) { + case 0: + case 1: + case 2: + connector->force_joined_pipes = force_joined_pipes; + break; + case 4: + if (HAS_ULTRAJOINER(i915)) { + connector->force_joined_pipes = force_joined_pipes; + break; + } + + fallthrough; + default: + return -EINVAL; + } + + *offp += len; + + return len; +} + +static int i915_joiner_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_joiner_show, inode->i_private); +} + +static const struct file_operations i915_joiner_fops = { + .owner = THIS_MODULE, + .open = i915_joiner_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_joiner_write +}; + /** * intel_connector_debugfs_add - add i915 specific connector debugfs files * @connector: pointer to a registered intel_connector @@ -1548,11 +1424,11 @@ void intel_connector_debugfs_add(struct intel_connector *connector) connector, &i915_dsc_fractional_bpp_fops); } - if (DISPLAY_VER(i915) >= 11 && - (connector_type == DRM_MODE_CONNECTOR_DisplayPort || - connector_type == DRM_MODE_CONNECTOR_eDP)) { - 
debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root, - &connector->force_bigjoiner_enable); + if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) && + intel_dp_has_joiner(intel_attached_dp(connector))) { + debugfs_create_file("i915_joiner_force_enable", 0644, root, + connector, &i915_joiner_fops); } if (connector_type == DRM_MODE_CONNECTOR_DSI || diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index 1b46ba985580..5f98e1b2a401 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -3,12 +3,13 @@ * Copyright © 2023 Intel Corporation */ -#include <drm/intel/i915_pciids.h> +#include <drm/intel/pciids.h> #include <drm/drm_color_mgmt.h> #include <linux/pci.h> #include "i915_drv.h" #include "i915_reg.h" +#include "intel_cx0_phy_regs.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_device.h" @@ -31,14 +32,25 @@ struct stepping_desc { .step_info.size = ARRAY_SIZE(_map) struct subplatform_desc { - enum intel_display_subplatform subplatform; + struct intel_display_platforms platforms; const char *name; const u16 *pciidlist; struct stepping_desc step_info; }; +#define SUBPLATFORM(_platform, _subplatform) \ + .platforms._platform##_##_subplatform = 1, \ + .name = #_subplatform + +/* + * Group subplatform alias that matches multiple subplatforms. For making ult + * cover both ult and ulx on HSW/BDW. + */ +#define SUBPLATFORM_GROUP(_platform, _subplatform) \ + .platforms._platform##_##_subplatform = 1 + struct platform_desc { - enum intel_display_platform platform; + struct intel_display_platforms platforms; const char *name; const struct subplatform_desc *subplatforms; const struct intel_display_device_info *info; /* NULL for GMD ID */ @@ -46,9 +58,16 @@ struct platform_desc { }; #define PLATFORM(_platform) \ - .platform = (INTEL_DISPLAY_##_platform), \ + .platforms._platform = 1, \ .name = #_platform +/* + * Group platform alias that matches multiple platforms. For aliases such as g4x + * that covers both g45 and gm45. 
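/*
 * [Editor's sketch] Mock of the value check in i915_joiner_write()
 * above: force_joined_pipes accepts pipe counts 0 (leave the decision
 * to the driver), 1 and 2 unconditionally, and 4 only when the hardware
 * has the ultrajoiner; everything else is rejected. Standalone and
 * simplified, not driver code.
 */
#include <errno.h>
#include <stdbool.h>

static int validate_force_joined_pipes(int val, bool has_ultrajoiner)
{
	switch (val) {
	case 0:
	case 1:
	case 2:
		return 0;
	case 4:
		if (has_ultrajoiner)
			return 0;
		/* fall through: forcing 4 pipes without ultrajoiner */
	default:
		return -EINVAL;
	}
}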
+ */ +#define PLATFORM_GROUP(_platform) \ + .platforms._platform = 1 + #define ID(id) (id) static const struct intel_display_device_info no_display = {}; @@ -232,7 +251,7 @@ static const struct intel_display_device_info no_display = {}; .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) static const struct platform_desc i830_desc = { - PLATFORM(I830), + PLATFORM(i830), .info = &(const struct intel_display_device_info) { I830_DISPLAY, @@ -241,7 +260,7 @@ static const struct platform_desc i830_desc = { }; static const struct platform_desc i845_desc = { - PLATFORM(I845G), + PLATFORM(i845g), .info = &(const struct intel_display_device_info) { I845_DISPLAY, @@ -250,7 +269,7 @@ static const struct platform_desc i845_desc = { }; static const struct platform_desc i85x_desc = { - PLATFORM(I85X), + PLATFORM(i85x), .info = &(const struct intel_display_device_info) { I830_DISPLAY, @@ -260,7 +279,7 @@ static const struct platform_desc i85x_desc = { }; static const struct platform_desc i865g_desc = { - PLATFORM(I865G), + PLATFORM(i865g), .info = &(const struct intel_display_device_info) { I845_DISPLAY, @@ -282,7 +301,7 @@ static const struct platform_desc i865g_desc = { .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */ static const struct platform_desc i915g_desc = { - PLATFORM(I915G), + PLATFORM(i915g), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I845_COLORS, @@ -292,7 +311,7 @@ static const struct platform_desc i915g_desc = { }; static const struct platform_desc i915gm_desc = { - PLATFORM(I915GM), + PLATFORM(i915gm), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I9XX_COLORS, @@ -305,7 +324,7 @@ static const struct platform_desc i915gm_desc = { }; static const struct platform_desc i945g_desc = { - PLATFORM(I945G), + PLATFORM(i945g), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I845_COLORS, @@ -316,7 +335,7 @@ static const struct platform_desc i945g_desc = { }; static const struct platform_desc i945gm_desc = { - PLATFORM(I915GM), + PLATFORM(i915gm), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I9XX_COLORS, @@ -330,7 +349,7 @@ static const struct platform_desc i945gm_desc = { }; static const struct platform_desc g33_desc = { - PLATFORM(G33), + PLATFORM(g33), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I845_COLORS, @@ -339,7 +358,7 @@ static const struct platform_desc g33_desc = { }; static const struct platform_desc pnv_desc = { - PLATFORM(PINEVIEW), + PLATFORM(pineview), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I9XX_COLORS, @@ -360,7 +379,7 @@ static const struct platform_desc pnv_desc = { BIT(TRANSCODER_A) | BIT(TRANSCODER_B) static const struct platform_desc i965g_desc = { - PLATFORM(I965G), + PLATFORM(i965g), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, .has_overlay = 1, @@ -370,7 +389,7 @@ static const struct platform_desc i965g_desc = { }; static const struct platform_desc i965gm_desc = { - PLATFORM(I965GM), + PLATFORM(i965gm), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, .has_overlay = 1, @@ -382,7 +401,8 @@ static const struct platform_desc i965gm_desc = { }; static const struct platform_desc g45_desc = { - PLATFORM(G45), + PLATFORM(g45), + PLATFORM_GROUP(g4x), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, @@ -391,7 +411,8 @@ static const struct platform_desc g45_desc = { }; static const struct platform_desc gm45_desc = { - PLATFORM(GM45), + PLATFORM(gm45), + 
PLATFORM_GROUP(g4x), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, .supports_tv = 1, @@ -414,14 +435,14 @@ static const struct platform_desc gm45_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ static const struct platform_desc ilk_d_desc = { - PLATFORM(IRONLAKE), + PLATFORM(ironlake), .info = &(const struct intel_display_device_info) { ILK_DISPLAY, }, }; static const struct platform_desc ilk_m_desc = { - PLATFORM(IRONLAKE), + PLATFORM(ironlake), .info = &(const struct intel_display_device_info) { ILK_DISPLAY, @@ -430,7 +451,7 @@ static const struct platform_desc ilk_m_desc = { }; static const struct platform_desc snb_desc = { - PLATFORM(SANDYBRIDGE), + PLATFORM(sandybridge), .info = &(const struct intel_display_device_info) { .has_hotplug = 1, I9XX_PIPE_OFFSETS, @@ -447,7 +468,7 @@ static const struct platform_desc snb_desc = { }; static const struct platform_desc ivb_desc = { - PLATFORM(IVYBRIDGE), + PLATFORM(ivybridge), .info = &(const struct intel_display_device_info) { .has_hotplug = 1, IVB_PIPE_OFFSETS, @@ -464,7 +485,7 @@ static const struct platform_desc ivb_desc = { }; static const struct platform_desc vlv_desc = { - PLATFORM(VALLEYVIEW), + PLATFORM(valleyview), .info = &(const struct intel_display_device_info) { .has_gmch = 1, .has_hotplug = 1, @@ -495,10 +516,19 @@ static const u16 hsw_ulx_ids[] = { }; static const struct platform_desc hsw_desc = { - PLATFORM(HASWELL), + PLATFORM(haswell), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_HASWELL_ULT, "ULT", hsw_ult_ids }, - { INTEL_DISPLAY_HASWELL_ULX, "ULX", hsw_ulx_ids }, + /* Special case: Use ult both as group and subplatform. */ + { + SUBPLATFORM(haswell, ult), + SUBPLATFORM_GROUP(haswell, ult), + .pciidlist = hsw_ult_ids, + }, + { + SUBPLATFORM(haswell, ulx), + SUBPLATFORM_GROUP(haswell, ult), + .pciidlist = hsw_ulx_ids, + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -539,10 +569,19 @@ static const u16 bdw_ulx_ids[] = { }; static const struct platform_desc bdw_desc = { - PLATFORM(BROADWELL), + PLATFORM(broadwell), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_BROADWELL_ULT, "ULT", bdw_ult_ids }, - { INTEL_DISPLAY_BROADWELL_ULX, "ULX", bdw_ulx_ids }, + /* Special case: Use ult both as group and subplatform. 
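/*
 * [Editor's sketch] Reduced model of the platforms-as-bitmap scheme
 * introduced here (the full X-macro lives in intel_display_device.h
 * later in this diff): one macro list expands both into 1-bit members
 * and into a "1 +" sequence that counts them, the member view is
 * unioned with a flat bitmap, and a group alias like ult is simply a
 * second bit ORed in next to the exact subplatform bit. Toy types; the
 * driver uses DECLARE_BITMAP() and checks disjointness via bit weights.
 */
#include <stdio.h>

#define PLATFORMS(func) \
	func(haswell) \
	func(haswell_ult) \
	func(haswell_ulx)

#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +
#define NUM_PLATFORMS (PLATFORMS(__COUNT) 0)

struct platforms {
	union {
		struct { PLATFORMS(__MEMBER) };
		unsigned long bitmap;
	};
};

int main(void)
{
	/* platform bit, as the PLATFORM() initializer macro would set it */
	struct platforms dev = { .haswell = 1 };
	/* ULX part: its own bit plus the ult group alias bit */
	struct platforms sub = { .haswell_ulx = 1, .haswell_ult = 1 };

	/* probe-time sanity check: platform and subplatform bits are disjoint */
	int disjoint = !(dev.bitmap & sub.bitmap);

	dev.bitmap |= sub.bitmap;	/* display_platforms_or() equivalent */

	printf("%d platforms, disjoint=%d, ult=%d, ulx=%d\n",
	       NUM_PLATFORMS, disjoint,
	       (int)dev.haswell_ult, (int)dev.haswell_ulx);
	return 0;
}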
*/ + { + SUBPLATFORM(broadwell, ult), + SUBPLATFORM_GROUP(broadwell, ult), + .pciidlist = bdw_ult_ids, + }, + { + SUBPLATFORM(broadwell, ulx), + SUBPLATFORM_GROUP(broadwell, ult), + .pciidlist = bdw_ulx_ids, + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -567,7 +606,7 @@ static const struct platform_desc bdw_desc = { }; static const struct platform_desc chv_desc = { - PLATFORM(CHERRYVIEW), + PLATFORM(cherryview), .info = &(const struct intel_display_device_info) { .has_hotplug = 1, .has_gmch = 1, @@ -630,10 +669,16 @@ static const enum intel_step skl_steppings[] = { }; static const struct platform_desc skl_desc = { - PLATFORM(SKYLAKE), + PLATFORM(skylake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_SKYLAKE_ULT, "ULT", skl_ult_ids }, - { INTEL_DISPLAY_SKYLAKE_ULX, "ULX", skl_ulx_ids }, + { + SUBPLATFORM(skylake, ult), + .pciidlist = skl_ult_ids, + }, + { + SUBPLATFORM(skylake, ulx), + .pciidlist = skl_ulx_ids, + }, {}, }, .info = &skl_display, @@ -665,10 +710,16 @@ static const enum intel_step kbl_steppings[] = { }; static const struct platform_desc kbl_desc = { - PLATFORM(KABYLAKE), + PLATFORM(kabylake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_KABYLAKE_ULT, "ULT", kbl_ult_ids }, - { INTEL_DISPLAY_KABYLAKE_ULX, "ULX", kbl_ulx_ids }, + { + SUBPLATFORM(kabylake, ult), + .pciidlist = kbl_ult_ids, + }, + { + SUBPLATFORM(kabylake, ulx), + .pciidlist = kbl_ulx_ids, + }, {}, }, .info = &skl_display, @@ -690,10 +741,16 @@ static const u16 cfl_ulx_ids[] = { }; static const struct platform_desc cfl_desc = { - PLATFORM(COFFEELAKE), + PLATFORM(coffeelake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_COFFEELAKE_ULT, "ULT", cfl_ult_ids }, - { INTEL_DISPLAY_COFFEELAKE_ULX, "ULX", cfl_ulx_ids }, + { + SUBPLATFORM(coffeelake, ult), + .pciidlist = cfl_ult_ids, + }, + { + SUBPLATFORM(coffeelake, ulx), + .pciidlist = cfl_ulx_ids, + }, {}, }, .info = &skl_display, @@ -706,9 +763,12 @@ static const u16 cml_ult_ids[] = { }; static const struct platform_desc cml_desc = { - PLATFORM(COMETLAKE), + PLATFORM(cometlake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_COMETLAKE_ULT, "ULT", cml_ult_ids }, + { + SUBPLATFORM(cometlake, ult), + .pciidlist = cml_ult_ids, + }, {}, }, .info = &skl_display, @@ -745,7 +805,7 @@ static const enum intel_step bxt_steppings[] = { }; static const struct platform_desc bxt_desc = { - PLATFORM(BROXTON), + PLATFORM(broxton), .info = &(const struct intel_display_device_info) { GEN9_LP_DISPLAY, .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */ @@ -760,7 +820,7 @@ static const enum intel_step glk_steppings[] = { }; static const struct platform_desc glk_desc = { - PLATFORM(GEMINILAKE), + PLATFORM(geminilake), .info = &(const struct intel_display_device_info) { GEN9_LP_DISPLAY, .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ @@ -822,9 +882,12 @@ static const enum intel_step icl_steppings[] = { }; static const struct platform_desc icl_desc = { - PLATFORM(ICELAKE), + PLATFORM(icelake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ICELAKE_PORT_F, "Port F", icl_port_f_ids }, + { + SUBPLATFORM(icelake, port_f), + .pciidlist = icl_port_f_ids, + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -847,13 +910,13 @@ static const enum intel_step jsl_ehl_steppings[] = { }; static const struct platform_desc jsl_desc = { - PLATFORM(JASPERLAKE), + PLATFORM(jasperlake), .info = &jsl_ehl_display, 
STEP_INFO(jsl_ehl_steppings), }; static const struct platform_desc ehl_desc = { - PLATFORM(ELKHARTLAKE), + PLATFORM(elkhartlake), .info = &jsl_ehl_display, STEP_INFO(jsl_ehl_steppings), }; @@ -919,10 +982,13 @@ static const enum intel_step tgl_uy_steppings[] = { }; static const struct platform_desc tgl_desc = { - PLATFORM(TIGERLAKE), + PLATFORM(tigerlake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids, - STEP_INFO(tgl_uy_steppings) }, + { + SUBPLATFORM(tigerlake, uy), + .pciidlist = tgl_uy_ids, + STEP_INFO(tgl_uy_steppings), + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -944,7 +1010,7 @@ static const enum intel_step dg1_steppings[] = { }; static const struct platform_desc dg1_desc = { - PLATFORM(DG1), + PLATFORM(dg1), .info = &(const struct intel_display_device_info) { XE_D_DISPLAY, @@ -961,7 +1027,7 @@ static const enum intel_step rkl_steppings[] = { }; static const struct platform_desc rkl_desc = { - PLATFORM(ROCKETLAKE), + PLATFORM(rocketlake), .info = &(const struct intel_display_device_info) { XE_D_DISPLAY, .abox_mask = BIT(0), @@ -996,10 +1062,13 @@ static const enum intel_step adl_s_rpl_s_steppings[] = { }; static const struct platform_desc adl_s_desc = { - PLATFORM(ALDERLAKE_S), + PLATFORM(alderlake_s), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids, - STEP_INFO(adl_s_rpl_s_steppings) }, + { + SUBPLATFORM(alderlake_s, raptorlake_s), + .pciidlist = adls_rpls_ids, + STEP_INFO(adl_s_rpl_s_steppings), + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -1100,14 +1169,23 @@ static const enum intel_step adl_p_rpl_pu_steppings[] = { }; static const struct platform_desc adl_p_desc = { - PLATFORM(ALDERLAKE_P), + PLATFORM(alderlake_p), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids, - STEP_INFO(adl_p_adl_n_steppings) }, - { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids, - STEP_INFO(adl_p_rpl_pu_steppings) }, - { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids, - STEP_INFO(adl_p_rpl_pu_steppings) }, + { + SUBPLATFORM(alderlake_p, alderlake_n), + .pciidlist = adlp_adln_ids, + STEP_INFO(adl_p_adl_n_steppings), + }, + { + SUBPLATFORM(alderlake_p, raptorlake_p), + .pciidlist = adlp_rplp_ids, + STEP_INFO(adl_p_rpl_pu_steppings), + }, + { + SUBPLATFORM(alderlake_p, raptorlake_u), + .pciidlist = adlp_rplu_ids, + STEP_INFO(adl_p_rpl_pu_steppings), + }, {}, }, .info = &xe_lpd_display, @@ -1159,14 +1237,23 @@ static const enum intel_step dg2_g12_steppings[] = { }; static const struct platform_desc dg2_desc = { - PLATFORM(DG2), + PLATFORM(dg2), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids, - STEP_INFO(dg2_g10_steppings) }, - { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids, - STEP_INFO(dg2_g11_steppings) }, - { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids, - STEP_INFO(dg2_g12_steppings) }, + { + SUBPLATFORM(dg2, g10), + .pciidlist = dg2_g10_ids, + STEP_INFO(dg2_g10_steppings), + }, + { + SUBPLATFORM(dg2, g11), + .pciidlist = dg2_g11_ids, + STEP_INFO(dg2_g11_steppings), + }, + { + SUBPLATFORM(dg2, g12), + .pciidlist = dg2_g12_ids, + STEP_INFO(dg2_g12_steppings), + }, {}, }, .info = &xe_hpd_display, @@ -1227,6 +1314,7 @@ static const struct intel_display_device_info xe2_lpd_display = { .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D), + 
.__runtime_defaults.has_dbuf_overlap_detection = true, }; static const struct intel_display_device_info xe2_hpd_display = { @@ -1241,15 +1329,19 @@ static const struct intel_display_device_info xe2_hpd_display = { * reported by the hardware. */ static const struct platform_desc mtl_desc = { - PLATFORM(METEORLAKE), + PLATFORM(meteorlake), }; static const struct platform_desc lnl_desc = { - PLATFORM(LUNARLAKE), + PLATFORM(lunarlake), }; static const struct platform_desc bmg_desc = { - PLATFORM(BATTLEMAGE), + PLATFORM(battlemage), +}; + +static const struct platform_desc ptl_desc = { + PLATFORM(pantherlake), }; __diag_pop(); @@ -1318,9 +1410,11 @@ static const struct { INTEL_RPLU_IDS(INTEL_DISPLAY_DEVICE, &adl_p_desc), INTEL_RPLP_IDS(INTEL_DISPLAY_DEVICE, &adl_p_desc), INTEL_DG2_IDS(INTEL_DISPLAY_DEVICE, &dg2_desc), + INTEL_ARL_IDS(INTEL_DISPLAY_DEVICE, &mtl_desc), INTEL_MTL_IDS(INTEL_DISPLAY_DEVICE, &mtl_desc), INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc), INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc), + INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc), }; static const struct { @@ -1331,6 +1425,7 @@ static const struct { { 14, 0, &xe_lpdp_display }, { 14, 1, &xe2_hpd_display }, { 20, 0, &xe2_lpd_display }, + { 30, 0, &xe2_lpd_display }, }; static const struct intel_display_device_info * @@ -1391,7 +1486,7 @@ find_subplatform_desc(struct pci_dev *pdev, const struct platform_desc *desc) const struct subplatform_desc *sp; const u16 *id; - for (sp = desc->subplatforms; sp && sp->subplatform; sp++) + for (sp = desc->subplatforms; sp && sp->pciidlist; sp++) for (id = sp->pciidlist; *id; id++) if (*id == pdev->device) return sp; @@ -1450,6 +1545,25 @@ static enum intel_step get_pre_gmdid_step(struct intel_display *display, return step; } +/* Size of the entire bitmap, not the number of platforms */ +static unsigned int display_platforms_num_bits(void) +{ + return sizeof(((struct intel_display_platforms *)0)->bitmap) * BITS_PER_BYTE; +} + +/* Number of platform bits set */ +static unsigned int display_platforms_weight(const struct intel_display_platforms *p) +{ + return bitmap_weight(p->bitmap, display_platforms_num_bits()); +} + +/* Merge the subplatform information from src to dst */ +static void display_platforms_or(struct intel_display_platforms *dst, + const struct intel_display_platforms *src) +{ + bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits()); +} + void intel_display_device_probe(struct drm_i915_private *i915) { struct intel_display *display = &i915->display; @@ -1489,13 +1603,23 @@ void intel_display_device_probe(struct drm_i915_private *i915) &DISPLAY_INFO(i915)->__runtime_defaults, sizeof(*DISPLAY_RUNTIME_INFO(i915))); - drm_WARN_ON(&i915->drm, !desc->platform || !desc->name); - DISPLAY_RUNTIME_INFO(i915)->platform = desc->platform; + drm_WARN_ON(&i915->drm, !desc->name || + !display_platforms_weight(&desc->platforms)); + + display->platform = desc->platforms; subdesc = find_subplatform_desc(pdev, desc); if (subdesc) { - drm_WARN_ON(&i915->drm, !subdesc->subplatform || !subdesc->name); - DISPLAY_RUNTIME_INFO(i915)->subplatform = subdesc->subplatform; + drm_WARN_ON(&i915->drm, !subdesc->name || + !display_platforms_weight(&subdesc->platforms)); + + display_platforms_or(&display->platform, &subdesc->platforms); + + /* Ensure platform and subplatform are distinct */ + drm_WARN_ON(&i915->drm, + display_platforms_weight(&display->platform) != + display_platforms_weight(&desc->platforms) + + display_platforms_weight(&subdesc->platforms)); } if (ip_ver.ver || 
ip_ver.rel || ip_ver.step) { @@ -1531,6 +1655,7 @@ void intel_display_device_remove(struct drm_i915_private *i915) static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915); enum pipe pipe; @@ -1651,8 +1776,10 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) display_runtime->has_hdcp = 0; - if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - display_runtime->fbc_mask = 0; + if (IS_DG2(i915) || DISPLAY_VER(i915) < 13) { + if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) + display_runtime->fbc_mask = 0; + } if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) display_runtime->has_dmc = 0; @@ -1660,6 +1787,10 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 if (IS_DISPLAY_VER(i915, 10, 12) && (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) display_runtime->has_dsc = 0; + + if (DISPLAY_VER(display) >= 20 && + (dfsm & XE2LPD_DFSM_DBUF_OVERLAP_DISABLE)) + display_runtime->has_dbuf_overlap_detection = false; } if (DISPLAY_VER(i915) >= 20) { @@ -1677,7 +1808,11 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 } } - display_runtime->rawclk_freq = intel_read_rawclk(i915); + if (DISPLAY_VER(i915) >= 30) + display_runtime->edp_typec_support = + intel_de_read(display, PICA_PHY_CONFIG_CONTROL) & EDP_ON_TYPEC; + + display_runtime->rawclk_freq = intel_read_rawclk(display); drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq); return; diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index dfb0c8bf5ca2..43144a037f9f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -6,6 +6,7 @@ #ifndef __INTEL_DISPLAY_DEVICE_H__ #define __INTEL_DISPLAY_DEVICE_H__ +#include <linux/bitops.h> #include <linux/types.h> #include "intel_display_conversion.h" @@ -14,89 +15,108 @@ struct drm_i915_private; struct drm_printer; -/* Keep in gen based order, and chronological order within a gen */ -enum intel_display_platform { - INTEL_DISPLAY_PLATFORM_UNINITIALIZED = 0, - /* Display ver 2 */ - INTEL_DISPLAY_I830, - INTEL_DISPLAY_I845G, - INTEL_DISPLAY_I85X, - INTEL_DISPLAY_I865G, - /* Display ver 3 */ - INTEL_DISPLAY_I915G, - INTEL_DISPLAY_I915GM, - INTEL_DISPLAY_I945G, - INTEL_DISPLAY_I945GM, - INTEL_DISPLAY_G33, - INTEL_DISPLAY_PINEVIEW, - /* Display ver 4 */ - INTEL_DISPLAY_I965G, - INTEL_DISPLAY_I965GM, - INTEL_DISPLAY_G45, - INTEL_DISPLAY_GM45, - /* Display ver 5 */ - INTEL_DISPLAY_IRONLAKE, - /* Display ver 6 */ - INTEL_DISPLAY_SANDYBRIDGE, - /* Display ver 7 */ - INTEL_DISPLAY_IVYBRIDGE, - INTEL_DISPLAY_VALLEYVIEW, - INTEL_DISPLAY_HASWELL, - /* Display ver 8 */ - INTEL_DISPLAY_BROADWELL, - INTEL_DISPLAY_CHERRYVIEW, - /* Display ver 9 */ - INTEL_DISPLAY_SKYLAKE, - INTEL_DISPLAY_BROXTON, - INTEL_DISPLAY_KABYLAKE, - INTEL_DISPLAY_GEMINILAKE, - INTEL_DISPLAY_COFFEELAKE, - INTEL_DISPLAY_COMETLAKE, - /* Display ver 11 */ - INTEL_DISPLAY_ICELAKE, - INTEL_DISPLAY_JASPERLAKE, - INTEL_DISPLAY_ELKHARTLAKE, - /* Display ver 12 */ - INTEL_DISPLAY_TIGERLAKE, - INTEL_DISPLAY_ROCKETLAKE, - INTEL_DISPLAY_DG1, - INTEL_DISPLAY_ALDERLAKE_S, - /* Display ver 13 */ - INTEL_DISPLAY_ALDERLAKE_P, - INTEL_DISPLAY_DG2, - /* Display ver 14 (based on GMD ID) */ - INTEL_DISPLAY_METEORLAKE, - /* Display ver 20 
(based on GMD ID) */ - INTEL_DISPLAY_LUNARLAKE, - /* Display ver 14.1 (based on GMD ID) */ - INTEL_DISPLAY_BATTLEMAGE, -}; +/* + * Display platforms and subplatforms. Keep platforms in display version based + * order, chronological order within a version, and subplatforms next to the + * platform. + */ +#define INTEL_DISPLAY_PLATFORMS(func) \ + /* Display ver 2 */ \ + func(i830) \ + func(i845g) \ + func(i85x) \ + func(i865g) \ + /* Display ver 3 */ \ + func(i915g) \ + func(i915gm) \ + func(i945g) \ + func(i945gm) \ + func(g33) \ + func(pineview) \ + /* Display ver 4 */ \ + func(i965g) \ + func(i965gm) \ + func(g45) \ + func(gm45) \ + func(g4x) /* group alias for g45 and gm45 */ \ + /* Display ver 5 */ \ + func(ironlake) \ + /* Display ver 6 */ \ + func(sandybridge) \ + /* Display ver 7 */ \ + func(ivybridge) \ + func(valleyview) \ + func(haswell) \ + func(haswell_ult) \ + func(haswell_ulx) \ + /* Display ver 8 */ \ + func(broadwell) \ + func(broadwell_ult) \ + func(broadwell_ulx) \ + func(cherryview) \ + /* Display ver 9 */ \ + func(skylake) \ + func(skylake_ult) \ + func(skylake_ulx) \ + func(broxton) \ + func(kabylake) \ + func(kabylake_ult) \ + func(kabylake_ulx) \ + func(geminilake) \ + func(coffeelake) \ + func(coffeelake_ult) \ + func(coffeelake_ulx) \ + func(cometlake) \ + func(cometlake_ult) \ + func(cometlake_ulx) \ + /* Display ver 11 */ \ + func(icelake) \ + func(icelake_port_f) \ + func(jasperlake) \ + func(elkhartlake) \ + /* Display ver 12 */ \ + func(tigerlake) \ + func(tigerlake_uy) \ + func(rocketlake) \ + func(dg1) \ + func(alderlake_s) \ + func(alderlake_s_raptorlake_s) \ + /* Display ver 13 */ \ + func(alderlake_p) \ + func(alderlake_p_alderlake_n) \ + func(alderlake_p_raptorlake_p) \ + func(alderlake_p_raptorlake_u) \ + func(dg2) \ + func(dg2_g10) \ + func(dg2_g11) \ + func(dg2_g12) \ + /* Display ver 14 (based on GMD ID) */ \ + func(meteorlake) \ + /* Display ver 20 (based on GMD ID) */ \ + func(lunarlake) \ + /* Display ver 14.1 (based on GMD ID) */ \ + func(battlemage) \ + /* Display ver 30 (based on GMD ID) */ \ + func(pantherlake) + +#define __MEMBER(name) unsigned long name:1; +#define __COUNT(x) 1 + -enum intel_display_subplatform { - INTEL_DISPLAY_SUBPLATFORM_UNINITIALIZED = 0, - INTEL_DISPLAY_HASWELL_ULT, - INTEL_DISPLAY_HASWELL_ULX, - INTEL_DISPLAY_BROADWELL_ULT, - INTEL_DISPLAY_BROADWELL_ULX, - INTEL_DISPLAY_SKYLAKE_ULT, - INTEL_DISPLAY_SKYLAKE_ULX, - INTEL_DISPLAY_KABYLAKE_ULT, - INTEL_DISPLAY_KABYLAKE_ULX, - INTEL_DISPLAY_COFFEELAKE_ULT, - INTEL_DISPLAY_COFFEELAKE_ULX, - INTEL_DISPLAY_COMETLAKE_ULT, - INTEL_DISPLAY_COMETLAKE_ULX, - INTEL_DISPLAY_ICELAKE_PORT_F, - INTEL_DISPLAY_TIGERLAKE_UY, - INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, - INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, - INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, - INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, - INTEL_DISPLAY_DG2_G10, - INTEL_DISPLAY_DG2_G11, - INTEL_DISPLAY_DG2_G12, +#define __NUM_PLATFORMS (INTEL_DISPLAY_PLATFORMS(__COUNT) 0) + +struct intel_display_platforms { + union { + struct { + INTEL_DISPLAY_PLATFORMS(__MEMBER); + }; + DECLARE_BITMAP(bitmap, __NUM_PLATFORMS); + }; }; +#undef __MEMBER +#undef __COUNT +#undef __NUM_PLATFORMS + #define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ /* Keep in alphabetical order */ \ func(cursor_needs_physical); \ @@ -118,10 +138,12 @@ enum intel_display_subplatform { #define HAS_4TILE(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14) #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) +#define HAS_BIGJOINER(i915) (DISPLAY_VER(i915) >= 11 && HAS_DSC(i915)) #define 
HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl) #define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash) #define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13)) #define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915)) +#define HAS_DBUF_OVERLAP_DETECTION(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dbuf_overlap_detection) #define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi) #define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0) #define HAS_DMC(i915) (DISPLAY_RUNTIME_INFO(i915)->has_dmc) @@ -149,9 +171,13 @@ enum intel_display_subplatform { #define HAS_PSR(i915) (DISPLAY_INFO(i915)->has_psr) #define HAS_PSR_HW_TRACKING(i915) (DISPLAY_INFO(i915)->has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12) -#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_LP(i915)) +#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915) && !IS_GEMINILAKE(i915)) #define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \ BIT(trans)) != 0) +#define HAS_UNCOMPRESSED_JOINER(i915) (DISPLAY_VER(i915) >= 13) +#define HAS_ULTRAJOINER(i915) ((DISPLAY_VER(i915) >= 20 || \ + (IS_DGFX(i915) && DISPLAY_VER(i915) == 14)) && \ + HAS_DSC(i915)) #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) #define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13) #define HAS_CMRR(i915) (DISPLAY_VER(i915) >= 20) @@ -161,10 +187,10 @@ enum intel_display_subplatform { #define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv) /* Check that device has a display IP version within the specific range. */ -#define IS_DISPLAY_VER_FULL(__i915, from, until) ( \ - BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \ - (DISPLAY_VER_FULL(__i915) >= (from) && \ - DISPLAY_VER_FULL(__i915) <= (until))) +#define IS_DISPLAY_VERx100(__i915, from, until) ( \ + BUILD_BUG_ON_ZERO((from) < 200) + \ + (DISPLAY_VERx100(__i915) >= (from) && \ + DISPLAY_VERx100(__i915) <= (until))) /* * Check if a device has a specific IP version as well as a stepping within the @@ -175,22 +201,22 @@ enum intel_display_subplatform { * hardware fix is present and the software workaround is no longer necessary. * E.g., * - * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2) - * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER) + * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_B2) + * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_C0, STEP_FOREVER) * * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper * stepping bound for the specified IP version. 
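/*
 * [Editor's note] The IS_DISPLAY_VERx100() rename above drops the packed
 * IP_VER(ver, rel) encoding for a plain decimal ver * 100 + rel, so
 * display version 14.1 reads as 1401 and a range check is an ordinary
 * integer comparison. Minimal illustration with simplified arguments
 * (the driver macros take the device pointer, not raw numbers):
 */
#include <assert.h>

#define DISPLAY_VERx100(ver, rel)	((ver) * 100 + (rel))
#define IS_DISPLAY_VERx100(v, from, until) \
	((v) >= (from) && (v) <= (until))

int main(void)
{
	int v = DISPLAY_VERx100(14, 1);			/* 1401 == version 14.1 */

	assert(IS_DISPLAY_VERx100(v, 1400, 1401));	/* within 14.0..14.1 */
	assert(!IS_DISPLAY_VERx100(v, 2000, 2000));	/* not version 20.0 */
	return 0;
}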
*/ -#define IS_DISPLAY_VER_STEP(__i915, ipver, from, until) \ - (IS_DISPLAY_VER_FULL((__i915), (ipver), (ipver)) && \ +#define IS_DISPLAY_VERx100_STEP(__i915, ipver, from, until) \ + (IS_DISPLAY_VERx100((__i915), (ipver), (ipver)) && \ IS_DISPLAY_STEP((__i915), (from), (until))) #define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info) #define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info) #define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver) -#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \ - DISPLAY_RUNTIME_INFO(i915)->ip.rel) +#define DISPLAY_VERx100(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver * 100 + \ + DISPLAY_RUNTIME_INFO(i915)->ip.rel) #define IS_DISPLAY_VER(i915, from, until) \ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) @@ -201,9 +227,6 @@ enum intel_display_subplatform { INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) struct intel_display_runtime_info { - enum intel_display_platform platform; - enum intel_display_subplatform subplatform; - struct intel_display_ip_ver { u16 ver; u16 rel; @@ -225,6 +248,8 @@ struct intel_display_runtime_info { bool has_hdcp; bool has_dmc; bool has_dsc; + bool edp_typec_support; + bool has_dbuf_overlap_detection; }; struct intel_display_device_info { diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 069426d9260b..56b78cf6b854 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -11,7 +11,7 @@ #include <acpi/video.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_mode_config.h> #include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> @@ -82,16 +82,17 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev) void intel_display_driver_init_hw(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_cdclk_state *cdclk_state; if (!HAS_DISPLAY(i915)) return; - cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); + cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state); - intel_update_cdclk(i915); - intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); - cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); + cdclk_state->logical = cdclk_state->actual = display->cdclk.hw; intel_display_wa_apply(i915); } @@ -168,10 +169,11 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915) static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); @@ -192,8 +194,8 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915) intel_display_irq_init(i915); intel_dkl_phy_init(i915); - intel_color_init_hooks(i915); - intel_init_cdclk_hooks(i915); + intel_color_init_hooks(&i915->display); + intel_init_cdclk_hooks(&i915->display); intel_audio_hooks_init(i915); intel_dpll_init_clock_hook(i915); 
intel_init_display_hooks(i915); @@ -219,7 +221,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) intel_bios_init(display); - ret = intel_vga_register(i915); + ret = intel_vga_register(display); if (ret) goto cleanup_bios; @@ -235,7 +237,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return 0; - intel_dmc_init(i915); + intel_dmc_init(display); i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | @@ -243,11 +245,11 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) intel_mode_config_init(i915); - ret = intel_cdclk_init(i915); + ret = intel_cdclk_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; - ret = intel_color_init(i915); + ret = intel_color_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; @@ -270,10 +272,10 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) return 0; cleanup_vga_client_pw_domain_dmc: - intel_dmc_fini(i915); + intel_dmc_fini(display); intel_power_domains_driver_remove(i915); cleanup_vga: - intel_vga_unregister(i915); + intel_vga_unregister(display); cleanup_bios: intel_bios_driver_remove(display); @@ -430,7 +432,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_pps_setup(display); - intel_gmbus_setup(i915); + intel_gmbus_setup(display); drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", INTEL_NUM_PIPES(i915), @@ -450,13 +452,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_display_driver_init_hw(i915); intel_dpll_update_ref_clks(i915); - if (i915->display.cdclk.max_cdclk_freq == 0) - intel_update_max_cdclk(i915); + if (display->cdclk.max_cdclk_freq == 0) + intel_update_max_cdclk(display); intel_hti_init(display); /* Just disable it once at startup */ - intel_vga_disable(i915); + intel_vga_disable(display); intel_setup_outputs(i915); ret = intel_dp_tunnel_mgr_init(display); @@ -483,7 +485,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) return 0; err_hdcp: - intel_hdcp_component_fini(i915); + intel_hdcp_component_fini(display); err_mode_config: intel_mode_config_cleanup(i915); @@ -493,6 +495,7 @@ err_mode_config: /* part #3: call after gem init */ int intel_display_driver_probe(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; int ret; if (!HAS_DISPLAY(i915)) @@ -503,7 +506,7 @@ int intel_display_driver_probe(struct drm_i915_private *i915) * the BIOS fb takeover and whatever else magic ggtt reservations * happen during gem/ggtt init. */ - intel_hdcp_component_init(i915); + intel_hdcp_component_init(display); /* * Force all active planes to recompute their states. 
So that on @@ -598,7 +601,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) /* flush any delayed tasks or pending work */ flush_workqueue(i915->unordered_wq); - intel_hdcp_component_fini(i915); + intel_hdcp_component_fini(display); intel_mode_config_cleanup(i915); @@ -606,7 +609,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) intel_overlay_cleanup(i915); - intel_gmbus_teardown(i915); + intel_gmbus_teardown(display); destroy_workqueue(i915->display.wq.flip); destroy_workqueue(i915->display.wq.modeset); @@ -619,11 +622,11 @@ void intel_display_driver_remove_nogem(struct drm_i915_private *i915) { struct intel_display *display = &i915->display; - intel_dmc_fini(i915); + intel_dmc_fini(display); intel_power_domains_driver_remove(i915); - intel_vga_unregister(i915); + intel_vga_unregister(display); intel_bios_driver_remove(display); } @@ -681,12 +684,13 @@ __intel_display_driver_resume(struct drm_i915_private *i915, struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { + struct intel_display *display = &i915->display; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; int ret, i; intel_modeset_setup_hw_state(i915, ctx); - intel_vga_redisable(i915); + intel_vga_redisable(display); if (!state) return 0; diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 73369847ed66..e1547ebce60e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_vblank.h> + #include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_irq.h" @@ -27,7 +29,8 @@ static void intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); drm_crtc_handle_vblank(&crtc->base); } @@ -269,14 +272,17 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, intel_uncore_posting_read(&dev_priv->uncore, reg); } -static bool i915_has_asle(struct drm_i915_private *i915) +static bool i915_has_legacy_blc_interrupt(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); - if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915)) - return false; + if (IS_I85X(i915)) + return true; + + if (IS_PINEVIEW(i915)) + return true; - return intel_opregion_asle_present(display); + return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915); } /** @@ -285,7 +291,12 @@ static bool i915_has_asle(struct drm_i915_private *i915) */ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - if (!i915_has_asle(dev_priv)) + struct intel_display *display = &dev_priv->display; + + if (!intel_opregion_asle_present(display)) + return; + + if (!i915_has_legacy_blc_interrupt(display)) return; spin_lock_irq(&dev_priv->irq_lock); @@ -298,14 +309,15 @@ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -#if defined(CONFIG_DEBUG_FS) +#if IS_ENABLED(CONFIG_DEBUG_FS) static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = 
intel_crtc_for_pipe(display, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -344,7 +356,8 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + struct intel_display *display = &i915->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); spin_lock(&i915->drm.event_lock); @@ -400,7 +413,7 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, res1, res2); } -void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) +static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -480,28 +493,10 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->irq_lock); } -void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, - u16 iir, u32 pipe_stats[I915_MAX_PIPES]) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } -} - void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { struct intel_display *display = &dev_priv->display; - bool blc_event = false; enum pipe pipe; @@ -548,12 +543,13 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_opregion_asle_intr(display); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -571,7 +567,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) @@ -593,7 +589,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_dp_aux_irq_handler(display); if (pch_iir & SDE_GMBUS) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_HDCP_MASK) drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); @@ -682,7 +678,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_dp_aux_irq_handler(display); if (pch_iir & SDE_GMBUS_CPT) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); @@ -907,6 +903,13 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) struct intel_display *display = &dev_priv->display; bool found = false; + if (HAS_DBUF_OVERLAP_DETECTION(display)) { + if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) { + drm_warn(display->drm, "DBuf overlap detected\n"); + found = true; + } + } + if (DISPLAY_VER(dev_priv) >= 14) { if (iir & (XELPDP_PMDEMAND_RSP | XELPDP_PMDEMAND_RSPTOUT_ERR)) { @@ -1026,17 +1029,6 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) return GEN8_PIPE_PRIMARY_FLIP_DONE; } -u32 
gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) -{ - u32 mask = GEN8_PIPE_FIFO_UNDERRUN; - - if (DISPLAY_VER(dev_priv) >= 13) - mask |= XELPD_PIPE_SOFT_UNDERRUN | - XELPD_PIPE_HARD_UNDERRUN; - - return mask; -} - static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) { u32 pica_ier = 0; @@ -1125,7 +1117,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && (iir & BXT_DE_PORT_GMBUS)) { - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); found = true; } @@ -1182,7 +1174,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); - if (iir & gen8_de_pipe_underrun_mask(dev_priv)) + if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); @@ -1226,15 +1218,14 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) { - void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; - iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); + iir = intel_de_read(i915, GEN11_GU_MISC_IIR); if (likely(iir)) - raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); + intel_de_write(i915, GEN11_GU_MISC_IIR, iir); return iir; } @@ -1249,25 +1240,56 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) void gen11_display_irq_handler(struct drm_i915_private *i915) { - void __iomem * const regs = intel_uncore_regs(&i915->uncore); - const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); + u32 disp_ctl; disable_rpm_wakeref_asserts(&i915->runtime_pm); /* * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ * for the display related bits. */ - raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); + disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL); + + intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0); gen8_de_irq_handler(i915, disp_ctl); - raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, - GEN11_DISPLAY_IRQ_ENABLE); + intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); enable_rpm_wakeref_asserts(&i915->runtime_pm); } -/* Called from drm generic code, passed 'crtc' which - * we use as a pipe index - */ +static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915) +{ + lockdep_assert_held(&i915->drm.vblank_time_lock); + + /* + * Vblank/CRC interrupts fail to wake the device up from C2+. + * Disabling render clock gating during C-states avoids + * the problem. There is a small power cost so we do this + * only when vblank/CRC interrupts are actually enabled. 
+ */ + if (i915->display.irq.vblank_enabled++ == 0) + intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); +} + +static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915) +{ + lockdep_assert_held(&i915->drm.vblank_time_lock); + + if (--i915->display.irq.vblank_enabled == 0) + intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); +} + +void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable) +{ + spin_lock_irq(&i915->drm.vblank_time_lock); + + if (enable) + i915gm_irq_cstate_wa_enable(i915); + else + i915gm_irq_cstate_wa_disable(i915); + + spin_unlock_irq(&i915->drm.vblank_time_lock); +} + int i8xx_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -1281,22 +1303,35 @@ int i8xx_enable_vblank(struct drm_crtc *crtc) return 0; } +void i8xx_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + int i915gm_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->dev); - /* - * Vblank interrupts fail to wake the device up from C2+. - * Disabling render clock gating during C-states avoids - * the problem. There is a small power cost so we do this - * only when vblank interrupts are actually enabled. - */ - if (i915->display.irq.vblank_enabled++ == 0) - intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + i915gm_irq_cstate_wa_enable(i915); return i8xx_enable_vblank(crtc); } +void i915gm_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->dev); + + i8xx_disable_vblank(crtc); + + i915gm_irq_cstate_wa_disable(i915); +} + int i965_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -1311,6 +1346,18 @@ int i965_enable_vblank(struct drm_crtc *crtc) return 0; } +void i965_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + int ilk_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -1332,6 +1379,19 @@ int ilk_enable_vblank(struct drm_crtc *crtc) return 0; } +void ilk_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ilk_disable_display_irq(dev_priv, bit); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, bool enable) { @@ -1356,9 +1416,27 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, return true; } +static void intel_display_vblank_dc_work(struct work_struct *work) +{ + struct intel_display *display = + container_of(work, typeof(*display), irq.vblank_dc_work); + struct drm_i915_private *i915 = to_i915(display->drm); + int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes); + + /* + * NOTE: intel_display_power_set_target_dc_state is used only by PSR + * code for DC3CO handling. DC3CO target state is currently disabled in + * PSR code. If DC3CO is taken into use we need to take that into account + * here as well. + */ + intel_display_power_set_target_dc_state(i915, vblank_wa_num_pipes ? DC_STATE_DISABLE : + DC_STATE_EN_UPTO_DC6); +} + int bdw_enable_vblank(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; unsigned long irqflags; @@ -1366,6 +1444,9 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) if (gen11_dsi_configure_te(crtc, true)) return 0; + if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0) + schedule_work(&display->irq.vblank_dc_work); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1379,58 +1460,10 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) return 0; } -/* Called from drm generic code, passed 'crtc' which - * we use as a pipe index - */ -void i8xx_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void i915gm_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *i915 = to_i915(crtc->dev); - - i8xx_disable_vblank(crtc); - - if (--i915->display.irq.vblank_enabled == 0) - intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); -} - -void i965_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void ilk_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
- DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_disable_display_irq(dev_priv, bit); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - void bdw_disable_vblank(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; unsigned long irqflags; @@ -1441,6 +1474,9 @@ void bdw_disable_vblank(struct drm_crtc *_crtc) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0) + schedule_work(&display->irq.vblank_dc_work); } void vlv_display_irq_reset(struct drm_i915_private *dev_priv) @@ -1457,10 +1493,21 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv) i9xx_pipestat_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, VLV_); + gen2_irq_reset(uncore, VLV_IRQ_REGS); dev_priv->irq_mask = ~0u; } +void i9xx_display_irq_reset(struct drm_i915_private *i915) +{ + if (I915_HAS_HOTPLUG(i915)) { + i915_hotplug_interrupt_update(i915, 0xffffffff, 0); + intel_uncore_rmw(&i915->uncore, + PORT_HOTPLUG_STAT(i915), 0, 0); + } + + i9xx_pipestat_irq_reset(i915); +} + void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -1489,7 +1536,7 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~enable_mask; - GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask); } void gen8_display_irq_reset(struct drm_i915_private *dev_priv) @@ -1506,10 +1553,10 @@ void gen8_display_irq_reset(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe)); - GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); - GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); + gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS); } void gen11_display_irq_reset(struct drm_i915_private *dev_priv) @@ -1549,26 +1596,25 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe)); - GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); - GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); + gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS); if (DISPLAY_VER(dev_priv) >= 14) - GEN3_IRQ_RESET(uncore, PICAINTERRUPT_); + gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS); else - GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); + gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - GEN3_IRQ_RESET(uncore, SDE); + gen2_irq_reset(uncore, SDE_IRQ_REGS); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask) { struct intel_uncore *uncore = &dev_priv->uncore; - u32 extra_ier = GEN8_PIPE_VBLANK | - gen8_de_pipe_underrun_mask(dev_priv) | + u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | gen8_de_pipe_flip_done_mask(dev_priv); enum pipe pipe; @@ -1580,9 +1626,9 @@ void 
gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, } for_each_pipe_masked(dev_priv, pipe, pipe_mask) - GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->display.irq.de_irq_mask[pipe], - ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); + gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe), + dev_priv->display.irq.de_irq_mask[pipe], + ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } @@ -1601,7 +1647,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, } for_each_pipe_masked(dev_priv, pipe, pipe_mask) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe)); spin_unlock_irq(&dev_priv->irq_lock); @@ -1635,7 +1681,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) else mask = SDE_GMBUS_CPT; - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); + gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff); } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) @@ -1692,7 +1738,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) } if (IS_HASWELL(i915)) { - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR); display_mask |= DE_EDP_PSR_INT_HSW; } @@ -1703,7 +1749,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) ibx_irq_postinstall(i915); - GEN3_IRQ_INIT(uncore, DE, i915->irq_mask, + gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask, display_mask | extra_mask); } @@ -1751,14 +1797,16 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_port_masked |= DSI0_TE | DSI1_TE; } + if (HAS_DBUF_OVERLAP_DETECTION(display)) + de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED; + if (HAS_DSB(dev_priv)) de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) | GEN12_DSB_INT(INTEL_DSB_1) | GEN12_DSB_INT(INTEL_DSB_2); de_pipe_enables = de_pipe_masked | - GEN8_PIPE_VBLANK | - gen8_de_pipe_underrun_mask(dev_priv) | + GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | gen8_de_pipe_flip_done_mask(dev_priv); de_port_enables = de_port_masked; @@ -1777,11 +1825,11 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (!intel_display_power_is_enabled(dev_priv, domain)) continue; - gen3_assert_iir_is_zero(uncore, + gen2_assert_iir_is_zero(uncore, TRANS_PSR_IIR(dev_priv, trans)); } } else { - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR); } for_each_pipe(dev_priv, pipe) { @@ -1789,20 +1837,20 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->display.irq.de_irq_mask[pipe], - de_pipe_enables); + gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe), + dev_priv->display.irq.de_irq_mask[pipe], + de_pipe_enables); } - GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); - GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); + gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables); + gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked); if (IS_DISPLAY_VER(dev_priv, 11, 13)) { u32 de_hpd_masked = 0; u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; - GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, + gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked, de_hpd_enables); } } @@ -1815,10 +1863,10 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915) u32 de_hpd_enables = 
de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK; - GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask, + gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask, de_hpd_enables); - GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff); + gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff); } static void icp_irq_postinstall(struct drm_i915_private *dev_priv) @@ -1826,7 +1874,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; u32 mask = SDE_GMBUS_ICP; - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); + gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff); } void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) @@ -1866,4 +1914,7 @@ void intel_display_irq_init(struct drm_i915_private *i915) i915->display.irq.display_irqs_enabled = false; intel_hotplug_irq_init(i915); + + INIT_WORK(&i915->display.irq.vblank_dc_work, + intel_display_vblank_dc_work); } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h index 2a090dd6abd7..b077712b7be1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.h +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -33,7 +33,6 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask); void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask); -u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *i915); int i8xx_enable_vblank(struct drm_crtc *crtc); int i915gm_enable_vblank(struct drm_crtc *crtc); @@ -54,6 +53,7 @@ void gen11_display_irq_handler(struct drm_i915_private *i915); u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl); void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir); +void i9xx_display_irq_reset(struct drm_i915_private *i915); void vlv_display_irq_reset(struct drm_i915_private *i915); void gen8_display_irq_reset(struct drm_i915_private *i915); void gen11_display_irq_reset(struct drm_i915_private *i915); @@ -68,15 +68,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe); void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); void i915_enable_asle_pipestat(struct drm_i915_private *i915); -void i9xx_pipestat_irq_reset(struct drm_i915_private *i915); void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]); -void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]); void intel_display_irq_init(struct drm_i915_private *i915); +void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable); + #endif /* __INTEL_DISPLAY_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h index c4775c99dc83..f0fa27e365ab 100644 --- a/drivers/gpu/drm/i915/display/intel_display_limits.h +++ b/drivers/gpu/drm/i915/display/intel_display_limits.h @@ -50,6 +50,16 @@ enum transcoder { }; /* + * Global 
legacy plane identifier. Valid only for primary/sprite + * planes on pre-g4x, and only for primary planes on g4x-bdw. + */ +enum i9xx_plane_id { + PLANE_A, + PLANE_B, + PLANE_C, +}; + +/* * Per-pipe plane identifier. * I915_MAX_PLANES in the enum below is the maximum (across all platforms) * number of planes per CRTC. Not all platforms really have this many planes, diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c index 1a45d300b6f0..024de8abcb1a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -173,14 +173,16 @@ static void _param_print_charp(struct drm_printer *p, const char *driver_name, /** * intel_display_params_dump - dump intel display modparams - * @display: display device + * @params: display params + * @driver_name: driver name to use for printing * @p: the &drm_printer * * Pretty printer for i915 modparams. */ -void intel_display_params_dump(struct intel_display *display, struct drm_printer *p) +void intel_display_params_dump(const struct intel_display_params *params, + const char *driver_name, struct drm_printer *p) { -#define PRINT(T, x, ...) _param_print(p, display->drm->driver->name, #x, display->params.x); +#define PRINT(T, x, ...) _param_print(p, driver_name, #x, params->x); INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT); #undef PRINT } diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h index da8dc943234b..dcb6face936a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_params.h @@ -9,7 +9,6 @@ #include <linux/types.h> struct drm_printer; -struct intel_display; /* * Invoke param, a function-like macro, for each intel display param, with @@ -56,8 +55,8 @@ struct intel_display_params { }; #undef MEMBER -void intel_display_params_dump(struct intel_display *display, - struct drm_printer *p); +void intel_display_params_dump(const struct intel_display_params *params, + const char *driver_name, struct drm_printer *p); void intel_display_params_copy(struct intel_display_params *dest); void intel_display_params_free(struct intel_display_params *params); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index ef2fdbf97346..2766fd9208b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -545,7 +545,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) - return false; + return NULL; mutex_lock(&power_domains->lock); @@ -560,7 +560,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, if (!is_enabled) { intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - wakeref = 0; + wakeref = NULL; } return wakeref; @@ -648,7 +648,7 @@ intel_display_power_put_async_work(struct work_struct *work) struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); - intel_wakeref_t old_work_wakeref = 0; + intel_wakeref_t old_work_wakeref = NULL; mutex_lock(&power_domains->lock); @@ -895,7 +895,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); 
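The intel_display_params_dump() change above decouples the helper from struct intel_display: callers now pass the params struct and the driver name explicitly. A brief sketch of a converted call site; the wrapper function here is hypothetical and only the dump call reflects the new signature:

    /* hypothetical caller; only the dump call reflects the new signature */
    static void example_params_dump(struct intel_display *display,
                                    struct drm_printer *p)
    {
            intel_display_params_dump(&display->params,
                                      display->drm->driver->name, p);
    }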
for_each_power_domain(domain, mask) { - intel_wakeref_t __maybe_unused wf = -1; + intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); @@ -1176,43 +1176,44 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc; - for_each_intel_crtc(&dev_priv->drm, crtc) - I915_STATE_WARN(dev_priv, crtc->active, - "CRTC for pipe %c enabled\n", - pipe_name(crtc->pipe)); - - I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), - "Display power well on\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, - "SPLL enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, - "WRPLL1 enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, - "WRPLL2 enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, PP_STATUS(dev_priv, 0)) & PP_ON, - "Panel power on\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, - "CPU PWM1 enabled\n"); + for_each_intel_crtc(display->drm, crtc) + INTEL_DISPLAY_STATE_WARN(display, crtc->active, + "CRTC for pipe %c enabled\n", + pipe_name(crtc->pipe)); + + INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2), + "Display power well on\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE, + "SPLL enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, + "WRPLL1 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, + "WRPLL2 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, PP_STATUS(display, 0)) & PP_ON, + "Panel power on\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + "CPU PWM1 enabled\n"); if (IS_HASWELL(dev_priv)) - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, - "CPU PWM2 enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, - "PCH PWM1 enabled\n"); - I915_STATE_WARN(dev_priv, - (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), - "Utility pin enabled in PWM mode\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, - "PCH GTC enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + "CPU PWM2 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + "PCH PWM1 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), + "Utility pin enabled in PWM mode\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE, + "PCH GTC enabled\n"); /* * In theory we can still leave IRQs enabled, as long as only the HPD @@ -1220,8 +1221,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough. 
*/ - I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv), - "IRQs enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv), + "IRQs enabled\n"); } static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) @@ -1300,6 +1301,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, */ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; u32 val; val = intel_de_read(dev_priv, LCPLL_CTL); @@ -1343,8 +1345,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); } /* @@ -1416,10 +1418,11 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* enable PCH reset handshake */ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); @@ -1438,28 +1441,29 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - intel_cdclk_init_hw(dev_priv); + intel_cdclk_init_hw(display); gen9_dbuf_enable(dev_priv); if (resume) - intel_dmc_load_program(dev_priv); + intel_dmc_load_program(display); } static void skl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; - gen9_disable_dc_states(dev_priv); + gen9_disable_dc_states(display); /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); - intel_cdclk_uninit_hw(dev_priv); + intel_cdclk_uninit_hw(display); /* The spec doesn't call for removing the reset handshake flag */ /* disable PG1 and Misc I/O */ @@ -1482,10 +1486,11 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT @@ -1506,28 +1511,29 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume mutex_unlock(&power_domains->lock); - intel_cdclk_init_hw(dev_priv); + intel_cdclk_init_hw(display); gen9_dbuf_enable(dev_priv); if (resume) - intel_dmc_load_program(dev_priv); + intel_dmc_load_program(display); } static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains 
*power_domains = &display->power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; - gen9_disable_dc_states(dev_priv); + gen9_disable_dc_states(display); /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); - intel_cdclk_uninit_hw(dev_priv); + intel_cdclk_uninit_hw(display); /* The spec doesn't call for removing the reset handshake flag */ @@ -1623,10 +1629,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && @@ -1657,7 +1664,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); /* 4. Enable CDCLK. */ - intel_cdclk_init_hw(dev_priv); + intel_cdclk_init_hw(display); if (DISPLAY_VER(dev_priv) >= 12) gen12_dbuf_slices_config(dev_priv); @@ -1677,14 +1684,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_snps_phy_wait_for_calibration(dev_priv); /* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */ - if (DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 1)) + if (DISPLAY_VERx100(dev_priv) == 1401) intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1); if (resume) - intel_dmc_load_program(dev_priv); + intel_dmc_load_program(display); /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */ - if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0))) + if (IS_DISPLAY_VERx100(dev_priv, 1200, 1300)) intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); @@ -1704,14 +1711,15 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; - gen9_disable_dc_states(dev_priv); - intel_dmc_disable_program(dev_priv); + gen9_disable_dc_states(display); + intel_dmc_disable_program(display); /* 1. Disable all display engine functions -> already done */ @@ -1719,7 +1727,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_dbuf_disable(dev_priv); /* 3.
Disable CD clock */ - intel_cdclk_uninit_hw(dev_priv); + intel_cdclk_uninit_hw(display); if (DISPLAY_VER(dev_priv) == 14) intel_de_rmw(dev_priv, DC_STATE_EN, 0, @@ -2066,7 +2074,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915) */ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) { - struct i915_power_domains *power_domains = &i915->display.power.domains; + struct intel_display *display = &i915->display; + struct i915_power_domains *power_domains = &display->power.domains; intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&power_domains->init_wakeref); @@ -2080,7 +2089,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) * that would be blocked if the firmware was inactive. */ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && - intel_dmc_has_payload(i915)) { + intel_dmc_has_payload(display)) { intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); return; @@ -2225,9 +2234,11 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915) void intel_display_power_suspend_late(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { - bxt_enable_dc9(i915); + bxt_enable_dc9(display); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } @@ -2239,10 +2250,12 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915) void intel_display_power_resume_early(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { - gen9_sanitize_dc_state(i915); - bxt_disable_dc9(i915); + gen9_sanitize_dc_state(display); + bxt_disable_dc9(display); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } @@ -2254,12 +2267,14 @@ void intel_display_power_resume_early(struct drm_i915_private *i915) void intel_display_power_suspend(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (DISPLAY_VER(i915) >= 11) { icl_display_core_uninit(i915); - bxt_enable_dc9(i915); + bxt_enable_dc9(display); } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_display_core_uninit(i915); - bxt_enable_dc9(i915); + bxt_enable_dc9(display); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } @@ -2267,23 +2282,24 @@ void intel_display_power_suspend(struct drm_i915_private *i915) void intel_display_power_resume(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->display.power.domains; + struct intel_display *display = &i915->display; + struct i915_power_domains *power_domains = &display->power.domains; if (DISPLAY_VER(i915) >= 11) { - bxt_disable_dc9(i915); + bxt_disable_dc9(display); icl_display_core_init(i915, true); - if (intel_dmc_has_payload(i915)) { + if (intel_dmc_has_payload(display)) { if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) - skl_enable_dc6(i915); + skl_enable_dc6(display); else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) - gen9_enable_dc5(i915); + gen9_enable_dc5(display); } } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { - bxt_disable_dc9(i915); + bxt_disable_dc9(display); bxt_display_core_init(i915, true); - if (intel_dmc_has_payload(i915) && + if (intel_dmc_has_payload(display) && (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) - gen9_enable_dc5(i915); + gen9_enable_dc5(display); } else if 
(IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 425452c5a469..3f8f84df4733 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -232,7 +232,7 @@ intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - __intel_display_power_put_async(i915, domain, -1, -1); + __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, -1); } static inline void @@ -241,7 +241,7 @@ intel_display_power_put_async_delay(struct drm_i915_private *i915, intel_wakeref_t wakeref, int delay_ms) { - __intel_display_power_put_async(i915, domain, -1, delay_ms); + __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, delay_ms); } #endif @@ -297,10 +297,10 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, #define with_intel_display_power(i915, domain, wf) \ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ - intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) + intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL) #define with_intel_display_power_if_enabled(i915, domain, wf) \ for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \ - intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) + intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL) #endif /* __INTEL_DISPLAY_POWER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 10948b3964ee..5575aa0d6689 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1586,6 +1586,136 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = { I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), }; +/* + * Xe3 changes the power well hierarchy slightly from Xe_LPD+; PGB now + * depends on PG1 instead of PG2: + * + * PG0 + * | + * --PG1-- + * / | \ + * PGA PGB PG2 + * / \ + * PGC PGD + */ + +#define XE3LPD_PW_C_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C + +#define XE3LPD_PW_D_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_D, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_D + +#define XE3LPD_PW_2_POWER_DOMAINS \ + XE3LPD_PW_C_POWER_DOMAINS, \ + XE3LPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_TRANSCODER_D, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4 + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_2, + XE3LPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_b, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_c, + XE3LPD_PW_C_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_d, + XE3LPD_PW_D_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc xe3lpd_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xe3lpd_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), + .ops = 
&hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xe3lpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xe3lpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_D", &xe3lpd_pwdoms_pw_d, + .hsw.idx = XELPD_PW_CTL_IDX_PW_D), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_D), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), + I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), + I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), + I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3), + I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4), + ), + .ops = &xelpdp_aux_power_well_ops, + }, +}; + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off, + POWER_DOMAIN_DC_OFF, + XE3LPD_PW_2_POWER_DOMAINS, + XE3LPD_PW_C_POWER_DOMAINS, + XE3LPD_PW_D_POWER_DOMAINS, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xe3lpd_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list xe3lpd_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff), + I915_PW_DESCRIPTORS(xe3lpd_power_wells_main), + I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), +}; + static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { @@ -1693,7 +1823,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) return 0; } - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(i915) >= 30) + return set_power_wells(power_domains, xe3lpd_power_wells); + else if (DISPLAY_VER(i915) >= 20) return set_power_wells(power_domains, xe2lpd_power_wells); else if (DISPLAY_VER(i915) >= 14) return set_power_wells(power_domains, xelpdp_power_wells); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 46e9eff12c23..f0131dd853de 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -187,8 +187,10 @@ int intel_power_well_refcount(struct i915_power_well *power_well) static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, u8 irq_pipe_mask, bool has_vga) { + struct intel_display *display = &dev_priv->display; + if (has_vga) - intel_vga_reset_io_mem(dev_priv); + intel_vga_reset_io_mem(display); if (irq_pipe_mask) gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); @@ -601,20 +603,22 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, return (val & mask) == mask; } -static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) +static void assert_can_enable_dc9(struct intel_display *display) { - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, 
DC_STATE_EN) & DC_STATE_EN_DC9), + struct drm_i915_private *dev_priv = to_i915(display->drm); + + drm_WARN_ONCE(display->drm, + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9), "DC9 already programmed to be enabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & + drm_WARN_ONCE(display->drm, + intel_de_read(display, HSW_PWR_WELL_CTL2) & HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), "Power well 2 on.\n"); - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); /* @@ -626,12 +630,14 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) */ } -static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) +static void assert_can_disable_dc9(struct intel_display *display) { - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + struct drm_i915_private *dev_priv = to_i915(display->drm); + + drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled.\n"); @@ -644,14 +650,14 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) */ } -static void gen9_write_dc_state(struct drm_i915_private *dev_priv, +static void gen9_write_dc_state(struct intel_display *display, u32 state) { int rewrites = 0; int rereads = 0; u32 v; - intel_de_write(dev_priv, DC_STATE_EN, state); + intel_de_write(display, DC_STATE_EN, state); /* It has been observed that disabling the dc6 state sometimes * doesn't stick and dmc keeps returning old value. Make sure @@ -659,10 +665,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, * we are confident that state is exactly what we want. 
*/ do { - v = intel_de_read(dev_priv, DC_STATE_EN); + v = intel_de_read(display, DC_STATE_EN); if (v != state) { - intel_de_write(dev_priv, DC_STATE_EN, state); + intel_de_write(display, DC_STATE_EN, state); rewrites++; rereads = 0; } else if (rereads++ > 5) { @@ -672,27 +678,28 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, } while (rewrites < 100); if (v != state) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Writing dc state to 0x%x failed, now 0x%x\n", state, v); /* Most of the times we need one retry, avoid spam */ if (rewrites > 1) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Rewrote dc state to 0x%x %d times\n", state, rewrites); } -static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) +static u32 gen9_dc_mask(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 mask; mask = DC_STATE_EN_UPTO_DC5; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; - else if (DISPLAY_VER(dev_priv) == 11) + else if (DISPLAY_VER(display) == 11) mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) mask |= DC_STATE_EN_DC9; @@ -702,17 +709,17 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) return mask; } -void gen9_sanitize_dc_state(struct drm_i915_private *i915) +void gen9_sanitize_dc_state(struct intel_display *display) { - struct i915_power_domains *power_domains = &i915->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; u32 val; - if (!HAS_DISPLAY(i915)) + if (!HAS_DISPLAY(display)) return; - val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); + val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Resetting DC state tracking from %02x to %02x\n", power_domains->dc_state, val); power_domains->dc_state = val; @@ -720,7 +727,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *i915) /** * gen9_set_dc_state - set target display C power state - * @dev_priv: i915 device instance + * @display: display instance * @state: target DC power state * - DC_STATE_DISABLE * - DC_STATE_EN_UPTO_DC5 @@ -741,150 +748,152 @@ void gen9_sanitize_dc_state(struct drm_i915_private *i915) * back on and register state is restored. This is guaranteed by the MMIO write * to DC_STATE_EN blocking until the state is restored. 
*/ -void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) +void gen9_set_dc_state(struct intel_display *display, u32 state) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; u32 val; u32 mask; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, + if (drm_WARN_ON_ONCE(display->drm, state & ~power_domains->allowed_dc_mask)) state &= power_domains->allowed_dc_mask; - val = intel_de_read(dev_priv, DC_STATE_EN); - mask = gen9_dc_mask(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", + val = intel_de_read(display, DC_STATE_EN); + mask = gen9_dc_mask(display); + drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n", val & mask, state); /* Check if DMC is ignoring our DC state requests */ if ((val & mask) != power_domains->dc_state) - drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", + drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n", power_domains->dc_state, val & mask); val &= ~mask; val |= state; - gen9_write_dc_state(dev_priv, val); + gen9_write_dc_state(display, val); power_domains->dc_state = val & mask; } -static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) +static void tgl_enable_dc3co(struct intel_display *display) { - drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); + drm_dbg_kms(display->drm, "Enabling DC3CO\n"); + gen9_set_dc_state(display, DC_STATE_EN_DC3CO); } -static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) +static void tgl_disable_dc3co(struct intel_display *display) { - drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); - intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + drm_dbg_kms(display->drm, "Disabling DC3CO\n"); + intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* * Delay of 200us DC3CO Exit time B.Spec 49196 */ usleep_range(200, 210); } -static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) +static void assert_can_enable_dc5(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); enum i915_power_well_id high_pg; /* Power wells at this level and above must be disabled for DC5 entry */ - if (DISPLAY_VER(dev_priv) == 12) + if (DISPLAY_VER(display) == 12) high_pg = ICL_DISP_PW_3; else high_pg = SKL_DISP_PW_2; - drm_WARN_ONCE(&dev_priv->drm, + drm_WARN_ONCE(display->drm, intel_display_power_well_is_enabled(dev_priv, high_pg), "Power wells above platform's DC5 limit still enabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); assert_rpm_wakelock_held(&dev_priv->runtime_pm); - assert_dmc_loaded(dev_priv); + assert_dmc_loaded(display); } -void gen9_enable_dc5(struct drm_i915_private *dev_priv) +void gen9_enable_dc5(struct intel_display *display) { - assert_can_enable_dc5(dev_priv); + struct drm_i915_private *dev_priv = to_i915(display->drm); + + assert_can_enable_dc5(display); - drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); + drm_dbg_kms(display->drm, "Enabling DC5\n"); /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + if 
(DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv)) + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); - intel_dmc_wl_enable(&dev_priv->display); + intel_dmc_wl_enable(display); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); + gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5); } -static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) +static void assert_can_enable_dc6(struct intel_display *display) { - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, UTIL_PIN_CTL) & + drm_WARN_ONCE(display->drm, + (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), "Utility pin enabled in PWM mode\n"); - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), "DC6 already programmed to be enabled.\n"); - assert_dmc_loaded(dev_priv); + assert_dmc_loaded(display); } -void skl_enable_dc6(struct drm_i915_private *dev_priv) +void skl_enable_dc6(struct intel_display *display) { - assert_can_enable_dc6(dev_priv); + struct drm_i915_private *dev_priv = to_i915(display->drm); - drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); + assert_can_enable_dc6(display); + + drm_dbg_kms(display->drm, "Enabling DC6\n"); /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + if (DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv)) + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); - intel_dmc_wl_enable(&dev_priv->display); + intel_dmc_wl_enable(display); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6); } -void bxt_enable_dc9(struct drm_i915_private *dev_priv) +void bxt_enable_dc9(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - assert_can_enable_dc9(dev_priv); + assert_can_enable_dc9(display); - drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); + drm_dbg_kms(display->drm, "Enabling DC9\n"); /* - * Power sequencer reset is not needed on - * platforms with South Display Engine on PCH, - * because PPS registers are always on. + * Power sequencer reset is needed on BXT/GLK, because the PPS registers + * aren't always on, unlike with South Display Engine on PCH. 
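+ * bxt_pps_reset_all() below flags the software PPS state for + * reinitialization the next time the sequencer is used after DC9.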
*/ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_pps_reset_all(display); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); + if (IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) + bxt_pps_reset_all(display); + gen9_set_dc_state(display, DC_STATE_EN_DC9); } -void bxt_disable_dc9(struct drm_i915_private *dev_priv) +void bxt_disable_dc9(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + assert_can_disable_dc9(display); - assert_can_disable_dc9(dev_priv); + drm_dbg_kms(display->drm, "Disabling DC9\n"); - drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); intel_pps_unlock_regs_wa(display); } @@ -910,38 +919,45 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + struct intel_display *display = &dev_priv->display; + + bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + struct intel_display *display = &dev_priv->display; + + bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + struct intel_display *display = &dev_priv->display; + + return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct i915_power_well *power_well; power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (intel_power_well_refcount(power_well) > 0) - bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (intel_power_well_refcount(power_well) > 0) - bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { power_well = lookup_power_well(dev_priv, GLK_DISP_PW_DPIO_CMN_C); if (intel_power_well_refcount(power_well) > 0) - bxt_dpio_phy_verify_state(dev_priv, + bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); } } @@ -949,8 +965,10 @@ static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv) static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && - (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); + struct intel_display *display = &dev_priv->display; + + return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); } static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) @@ -965,27 +983,28 @@ static void 
gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) enabled_dbuf_slices); } -void gen9_disable_dc_states(struct drm_i915_private *dev_priv) +void gen9_disable_dc_states(struct intel_display *display) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct i915_power_domains *power_domains = &display->power.domains; struct intel_cdclk_config cdclk_config = {}; if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { - tgl_disable_dc3co(dev_priv); + tgl_disable_dc3co(display); return; } - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - intel_dmc_wl_disable(&dev_priv->display); + intel_dmc_wl_disable(display); - intel_cdclk_get_cdclk(dev_priv, &cdclk_config); + intel_cdclk_get_cdclk(display, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ - drm_WARN_ON(&dev_priv->drm, - intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw, + drm_WARN_ON(display->drm, + intel_cdclk_clock_changed(&display->cdclk.hw, &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); @@ -993,7 +1012,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_verify_dpio_phy_power_wells(dev_priv); - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) /* * DMC retains HW context only for port A, the other combo * PHY's HW context for port B is lost after DC transitions, @@ -1005,26 +1024,29 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - gen9_disable_dc_states(dev_priv); + struct intel_display *display = &dev_priv->display; + + gen9_disable_dc_states(display); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; - if (!intel_dmc_has_payload(dev_priv)) + if (!intel_dmc_has_payload(display)) return; switch (power_domains->target_dc_state) { case DC_STATE_EN_DC3CO: - tgl_enable_dc3co(dev_priv); + tgl_enable_dc3co(display); break; case DC_STATE_EN_UPTO_DC6: - skl_enable_dc6(dev_priv); + skl_enable_dc6(display); break; case DC_STATE_EN_UPTO_DC5: - gen9_enable_dc5(dev_priv); + gen9_enable_dc5(display); break; } } @@ -1048,24 +1070,30 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_A); - if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_B); + struct intel_display *display = &dev_priv->display; + + if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0) + i830_enable_pipe(display, PIPE_A); + if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0) + i830_enable_pipe(display, PIPE_B); } static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - 
i830_disable_pipe(dev_priv, PIPE_B); - i830_disable_pipe(dev_priv, PIPE_A); + struct intel_display *display = &dev_priv->display; + + i830_disable_pipe(display, PIPE_B); + i830_disable_pipe(display, PIPE_A); } static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE && - intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE; + struct intel_display *display = &dev_priv->display; + + return intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE && + intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE; } static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1232,7 +1260,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_crt_reset(&encoder->base); } - intel_vga_redisable_power_on(dev_priv); + intel_vga_redisable_power_on(display); intel_pps_unlock_regs_wa(display); } @@ -1248,7 +1276,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) /* make sure we're done processing display irqs */ intel_synchronize_irq(dev_priv); - intel_pps_reset_all(display); + vlv_pps_reset_all(display); /* Prevent us from re-enabling polling on accident in late suspend */ if (!dev_priv->drm.dev->power.is_suspended) @@ -1309,13 +1337,14 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) -static void assert_chv_phy_status(struct drm_i915_private *dev_priv) +static void assert_chv_phy_status(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_well *cmn_bc = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - u32 phy_control = dev_priv->display.power.chv_phy_control; + u32 phy_control = display->power.chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; @@ -1326,7 +1355,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * reset (ie. the power well has been disabled at * least once). 
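 * chv_phy_assert[] is only set back to true at the end of * chv_dpio_cmn_power_well_disable(), once the PHY has gone through a full reset.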
*/ - if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0]) + if (!display->power.chv_phy_assert[DPIO_PHY0]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | @@ -1334,7 +1363,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); - if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1]) + if (!display->power.chv_phy_assert[DPIO_PHY1]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); @@ -1362,7 +1391,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) */ if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && - (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) & DPLL_VCO_ENABLE) == 0) + (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0) phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); if (BITS_SET(phy_control, @@ -1405,12 +1434,12 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. */ - if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS, + if (intel_de_wait(display, DISPLAY_PHY_STATUS, phy_status_mask, phy_status, 10)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->display.power.chv_phy_control); + intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, display->power.chv_phy_control); } #undef BITS_SET @@ -1418,11 +1447,12 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + struct intel_display *display = &dev_priv->display; enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; u32 tmp; - drm_WARN_ON_ONCE(&dev_priv->drm, + drm_WARN_ON_ONCE(display->drm, id != VLV_DISP_PW_DPIO_CMN_BC && id != CHV_DISP_PW_DPIO_CMN_D); @@ -1436,9 +1466,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ - if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, + if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS, PHY_POWERGOOD(phy), 1)) - drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", + drm_err(display->drm, "Display PHY %d is not power up\n", phy); vlv_dpio_get(dev_priv); @@ -1466,24 +1496,25 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_dpio_put(dev_priv); - dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->display.power.chv_phy_control); + phy, display->power.chv_phy_control); - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); } static void 
chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + struct intel_display *display = &dev_priv->display; enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; - drm_WARN_ON_ONCE(&dev_priv->drm, + drm_WARN_ON_ONCE(display->drm, id != VLV_DISP_PW_DPIO_CMN_BC && id != CHV_DISP_PW_DPIO_CMN_D); @@ -1496,20 +1527,20 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, PIPE_C); } - dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); vlv_set_power_well(dev_priv, power_well, false); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->display.power.chv_phy_control); + phy, display->power.chv_phy_control); /* PHY is fully reset now, so we can enable the PHY state asserts */ - dev_priv->display.power.chv_phy_assert[phy] = true; + display->power.chv_phy_assert[phy] = true; - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); } static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, @@ -1579,29 +1610,30 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; bool was_override; mutex_lock(&power_domains->lock); - was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); if (override == was_override) goto out; if (override) - dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->display.power.chv_phy_control); + phy, ch, display->power.chv_phy_control); - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); out: mutex_unlock(&power_domains->lock); @@ -1612,29 +1644,30 @@ out: void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); - 
dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); if (override) - dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->display.power.chv_phy_control); + phy, ch, mask, display->power.chv_phy_control); - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index 9357a9a73c06..93559f7c6100 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -12,6 +12,7 @@ struct drm_i915_private; struct i915_power_well_ops; +struct intel_display; struct intel_encoder; #define for_each_power_well(__dev_priv, __power_well) \ @@ -154,13 +155,13 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override); -void gen9_enable_dc5(struct drm_i915_private *dev_priv); -void skl_enable_dc6(struct drm_i915_private *dev_priv); -void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); -void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state); -void gen9_disable_dc_states(struct drm_i915_private *dev_priv); -void bxt_enable_dc9(struct drm_i915_private *dev_priv); -void bxt_disable_dc9(struct drm_i915_private *dev_priv); +void gen9_enable_dc5(struct intel_display *display); +void skl_enable_dc6(struct intel_display *display); +void gen9_sanitize_dc_state(struct intel_display *display); +void gen9_set_dc_state(struct intel_display *display, u32 state); +void gen9_disable_dc_states(struct intel_display *display); +void bxt_enable_dc9(struct intel_display *display); +void bxt_disable_dc9(struct intel_display *display); extern const struct i915_power_well_ops i9xx_always_on_power_well_ops; extern const struct i915_power_well_ops chv_pipe_power_well_ops; diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c new file mode 100644 index 000000000000..030c4f873da1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include <linux/slab.h> + +#include "i915_drv.h" +#include "intel_display_device.h" +#include "intel_display_params.h" +#include "intel_display_snapshot.h" +#include "intel_dmc.h" +#include "intel_overlay.h" + +struct intel_display_snapshot { + struct intel_display *display; + + struct intel_display_device_info info; + struct intel_display_runtime_info runtime_info; + struct 
intel_display_params params; + struct intel_overlay_snapshot *overlay; + struct intel_dmc_snapshot *dmc; +}; + +struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display) +{ + struct intel_display_snapshot *snapshot; + + snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC); + if (!snapshot) + return NULL; + + snapshot->display = display; + + memcpy(&snapshot->info, DISPLAY_INFO(display), sizeof(snapshot->info)); + memcpy(&snapshot->runtime_info, DISPLAY_RUNTIME_INFO(display), + sizeof(snapshot->runtime_info)); + + intel_display_params_copy(&snapshot->params); + + snapshot->overlay = intel_overlay_snapshot_capture(display); + snapshot->dmc = intel_dmc_snapshot_capture(display); + + return snapshot; +} + +void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot, + struct drm_printer *p) +{ + struct intel_display *display; + + if (!snapshot) + return; + + display = snapshot->display; + + intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p); + intel_display_params_dump(&snapshot->params, display->drm->driver->name, p); + + intel_overlay_snapshot_print(snapshot->overlay, p); + intel_dmc_snapshot_print(snapshot->dmc, p); +} + +void intel_display_snapshot_free(struct intel_display_snapshot *snapshot) +{ + if (!snapshot) + return; + + intel_display_params_free(&snapshot->params); + + kfree(snapshot->overlay); + kfree(snapshot->dmc); + kfree(snapshot); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.h b/drivers/gpu/drm/i915/display/intel_display_snapshot.h new file mode 100644 index 000000000000..7ed27cdea644 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_DISPLAY_SNAPSHOT_H__ +#define __INTEL_DISPLAY_SNAPSHOT_H__ + +struct drm_printer; +struct intel_display; +struct intel_display_snapshot; + +struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display); +void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot, + struct drm_printer *p); +void intel_display_snapshot_free(struct intel_display_snapshot *snapshot); + +#endif /* __INTEL_DISPLAY_SNAPSHOT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h index c734ef1fba3c..9bd8f1e505b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_trace.h +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -9,44 +9,85 @@ #if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) #define __INTEL_DISPLAY_TRACE_H__ +#include <linux/string.h> #include <linux/string_helpers.h> #include <linux/types.h> #include <linux/tracepoint.h> #include "i915_drv.h" #include "intel_crtc.h" +#include "intel_display_limits.h" #include "intel_display_types.h" #include "intel_vblank.h" -#define __dev_name_i915(i915) dev_name((i915)->drm.dev) +#define __dev_name_display(display) dev_name((display)->drm->dev) #define __dev_name_kms(obj) dev_name((obj)->base.dev->dev) +/* + * Using identifiers from enum pipe in TP_printk() will confuse tools that + * parse /sys/kernel/debug/tracing/{xe,i915}/<event>/format. So we use CPP + * macros instead. 
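+ * E.g. __entry->frame[PIPE_A] would show up in the event's format file as + * REC->frame[PIPE_A], which userspace cannot evaluate; with the macros + * it is emitted as REC->frame[0].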
+ */ +#define _TRACE_PIPE_A 0 +#define _TRACE_PIPE_B 1 +#define _TRACE_PIPE_C 2 +#define _TRACE_PIPE_D 3 + +/* + * FIXME: Several TP_printk() calls below display frame and scanline numbers for + * all possible pipes (regardless of whether they are available) and that is + * done with a constant format string. A better approach would be to generate + * that info dynamically based on available pipes, but, while we do not have + * that implemented yet, let's assert that the constant format string indeed + * covers all possible pipes. + */ +static_assert(I915_MAX_PIPES - 1 == _TRACE_PIPE_D); + +#define _PIPES_FRAME_AND_SCANLINE_FMT \ + "pipe A: frame=%u, scanline=%u" \ + ", pipe B: frame=%u, scanline=%u" \ + ", pipe C: frame=%u, scanline=%u" \ + ", pipe D: frame=%u, scanline=%u" + +#define _PIPES_FRAME_AND_SCANLINE_VALUES \ + __entry->frame[_TRACE_PIPE_A], __entry->scanline[_TRACE_PIPE_A] \ + , __entry->frame[_TRACE_PIPE_B], __entry->scanline[_TRACE_PIPE_B] \ + , __entry->frame[_TRACE_PIPE_C], __entry->scanline[_TRACE_PIPE_C] \ + , __entry->frame[_TRACE_PIPE_D], __entry->scanline[_TRACE_PIPE_D] + +/* + * Paranoid sanity check that at least the enumeration starts at the + * same value as _TRACE_PIPE_A. + */ +static_assert(PIPE_A == _TRACE_PIPE_A); + TRACE_EVENT(intel_pipe_enable, TP_PROTO(struct intel_crtc *crtc), TP_ARGS(crtc), TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) + __array(u32, frame, I915_MAX_PIPES) + __array(u32, scanline, I915_MAX_PIPES) + __field(char, pipe_name) ), TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc *it__; __assign_str(dev); - for_each_intel_crtc(&dev_priv->drm, it__) { + memset(__entry->frame, 0, + sizeof(__entry->frame[0]) * I915_MAX_PIPES); + memset(__entry->scanline, 0, + sizeof(__entry->scanline[0]) * I915_MAX_PIPES); + for_each_intel_crtc(display->drm, it__) { __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); ), - TP_printk("dev %s, pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) + TP_printk("dev %s, pipe %c enable, " _PIPES_FRAME_AND_SCANLINE_FMT, + __get_str(dev), __entry->pipe_name, _PIPES_FRAME_AND_SCANLINE_VALUES) ); TRACE_EVENT(intel_pipe_disable, @@ -55,27 +96,28 @@ TRACE_EVENT(intel_pipe_disable, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) + __array(u32, frame, I915_MAX_PIPES) + __array(u32, scanline, I915_MAX_PIPES) + __field(char, pipe_name) ), TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc *it__; __assign_str(dev); - for_each_intel_crtc(&dev_priv->drm, it__) { + memset(__entry->frame, 0, + sizeof(__entry->frame[0]) * I915_MAX_PIPES); + memset(__entry->scanline, 0, + sizeof(__entry->scanline[0]) * I915_MAX_PIPES); + for_each_intel_crtc(display->drm, it__) { __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); __entry->scanline[it__->pipe] 
= intel_get_crtc_scanline(it__); } - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); ), - TP_printk("dev %s, pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) + TP_printk("dev %s, pipe %c disable, " _PIPES_FRAME_AND_SCANLINE_FMT, + __get_str(dev), __entry->pipe_name, _PIPES_FRAME_AND_SCANLINE_VALUES) ); TRACE_EVENT(intel_crtc_flip_done, @@ -84,20 +126,20 @@ TRACE_EVENT(intel_crtc_flip_done, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); @@ -107,7 +149,7 @@ TRACE_EVENT(intel_pipe_crc, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __array(u32, crcs, 5) @@ -115,14 +157,14 @@ TRACE_EVENT(intel_pipe_crc, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], __entry->crcs[3], @@ -130,62 +172,62 @@ TRACE_EVENT(intel_pipe_crc, ); TRACE_EVENT(intel_cpu_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_display *display, enum pipe pipe), + TP_ARGS(display, pipe), TP_STRUCT__entry( - __string(dev, __dev_name_i915(dev_priv)) - __field(enum pipe, pipe) + __string(dev, __dev_name_display(display)) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); __assign_str(dev); - __entry->pipe = pipe; + __entry->pipe_name = pipe_name(pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); TRACE_EVENT(intel_pch_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), - TP_ARGS(dev_priv, pch_transcoder), + TP_PROTO(struct intel_display *display, enum pipe pch_transcoder), + TP_ARGS(display, pch_transcoder), TP_STRUCT__entry( - __string(dev, __dev_name_i915(dev_priv)) - __field(enum pipe, pipe) + __string(dev, __dev_name_display(display)) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( enum pipe pipe = pch_transcoder; - struct intel_crtc *crtc = 
intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); __assign_str(dev); - __entry->pipe = pipe; + __entry->pipe_name = pipe_name(pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pch transcoder %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); TRACE_EVENT(intel_memory_cxsr, - TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), - TP_ARGS(dev_priv, old, new), + TP_PROTO(struct intel_display *display, bool old, bool new), + TP_ARGS(display, old, new), TP_STRUCT__entry( - __string(dev, __dev_name_i915(dev_priv)) - __array(u32, frame, 3) - __array(u32, scanline, 3) + __string(dev, __dev_name_display(display)) + __array(u32, frame, I915_MAX_PIPES) + __array(u32, scanline, I915_MAX_PIPES) __field(bool, old) __field(bool, new) ), @@ -193,7 +235,11 @@ TRACE_EVENT(intel_memory_cxsr, TP_fast_assign( struct intel_crtc *crtc; __assign_str(dev); - for_each_intel_crtc(&dev_priv->drm, crtc) { + memset(__entry->frame, 0, + sizeof(__entry->frame[0]) * I915_MAX_PIPES); + memset(__entry->scanline, 0, + sizeof(__entry->scanline[0]) * I915_MAX_PIPES); + for_each_intel_crtc(display->drm, crtc) { __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); } @@ -201,11 +247,9 @@ TRACE_EVENT(intel_memory_cxsr, __entry->new = new; ), - TP_printk("dev %s, cxsr %s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + TP_printk("dev %s, cxsr %s->%s, " _PIPES_FRAME_AND_SCANLINE_FMT, __get_str(dev), str_on_off(__entry->old), str_on_off(__entry->new), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) + _PIPES_FRAME_AND_SCANLINE_VALUES) ); TRACE_EVENT(g4x_wm, @@ -214,7 +258,7 @@ TRACE_EVENT(g4x_wm, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u16, primary) @@ -233,7 +277,7 @@ TRACE_EVENT(g4x_wm, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; @@ -251,7 +295,7 @@ TRACE_EVENT(g4x_wm, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->primary, __entry->sprite, __entry->cursor, str_yes_no(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, @@ -265,7 +309,7 @@ TRACE_EVENT(vlv_wm, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, level) @@ -280,7 +324,7 @@ TRACE_EVENT(vlv_wm, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->level = wm->level; @@ -294,7 +338,7 @@ TRACE_EVENT(vlv_wm, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, level=%d, 
cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->level, __entry->cxsr, __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, @@ -307,7 +351,7 @@ TRACE_EVENT(vlv_fifo_size, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, sprite0_start) @@ -317,7 +361,7 @@ TRACE_EVENT(vlv_fifo_size, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->sprite0_start = sprite0_start; @@ -326,7 +370,7 @@ TRACE_EVENT(vlv_fifo_size, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, %d/%d/%d", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->sprite0_start, __entry->sprite1_start, __entry->fifo_size) ); @@ -337,7 +381,7 @@ TRACE_EVENT(intel_plane_async_flip, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(bool, async_flip) @@ -347,14 +391,14 @@ TRACE_EVENT(intel_plane_async_flip, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->async_flip = async_flip; ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, async_flip=%s", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline, str_yes_no(__entry->async_flip)) ); @@ -364,7 +408,7 @@ TRACE_EVENT(intel_plane_update_noarm, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __array(int, src, 4) @@ -375,7 +419,7 @@ TRACE_EVENT(intel_plane_update_noarm, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src)); @@ -383,7 +427,7 @@ TRACE_EVENT(intel_plane_update_noarm, ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline, DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) @@ -395,7 +439,7 @@ TRACE_EVENT(intel_plane_update_arm, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __array(int, src, 4) @@ -406,7 +450,7 @@ TRACE_EVENT(intel_plane_update_arm, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src)); @@ -414,7 +458,7 @@ 
TRACE_EVENT(intel_plane_update_arm, ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline, DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) @@ -426,7 +470,7 @@ TRACE_EVENT(intel_plane_disable_arm, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __string(name, plane->base.name) @@ -435,13 +479,13 @@ TRACE_EVENT(intel_plane_disable_arm, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -452,23 +496,24 @@ TRACE_EVENT(intel_fbc_activate, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) __string(name, plane->base.name) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + struct intel_display *display = to_intel_display(plane->base.dev); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -479,23 +524,24 @@ TRACE_EVENT(intel_fbc_deactivate, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) __string(name, plane->base.name) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + struct intel_display *display = to_intel_display(plane->base.dev); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -506,23 +552,24 @@ TRACE_EVENT(intel_fbc_nuke, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) __string(name, plane->base.name) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + struct intel_display *display = to_intel_display(plane->base.dev); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = 
pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -532,20 +579,20 @@ TRACE_EVENT(intel_crtc_vblank_work_start, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); @@ -555,20 +602,20 @@ TRACE_EVENT(intel_crtc_vblank_work_end, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); @@ -578,7 +625,7 @@ TRACE_EVENT(intel_pipe_update_start, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, min) @@ -587,7 +634,7 @@ TRACE_EVENT(intel_pipe_update_start, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; @@ -595,7 +642,7 @@ TRACE_EVENT(intel_pipe_update_start, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->min, __entry->max) ); @@ -606,7 +653,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, min) @@ -615,7 +662,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = crtc->debug.start_vbl_count; __entry->scanline = crtc->debug.scanline_start; __entry->min = crtc->debug.min_vbl; @@ -623,7 +670,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->min, __entry->max) ); @@ -634,30 +681,30 @@ TRACE_EVENT(intel_pipe_update_end, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = frame; 
__entry->scanline = scanline_end; ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); TRACE_EVENT(intel_frontbuffer_invalidate, - TP_PROTO(struct drm_i915_private *i915, + TP_PROTO(struct intel_display *display, unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(i915, frontbuffer_bits, origin), + TP_ARGS(display, frontbuffer_bits, origin), TP_STRUCT__entry( - __string(dev, __dev_name_i915(i915)) + __string(dev, __dev_name_display(display)) __field(unsigned int, frontbuffer_bits) __field(unsigned int, origin) ), @@ -673,12 +720,12 @@ TRACE_EVENT(intel_frontbuffer_invalidate, ); TRACE_EVENT(intel_frontbuffer_flush, - TP_PROTO(struct drm_i915_private *i915, + TP_PROTO(struct intel_display *display, unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(i915, frontbuffer_bits, origin), + TP_ARGS(display, frontbuffer_bits, origin), TP_STRUCT__entry( - __string(dev, __dev_name_i915(i915)) + __string(dev, __dev_name_display(display)) __field(unsigned int, frontbuffer_bits) __field(unsigned int, origin) ), diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index f29e5dc3db91..ff6eb93337e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -26,10 +26,8 @@ #ifndef __INTEL_DISPLAY_TYPES_H__ #define __INTEL_DISPLAY_TYPES_H__ -#include <linux/i2c.h> #include <linux/pm_qos.h> #include <linux/pwm.h> -#include <linux/sched/clock.h> #include <drm/display/drm_dp_dual_mode_helper.h> #include <drm/display/drm_dp_mst_helper.h> @@ -38,16 +36,11 @@ #include <drm/drm_atomic.h> #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> -#include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> -#include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> -#include <drm/drm_vblank.h> #include <drm/drm_vblank_work.h> #include <drm/intel/i915_hdcp_interface.h> -#include <media/cec-notifier.h> -#include "gem/i915_gem_object_types.h" /* for to_intel_bo() */ #include "i915_vma.h" #include "i915_vma_types.h" #include "intel_bios.h" @@ -57,11 +50,13 @@ #include "intel_dpll_mgr.h" #include "intel_wm_types.h" +struct cec_notifier; struct drm_printer; struct __intel_global_objs_state; +struct intel_connector; struct intel_ddi_buf_trans; struct intel_fbc; -struct intel_connector; +struct intel_hdcp_shim; struct intel_tc_port; /* @@ -430,128 +425,6 @@ struct intel_panel { struct intel_digital_port; -enum check_link_response { - HDCP_LINK_PROTECTED = 0, - HDCP_TOPOLOGY_CHANGE, - HDCP_LINK_INTEGRITY_FAILURE, - HDCP_REAUTH_REQUEST -}; - -/* - * This structure serves as a translation layer between the generic HDCP code - * and the bus-specific code. What that means is that HDCP over HDMI differs - * from HDCP over DP, so to account for these differences, we need to - * communicate with the receiver through this shim. - * - * For completeness, the 2 buses differ in the following ways: - * - DP AUX vs. DDC - * HDCP registers on the receiver are set via DP AUX for DP, and - * they are set via DDC for HDMI. - * - Receiver register offsets - * The offsets of the registers are different for DP vs. HDMI - * - Receiver register masks/offsets - * For instance, the ready bit for the KSV fifo is in a different - * place on DP vs HDMI - * - Receiver register names - * Seriously. 
In the DP spec, the 16-bit register containing - * downstream information is called BINFO, on HDMI it's called - * BSTATUS. To confuse matters further, DP has a BSTATUS register - * with a completely different definition. - * - KSV FIFO - * On HDMI, the ksv fifo is read all at once, whereas on DP it must - * be read 3 keys at a time - * - Aksv output - * Since Aksv is hidden in hardware, there's different procedures - * to send it over DP AUX vs DDC - */ -struct intel_hdcp_shim { - /* Outputs the transmitter's An and Aksv values to the receiver. */ - int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an); - - /* Reads the receiver's key selection vector */ - int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv); - - /* - * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The - * definitions are the same in the respective specs, but the names are - * different. Call it BSTATUS since that's the name the HDMI spec - * uses and it was there first. - */ - int (*read_bstatus)(struct intel_digital_port *dig_port, - u8 *bstatus); - - /* Determines whether a repeater is present downstream */ - int (*repeater_present)(struct intel_digital_port *dig_port, - bool *repeater_present); - - /* Reads the receiver's Ri' value */ - int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri); - - /* Determines if the receiver's KSV FIFO is ready for consumption */ - int (*read_ksv_ready)(struct intel_digital_port *dig_port, - bool *ksv_ready); - - /* Reads the ksv fifo for num_downstream devices */ - int (*read_ksv_fifo)(struct intel_digital_port *dig_port, - int num_downstream, u8 *ksv_fifo); - - /* Reads a 32-bit part of V' from the receiver */ - int (*read_v_prime_part)(struct intel_digital_port *dig_port, - int i, u32 *part); - - /* Enables HDCP signalling on the port */ - int (*toggle_signalling)(struct intel_digital_port *dig_port, - enum transcoder cpu_transcoder, - bool enable); - - /* Enable/Disable stream encryption on DP MST Transport Link */ - int (*stream_encryption)(struct intel_connector *connector, - bool enable); - - /* Ensures the link is still protected */ - bool (*check_link)(struct intel_digital_port *dig_port, - struct intel_connector *connector); - - /* Detects panel's hdcp capability. This is optional for HDMI. */ - int (*hdcp_get_capability)(struct intel_digital_port *dig_port, - bool *hdcp_capable); - - /* HDCP adaptation(DP/HDMI) required on the port */ - enum hdcp_wired_protocol protocol; - - /* Detects whether sink is HDCP2.2 capable */ - int (*hdcp_2_2_get_capability)(struct intel_connector *connector, - bool *capable); - - /* Write HDCP2.2 messages */ - int (*write_2_2_msg)(struct intel_connector *connector, - void *buf, size_t size); - - /* Read HDCP2.2 messages */ - int (*read_2_2_msg)(struct intel_connector *connector, - u8 msg_id, void *buf, size_t size); - - /* - * Implementation of DP HDCP2.2 Errata for the communication of stream - * type to Receivers. In DP HDCP2.2 Stream type is one of the input to - * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. 
- */ - int (*config_stream_type)(struct intel_connector *connector, - bool is_repeater, u8 type); - - /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */ - int (*stream_2_2_encryption)(struct intel_connector *connector, - bool enable); - - /* HDCP2.2 Link Integrity Check */ - int (*check_2_2_link)(struct intel_digital_port *dig_port, - struct intel_connector *connector); - - /* HDCP remote sink cap */ - int (*get_remote_hdcp_capability)(struct intel_connector *connector, - bool *hdcp_capable, bool *hdcp2_capable); -}; - struct intel_hdcp { const struct intel_hdcp_shim *shim; /* Mutex for hdcp state of the connector */ @@ -651,7 +524,7 @@ struct intel_connector { struct intel_dp *mst_port; - bool force_bigjoiner_enable; + int force_joined_pipes; struct { struct drm_dp_aux *dsc_decompression_aux; @@ -1036,6 +909,10 @@ struct intel_csc_matrix { u16 postoff[3]; }; +void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val); + +typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val); + struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -1270,9 +1147,6 @@ struct intel_crtc_state { /* w/a for waiting 2 vblanks during crtc enable */ enum pipe hsw_workaround_pipe; - /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ - bool disable_lp_wm; - struct intel_crtc_wm_state wm; int min_cdclk[I915_MAX_PLANES]; @@ -1396,8 +1270,9 @@ struct intel_crtc_state { /* Only valid on TGL+ */ enum transcoder mst_master_transcoder; - /* For DSB based color LUT updates */ - struct intel_dsb *dsb_color_vblank, *dsb_color_commit; + /* For DSB based pipe updates */ + struct intel_dsb *dsb_color_vblank, *dsb_commit; + bool use_dsb; u32 psr2_man_track_ctl; @@ -1488,6 +1363,8 @@ struct intel_crtc { /* armed event for async flip */ struct drm_pending_vblank_event *flip_done_event; + /* armed event for DSB based updates */ + struct drm_pending_vblank_event *dsb_event; /* Access to these should be protected by dev_priv->irq_lock. 
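 *
 * For example (illustrative only):
 *   spin_lock_irq(&dev_priv->irq_lock);
 *   crtc->cpu_fifo_underrun_disabled = true;
 *   spin_unlock_irq(&dev_priv->irq_lock);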
*/ bool cpu_fifo_underrun_disabled; @@ -1540,6 +1417,8 @@ struct intel_crtc { #ifdef CONFIG_DEBUG_FS struct intel_pipe_crc pipe_crc; #endif + + bool block_dc_for_vblank; }; struct intel_plane { @@ -1578,22 +1457,26 @@ struct intel_plane { u32 pixel_format, u64 modifier, unsigned int rotation); /* Write all non-self arming plane registers */ - void (*update_noarm)(struct intel_plane *plane, + void (*update_noarm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); /* Write all self-arming plane registers */ - void (*update_arm)(struct intel_plane *plane, + void (*update_arm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); /* Disable the plane, must arm */ - void (*disable_arm)(struct intel_plane *plane, + void (*disable_arm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); int (*min_cdclk)(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*async_flip)(struct intel_plane *plane, + void (*async_flip)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip); @@ -1601,14 +1484,6 @@ struct intel_plane { void (*disable_flip_done)(struct intel_plane *plane); }; -struct intel_watermark_params { - u16 fifo_size; - u16 max_wm; - u8 default_wm; - u8 guard_size; - u8 cacheline_size; -}; - #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) @@ -1622,8 +1497,6 @@ struct intel_watermark_params { #define to_intel_framebuffer(fb) \ container_of_const((fb), struct intel_framebuffer, base) -#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL) - struct intel_hdmi { i915_reg_t hdmi_reg; struct { @@ -1676,7 +1549,7 @@ struct intel_pps { * Pipe whose power sequencer is currently locked into * this port. Only relevant on VLV/CHV. */ - enum pipe pps_pipe; + enum pipe vlv_pps_pipe; /* * Power sequencer index. Only relevant on BXT+. @@ -1689,12 +1562,12 @@ struct intel_pps { * the use of the PPS for any pipe currently driving * external DP as that will mess things up on VLV. */ - enum pipe active_pipe; + enum pipe vlv_active_pipe; /* * Set if the sequencer may be reset due to a power transition, * requiring a reinitialization. Only relevant on BXT+. 
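 * When set, the PPS registers are expected to be reinitialized on the
 * next power sequencer access.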
*/ - bool pps_reset; + bool bxt_pps_reset; struct edp_power_seq pps_delays; struct edp_power_seq bios_pps_delays; }; @@ -1745,6 +1618,8 @@ struct intel_psr { u32 dc3co_exit_delay; struct delayed_work dc3co_work; u8 entry_setup_frames; + + bool link_ok; }; struct intel_dp { @@ -1892,6 +1767,7 @@ struct intel_dp { /* When we last wrote the OUI for eDP */ unsigned long last_oui_write; + bool oui_valid; bool colorimetry_support; @@ -2050,7 +1926,10 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector) { - return enc_to_intel_dp(intel_attached_encoder(connector)); + if (connector->mst_port) + return connector->mst_port; + else + return enc_to_intel_dp(intel_attached_encoder(connector)); } static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) @@ -2228,6 +2107,10 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) __drm_device_to_intel_display((p)->base.dev) #define __intel_hdmi_to_intel_display(p) \ __drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev) +#define __intel_plane_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.dev) +#define __intel_plane_state_to_intel_display(p) \ + __drm_device_to_intel_display((p)->uapi.plane->dev) /* Helper for generic association. Map types to conversion functions/macros. */ #define __assoc(type, p) \ @@ -2246,6 +2129,8 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) __assoc(intel_digital_port, p), \ __assoc(intel_dp, p), \ __assoc(intel_encoder, p), \ - __assoc(intel_hdmi, p)) + __assoc(intel_hdmi, p), \ + __assoc(intel_plane, p), \ + __assoc(intel_plane_state, p)) #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 7c756d5ba2a2..87bdacfd9edf 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -52,7 +52,7 @@ enum intel_dmc_id { }; struct intel_dmc { - struct drm_i915_private *i915; + struct intel_display *display; struct work_struct work; const char *fw_path; u32 max_fw_size; /* bytes */ @@ -70,21 +70,21 @@ struct intel_dmc { }; /* Note: This may be NULL. */ -static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) +static struct intel_dmc *display_to_dmc(struct intel_display *display) { - return i915->display.dmc.dmc; + return display->dmc.dmc; } -static const char *dmc_firmware_param(struct drm_i915_private *i915) +static const char *dmc_firmware_param(struct intel_display *display) { - const char *p = i915->display.params.dmc_firmware_path; + const char *p = display->params.dmc_firmware_path; return p && *p ? 
p : NULL; } -static bool dmc_firmware_param_disabled(struct drm_i915_private *i915) +static bool dmc_firmware_param_disabled(struct intel_display *display) { - const char *p = dmc_firmware_param(i915); + const char *p = dmc_firmware_param(display); /* Magic path to indicate disabled */ return p && !strcmp(p, "/dev/null"); @@ -113,6 +113,9 @@ static bool dmc_firmware_param_disabled(struct drm_i915_private *i915) #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd) +MODULE_FIRMWARE(XE3LPD_DMC_PATH); + #define XE2LPD_DMC_PATH DMC_PATH(xe2lpd) MODULE_FIRMWARE(XE2LPD_DMC_PATH); @@ -162,18 +165,22 @@ MODULE_FIRMWARE(SKL_DMC_PATH); #define BXT_DMC_MAX_FW_SIZE 0x3000 MODULE_FIRMWARE(BXT_DMC_PATH); -static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size) +static const char *dmc_firmware_default(struct intel_display *display, u32 *size) { + struct drm_i915_private *i915 = to_i915(display->drm); const char *fw_path = NULL; u32 max_fw_size = 0; - if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) { + if (DISPLAY_VERx100(display) == 3000) { + fw_path = XE3LPD_DMC_PATH; + max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; + } else if (DISPLAY_VERx100(display) == 2000) { fw_path = XE2LPD_DMC_PATH; max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) { + } else if (DISPLAY_VERx100(display) == 1401) { fw_path = BMG_DMC_PATH; max_fw_size = XELPDP_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { + } else if (DISPLAY_VERx100(display) == 1400) { fw_path = MTL_DMC_PATH; max_fw_size = XELPDP_DMC_MAX_FW_SIZE; } else if (IS_DG2(i915)) { @@ -194,7 +201,7 @@ static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size } else if (IS_TIGERLAKE(i915)) { fw_path = TGL_DMC_PATH; max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER(i915) == 11) { + } else if (DISPLAY_VER(display) == 11) { fw_path = ICL_DMC_PATH; max_fw_size = ICL_DMC_MAX_FW_SIZE; } else if (IS_GEMINILAKE(i915)) { @@ -375,70 +382,70 @@ static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; } -static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) +static bool has_dmc_id_fw(struct intel_display *display, enum intel_dmc_id dmc_id) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); return dmc && dmc->dmc_info[dmc_id].payload; } -bool intel_dmc_has_payload(struct drm_i915_private *i915) +bool intel_dmc_has_payload(struct intel_display *display) { - return has_dmc_id_fw(i915, DMC_FW_MAIN); + return has_dmc_id_fw(display, DMC_FW_MAIN); } static const struct stepping_info * -intel_get_stepping_info(struct drm_i915_private *i915, +intel_get_stepping_info(struct intel_display *display, struct stepping_info *si) { - const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(i915)); + const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(display)); si->stepping = step_name[0]; si->substepping = step_name[1]; return si; } -static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) +static void gen9_set_dc_state_debugmask(struct intel_display *display) { /* The below bit doesn't need to be cleared ever afterwards */ - intel_de_rmw(i915, DC_STATE_DEBUG, 0, + intel_de_rmw(display, DC_STATE_DEBUG, 0, DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); - intel_de_posting_read(i915, DC_STATE_DEBUG); + 
intel_de_posting_read(display, DC_STATE_DEBUG); } -static void disable_event_handler(struct drm_i915_private *i915, +static void disable_event_handler(struct intel_display *display, i915_reg_t ctl_reg, i915_reg_t htp_reg) { - intel_de_write(i915, ctl_reg, + intel_de_write(display, ctl_reg, REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, DMC_EVT_CTL_TYPE_EDGE_0_1) | REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, DMC_EVT_CTL_EVENT_ID_FALSE)); - intel_de_write(i915, htp_reg, 0); + intel_de_write(display, htp_reg, 0); } -static void disable_all_event_handlers(struct drm_i915_private *i915) +static void disable_all_event_handlers(struct intel_display *display) { enum intel_dmc_id dmc_id; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ - if (DISPLAY_VER(i915) < 12) + if (DISPLAY_VER(display) < 12) return; for_each_dmc_id(dmc_id) { int handler; - if (!has_dmc_id_fw(i915, dmc_id)) + if (!has_dmc_id_fw(display, dmc_id)) continue; for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) - disable_event_handler(i915, - DMC_EVT_CTL(i915, dmc_id, handler), - DMC_EVT_HTP(i915, dmc_id, handler)); + disable_event_handler(display, + DMC_EVT_CTL(display, dmc_id, handler), + DMC_EVT_HTP(display, dmc_id, handler)); } } -static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable) { enum pipe pipe; @@ -451,84 +458,86 @@ static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool ena */ if (enable) for (pipe = PIPE_A; pipe <= PIPE_D; pipe++) - intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe), 0, PIPEDMC_GATING_DIS); else for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) - intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe), PIPEDMC_GATING_DIS, 0); } -static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915) +static void mtl_pipedmc_clock_gating_wa(struct intel_display *display) { /* * Wa_16015201720 * The WA requires clock gating to be disabled all the time * for pipe A and B. 
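 * (intel_de_rmw(display, reg, clear, set) below does a read-modify-write,
 * i.e. new = (old & ~clear) | set; here clear is 0, so the two
 * gating-disable bits are only ever set, never cleared.)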
*/ - intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, + intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); } -static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable) { - if (DISPLAY_VER(i915) >= 14 && enable) - mtl_pipedmc_clock_gating_wa(i915); - else if (DISPLAY_VER(i915) == 13) - adlp_pipedmc_clock_gating_wa(i915, enable); + if (DISPLAY_VER(display) >= 14 && enable) + mtl_pipedmc_clock_gating_wa(display); + else if (DISPLAY_VER(display) == 13) + adlp_pipedmc_clock_gating_wa(display, enable); } -void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) +void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) return; - if (DISPLAY_VER(i915) >= 14) - intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); else - intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); } -void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) +void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) return; - if (DISPLAY_VER(i915) >= 14) - intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); else - intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); } -static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915, +static bool is_dmc_evt_ctl_reg(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg) { u32 offset = i915_mmio_reg_offset(reg); - u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0)); - u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); + u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)); + u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); return offset >= start && offset < end; } -static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915, +static bool is_dmc_evt_htp_reg(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg) { u32 offset = i915_mmio_reg_offset(reg); - u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0)); - u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); + u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)); + u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); return offset >= start && offset < end; } -static bool disable_dmc_evt(struct drm_i915_private *i915, +static bool disable_dmc_evt(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg, u32 data) { - if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg)) + struct drm_i915_private *i915 = to_i915(display->drm); + + if (!is_dmc_evt_ctl_reg(display, dmc_id, reg)) return false; /* keep all pipe DMC events 
disabled by default */ @@ -548,11 +557,11 @@ static bool disable_dmc_evt(struct drm_i915_private *i915, return false; } -static u32 dmc_mmiodata(struct drm_i915_private *i915, +static u32 dmc_mmiodata(struct intel_display *display, struct intel_dmc *dmc, enum intel_dmc_id dmc_id, int i) { - if (disable_dmc_evt(i915, dmc_id, + if (disable_dmc_evt(display, dmc_id, dmc->dmc_info[dmc_id].mmioaddr[i], dmc->dmc_info[dmc_id].mmiodata[i])) return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, @@ -565,25 +574,26 @@ static u32 dmc_mmiodata(struct drm_i915_private *i915, /** * intel_dmc_load_program() - write the firmware from memory to registers. - * @i915: i915 drm device. + * @display: display instance * * DMC firmware is read from a .bin file and kept in internal memory one time. * Every time the display comes back from a low power state, this function is called to * copy the firmware from internal memory to registers. */ -void intel_dmc_load_program(struct drm_i915_private *i915) +void intel_dmc_load_program(struct intel_display *display) { - struct i915_power_domains *power_domains = &i915->display.power.domains; - struct intel_dmc *dmc = i915_to_dmc(i915); + struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm); + struct i915_power_domains *power_domains = &display->power.domains; + struct intel_dmc *dmc = display_to_dmc(display); enum intel_dmc_id dmc_id; u32 i; - if (!intel_dmc_has_payload(i915)) + if (!intel_dmc_has_payload(display)) return; - pipedmc_clock_gating_wa(i915, true); + pipedmc_clock_gating_wa(display, true); - disable_all_event_handlers(i915); + disable_all_event_handlers(display); assert_rpm_wakelock_held(&i915->runtime_pm); @@ -591,7 +601,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915) for_each_dmc_id(dmc_id) { for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { - intel_de_write_fw(i915, + intel_de_write_fw(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), dmc->dmc_info[dmc_id].payload[i]); } @@ -601,48 +611,48 @@ void intel_dmc_load_program(struct drm_i915_private *i915) for_each_dmc_id(dmc_id) { for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { - intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], - dmc_mmiodata(i915, dmc, dmc_id, i)); + intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i], + dmc_mmiodata(display, dmc, dmc_id, i)); } } power_domains->dc_state = 0; - gen9_set_dc_state_debugmask(i915); + gen9_set_dc_state_debugmask(display); - pipedmc_clock_gating_wa(i915, false); + pipedmc_clock_gating_wa(display, false); } /** * intel_dmc_disable_program() - disable the firmware - * @i915: i915 drm device + * @display: display instance * * Disable all event handlers in the firmware, making sure the firmware is * inactive after the display is uninitialized. 
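 * Each handler is a DMC_EVT_CTL/DMC_EVT_HTP register pair; see
 * disable_event_handler() above, which parks the control register on the
 * "false" event and zeroes the corresponding HTP register.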
*/ -void intel_dmc_disable_program(struct drm_i915_private *i915) +void intel_dmc_disable_program(struct intel_display *display) { - if (!intel_dmc_has_payload(i915)) + if (!intel_dmc_has_payload(display)) return; - pipedmc_clock_gating_wa(i915, true); - disable_all_event_handlers(i915); - pipedmc_clock_gating_wa(i915, false); + pipedmc_clock_gating_wa(display, true); + disable_all_event_handlers(display); + pipedmc_clock_gating_wa(display, false); - intel_dmc_wl_disable(&i915->display); + intel_dmc_wl_disable(display); } -void assert_dmc_loaded(struct drm_i915_private *i915) +void assert_dmc_loaded(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); - drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); - drm_WARN_ONCE(&i915->drm, dmc && - !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n"); + drm_WARN_ONCE(display->drm, dmc && + !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); - drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), + drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); - drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL), + drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL), "DMC HTP Not fine\n"); } @@ -673,7 +683,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, const struct stepping_info *si, u8 package_ver) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; enum intel_dmc_id dmc_id; unsigned int i; @@ -681,7 +691,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, dmc_id = package_ver <= 1 ? 
DMC_FW_MAIN : fw_info[i].dmc_id; if (!is_valid_dmc_id(dmc_id)) { - drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); + drm_dbg(display->drm, "Unsupported firmware id: %u\n", dmc_id); continue; } @@ -703,7 +713,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, int header_ver, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; u32 start_range, end_range; int i; @@ -713,14 +723,14 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, } else if (dmc_id == DMC_FW_MAIN) { start_range = TGL_MAIN_MMIO_START; end_range = TGL_MAIN_MMIO_END; - } else if (DISPLAY_VER(i915) >= 13) { + } else if (DISPLAY_VER(display) >= 13) { start_range = ADLP_PIPE_MMIO_START; end_range = ADLP_PIPE_MMIO_END; - } else if (DISPLAY_VER(i915) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { start_range = TGL_PIPE_MMIO_START(dmc_id); end_range = TGL_PIPE_MMIO_END(dmc_id); } else { - drm_warn(&i915->drm, "Unknown mmio range for sanity check"); + drm_warn(display->drm, "Unknown mmio range for sanity check"); return false; } @@ -736,7 +746,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, size_t rem_size, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; @@ -784,39 +794,39 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, start_mmioaddr = DMC_V1_MMIO_START_RANGE; dmc_header_size = sizeof(*v1); } else { - drm_err(&i915->drm, "Unknown DMC fw header version: %u\n", + drm_err(display->drm, "Unknown DMC fw header version: %u\n", dmc_header->header_ver); return 0; } if (header_len_bytes != dmc_header_size) { - drm_err(&i915->drm, "DMC firmware has wrong dmc header length " + drm_err(display->drm, "DMC firmware has wrong dmc header length " "(%u bytes)\n", header_len_bytes); return 0; } /* Cache the dmc header info. */ if (mmio_count > mmio_count_max) { - drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count); + drm_err(display->drm, "DMC firmware has wrong mmio count %u\n", mmio_count); return 0; } if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count, dmc_header->header_ver, dmc_id)) { - drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n"); + drm_err(display->drm, "DMC firmware has Wrong MMIO Addresses\n"); return 0; } - drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id); + drm_dbg_kms(display->drm, "DMC %d:\n", dmc_id); for (i = 0; i < mmio_count; i++) { dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); dmc_info->mmiodata[i] = mmiodata[i]; - drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", + drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", i, mmioaddr[i], mmiodata[i], - is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : - is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", - disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i], + is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : + is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", + disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i], dmc_info->mmiodata[i]) ? 
" (disabling)" : ""); } dmc_info->mmio_count = mmio_count; @@ -830,7 +840,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, goto error_truncated; if (payload_size > dmc->max_fw_size) { - drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size); + drm_err(display->drm, "DMC FW too big (%u bytes)\n", payload_size); return 0; } dmc_info->dmc_fw_size = dmc_header->fw_size; @@ -845,7 +855,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, return header_len_bytes + payload_size; error_truncated: - drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); + drm_err(display->drm, "Truncated DMC firmware, refusing.\n"); return 0; } @@ -855,7 +865,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, const struct stepping_info *si, size_t rem_size) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; @@ -868,7 +878,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, } else if (package_header->header_ver == 2) { max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES; } else { - drm_err(&i915->drm, "DMC firmware has unknown header version %u\n", + drm_err(display->drm, "DMC firmware has unknown header version %u\n", package_header->header_ver); return 0; } @@ -882,7 +892,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, goto error_truncated; if (package_header->header_len * 4 != package_size) { - drm_err(&i915->drm, "DMC firmware has wrong package header length " + drm_err(display->drm, "DMC firmware has wrong package header length " "(%u bytes)\n", package_size); return 0; } @@ -900,7 +910,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, return package_size; error_truncated: - drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); + drm_err(display->drm, "Truncated DMC firmware, refusing.\n"); return 0; } @@ -909,16 +919,16 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; if (rem_size < sizeof(struct intel_css_header)) { - drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); + drm_err(display->drm, "Truncated DMC firmware, refusing.\n"); return 0; } if (sizeof(struct intel_css_header) != (css_header->header_len * 4)) { - drm_err(&i915->drm, "DMC firmware has wrong CSS header length " + drm_err(display->drm, "DMC firmware has wrong CSS header length " "(%u bytes)\n", (css_header->header_len * 4)); return 0; @@ -931,12 +941,12 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; struct stepping_info display_info = { '*', '*'}; - const struct stepping_info *si = intel_get_stepping_info(i915, &display_info); + const struct stepping_info *si = intel_get_stepping_info(display, &display_info); enum intel_dmc_id dmc_id; u32 readcount = 0; u32 r, offset; @@ -966,7 +976,7 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; if (offset > fw->size) { - drm_err(&i915->drm, "Reading beyond the fw_size\n"); + drm_err(display->drm, "Reading beyond the fw_size\n"); continue; } @@ -974,30 +984,35 @@ static int 
parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); } - if (!intel_dmc_has_payload(i915)) { - drm_err(&i915->drm, "DMC firmware main program not found\n"); + if (!intel_dmc_has_payload(display)) { + drm_err(display->drm, "DMC firmware main program not found\n"); return -ENOENT; } return 0; } -static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) +static void intel_dmc_runtime_pm_get(struct intel_display *display) { - drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); - i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); + struct drm_i915_private *i915 = to_i915(display->drm); + + drm_WARN_ON(display->drm, display->dmc.wakeref); + display->dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } -static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) +static void intel_dmc_runtime_pm_put(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->display.dmc.wakeref); + fetch_and_zero(&display->dmc.wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); } -static const char *dmc_fallback_path(struct drm_i915_private *i915) +static const char *dmc_fallback_path(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + if (IS_ALDERLAKE_P(i915)) return ADLP_DMC_FALLBACK_PATH; @@ -1007,45 +1022,45 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915) static void dmc_load_work_fn(struct work_struct *work) { struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; const struct firmware *fw = NULL; const char *fallback_path; int err; - err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); + err = request_firmware(&fw, dmc->fw_path, display->drm->dev); - if (err == -ENOENT && !dmc_firmware_param(i915)) { - fallback_path = dmc_fallback_path(i915); + if (err == -ENOENT && !dmc_firmware_param(display)) { + fallback_path = dmc_fallback_path(display); if (fallback_path) { - drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", + drm_dbg_kms(display->drm, "%s not found, falling back to %s\n", dmc->fw_path, fallback_path); - err = request_firmware(&fw, fallback_path, i915->drm.dev); + err = request_firmware(&fw, fallback_path, display->drm->dev); if (err == 0) dmc->fw_path = fallback_path; } } if (err) { - drm_notice(&i915->drm, + drm_notice(display->drm, "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n", dmc->fw_path, ERR_PTR(err)); - drm_notice(&i915->drm, "DMC firmware homepage: %s", + drm_notice(display->drm, "DMC firmware homepage: %s", INTEL_DMC_FIRMWARE_URL); return; } err = parse_dmc_fw(dmc, fw); if (err) { - drm_notice(&i915->drm, + drm_notice(display->drm, "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n", dmc->fw_path, ERR_PTR(err)); goto out; } - intel_dmc_load_program(i915); - intel_dmc_runtime_pm_put(i915); + intel_dmc_load_program(display); + intel_dmc_runtime_pm_put(display); - drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", + drm_info(display->drm, "Finished loading DMC firmware %s (v%u.%u)\n", dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); @@ -1055,16 +1070,17 @@ out: /** * intel_dmc_init() - initialize the firmware loading. - * @i915: i915 drm device. 
+ * @display: display instance * * This function is called at the time of loading the display driver to read * firmware from a .bin file and copy it into internal memory. */ -void intel_dmc_init(struct drm_i915_private *i915) +void intel_dmc_init(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc *dmc; - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; /* @@ -1075,35 +1091,35 @@ void intel_dmc_init(struct drm_i915_private *i915) * suspend as runtime suspend *requires* a working DMC for whatever * reason. */ - intel_dmc_runtime_pm_get(i915); + intel_dmc_runtime_pm_get(display); dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); if (!dmc) return; - dmc->i915 = i915; + dmc->display = display; INIT_WORK(&dmc->work, dmc_load_work_fn); - dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size); + dmc->fw_path = dmc_firmware_default(display, &dmc->max_fw_size); - if (dmc_firmware_param_disabled(i915)) { - drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n"); + if (dmc_firmware_param_disabled(display)) { + drm_info(display->drm, "Disabling DMC firmware and runtime PM\n"); goto out; } - if (dmc_firmware_param(i915)) - dmc->fw_path = dmc_firmware_param(i915); + if (dmc_firmware_param(display)) + dmc->fw_path = dmc_firmware_param(display); if (!dmc->fw_path) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "No known DMC firmware for platform, disabling runtime PM\n"); goto out; } - i915->display.dmc.dmc = dmc; + display->dmc.dmc = dmc; - drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); + drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path); queue_work(i915->unordered_wq, &dmc->work); return; @@ -1114,129 +1130,152 @@ out: /** * intel_dmc_suspend() - prepare DMC firmware before system suspend - * @i915: i915 drm device + * @display: display instance * * Prepare the DMC firmware before entering system suspend. This includes * flushing pending work items and releasing any resources acquired during * init. */ -void intel_dmc_suspend(struct drm_i915_private *i915) +void intel_dmc_suspend(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; if (dmc) flush_work(&dmc->work); - intel_dmc_wl_disable(&i915->display); + intel_dmc_wl_disable(display); /* Drop the reference held in case DMC isn't loaded. */ - if (!intel_dmc_has_payload(i915)) - intel_dmc_runtime_pm_put(i915); + if (!intel_dmc_has_payload(display)) + intel_dmc_runtime_pm_put(display); } /** * intel_dmc_resume() - init DMC firmware during system resume - * @i915: i915 drm device + * @display: display instance * * Reinitialize the DMC firmware during system resume, reacquiring any * resources released in intel_dmc_suspend(). */ -void intel_dmc_resume(struct drm_i915_private *i915) +void intel_dmc_resume(struct intel_display *display) { - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; /* * Reacquire the reference to keep RPM disabled in case DMC isn't * loaded. */ - if (!intel_dmc_has_payload(i915)) - intel_dmc_runtime_pm_get(i915); + if (!intel_dmc_has_payload(display)) + intel_dmc_runtime_pm_get(display); } /** * intel_dmc_fini() - unload the DMC firmware. - * @i915: i915 drm device. + * @display: display instance * * Firmware unloading includes freeing the internal memory and resetting the * firmware loading status. 
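 * Note that this also flushes a still-pending asynchronous firmware load,
 * via the intel_dmc_suspend() call below.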
*/ -void intel_dmc_fini(struct drm_i915_private *i915) +void intel_dmc_fini(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); enum intel_dmc_id dmc_id; - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; - intel_dmc_suspend(i915); - drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); + intel_dmc_suspend(display); + drm_WARN_ON(display->drm, display->dmc.wakeref); if (dmc) { for_each_dmc_id(dmc_id) kfree(dmc->dmc_info[dmc_id].payload); kfree(dmc); - i915->display.dmc.dmc = NULL; + display->dmc.dmc = NULL; } } -void intel_dmc_print_error_state(struct drm_printer *p, - struct drm_i915_private *i915) +struct intel_dmc_snapshot { + bool initialized; + bool loaded; + u32 version; +}; + +struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); + struct intel_dmc_snapshot *snapshot; - if (!HAS_DMC(i915)) - return; + if (!HAS_DMC(display)) + return NULL; + + snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC); + if (!snapshot) + return NULL; - drm_printf(p, "DMC initialized: %s\n", str_yes_no(dmc)); - drm_printf(p, "DMC loaded: %s\n", - str_yes_no(intel_dmc_has_payload(i915))); + snapshot->initialized = dmc; + snapshot->loaded = intel_dmc_has_payload(display); if (dmc) + snapshot->version = dmc->version; + + return snapshot; +} + +void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p) +{ + if (!snapshot) + return; + + drm_printf(p, "DMC initialized: %s\n", str_yes_no(snapshot->initialized)); + drm_printf(p, "DMC loaded: %s\n", str_yes_no(snapshot->loaded)); + if (snapshot->initialized) drm_printf(p, "DMC fw version: %d.%d\n", - DMC_VERSION_MAJOR(dmc->version), - DMC_VERSION_MINOR(dmc->version)); + DMC_VERSION_MAJOR(snapshot->version), + DMC_VERSION_MINOR(snapshot->version)); } static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { - struct drm_i915_private *i915 = m->private; - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_display *display = m->private; + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_dmc *dmc = display_to_dmc(display); intel_wakeref_t wakeref; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return -ENODEV; wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", - str_yes_no(intel_dmc_has_payload(i915))); + str_yes_no(intel_dmc_has_payload(display))); seq_printf(m, "path: %s\n", dmc ? 
dmc->fw_path : "N/A"); seq_printf(m, "Pipe A fw needed: %s\n", - str_yes_no(DISPLAY_VER(i915) >= 12)); + str_yes_no(DISPLAY_VER(display) >= 12)); seq_printf(m, "Pipe A fw loaded: %s\n", - str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); + str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA))); seq_printf(m, "Pipe B fw needed: %s\n", str_yes_no(IS_ALDERLAKE_P(i915) || - DISPLAY_VER(i915) >= 14)); + DISPLAY_VER(display) >= 14)); seq_printf(m, "Pipe B fw loaded: %s\n", - str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); + str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB))); - if (!intel_dmc_has_payload(i915)) + if (!intel_dmc_has_payload(display)) goto out; seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); - if (DISPLAY_VER(i915) >= 12) { + if (DISPLAY_VER(display) >= 12) { i915_reg_t dc3co_reg; - if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) { + if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) { dc3co_reg = DG1_DMC_DEBUG3; dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; } else { @@ -1246,7 +1285,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) } seq_printf(m, "DC3CO count: %d\n", - intel_de_read(i915, dc3co_reg)); + intel_de_read(display, dc3co_reg)); } else { dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT : SKL_DMC_DC3_DC5_COUNT; @@ -1254,18 +1293,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) dc6_reg = SKL_DMC_DC5_DC6_COUNT; } - seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg)); + seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg)); if (i915_mmio_reg_valid(dc6_reg)) seq_printf(m, "DC5 -> DC6 count: %d\n", - intel_de_read(i915, dc6_reg)); + intel_de_read(display, dc6_reg)); seq_printf(m, "program base: 0x%08x\n", - intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); + intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); out: seq_printf(m, "ssp base: 0x%08x\n", - intel_de_read(i915, DMC_SSP_BASE)); - seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL)); + intel_de_read(display, DMC_SSP_BASE)); + seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL)); intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -1274,10 +1313,10 @@ out: DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status); -void intel_dmc_debugfs_register(struct drm_i915_private *i915) +void intel_dmc_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root, - i915, &intel_dmc_debugfs_status_fops); + display, &intel_dmc_debugfs_status_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 54cff6002e31..44cecef98e73 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -9,22 +9,24 @@ #include <linux/types.h> enum pipe; -struct drm_i915_private; struct drm_printer; +struct intel_display; +struct intel_dmc_snapshot; -void intel_dmc_init(struct drm_i915_private *i915); -void intel_dmc_load_program(struct drm_i915_private *i915); -void intel_dmc_disable_program(struct drm_i915_private *i915); -void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe); -void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe); -void intel_dmc_fini(struct drm_i915_private *i915); -void intel_dmc_suspend(struct drm_i915_private *i915); -void 
intel_dmc_resume(struct drm_i915_private *i915); -bool intel_dmc_has_payload(struct drm_i915_private *i915); -void intel_dmc_debugfs_register(struct drm_i915_private *i915); -void intel_dmc_print_error_state(struct drm_printer *p, - struct drm_i915_private *i915); +void intel_dmc_init(struct intel_display *display); +void intel_dmc_load_program(struct intel_display *display); +void intel_dmc_disable_program(struct intel_display *display); +void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe); +void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe); +void intel_dmc_fini(struct intel_display *display); +void intel_dmc_suspend(struct intel_display *display); +void intel_dmc_resume(struct intel_display *display); +bool intel_dmc_has_payload(struct intel_display *display); +void intel_dmc_debugfs_register(struct intel_display *display); -void assert_dmc_loaded(struct drm_i915_private *i915); +struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display); +void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p); + +void assert_dmc_loaded(struct intel_display *display); #endif /* __INTEL_DMC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c index d9864b9cc429..5634ff07269d 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c @@ -109,10 +109,8 @@ static bool intel_dmc_wl_check_range(u32 address) static bool __intel_dmc_wl_supported(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(display) < 20 || - !intel_dmc_has_payload(i915) || + !intel_dmc_has_payload(display) || !display->params.enable_dmc_wl) return false; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 90fa73575feb..ff5ba7b3035f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -67,6 +67,7 @@ #include "intel_dp_hdcp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" @@ -82,8 +83,10 @@ #include "intel_modeset_lock.h" #include "intel_panel.h" #include "intel_pch_display.h" +#include "intel_pfit.h" #include "intel_pps.h" #include "intel_psr.h" +#include "intel_runtime_pm.h" #include "intel_quirks.h" #include "intel_tc.h" #include "intel_vdsc.h" @@ -103,13 +106,6 @@ /* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ #define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 -/* Compliance test status bits */ -#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 -#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) -#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) -#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) - - /* Constants for DP DSC configurations */ static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; @@ -501,7 +497,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp) if (intel_encoder_is_c10phy(encoder)) return 810000; - if (DISPLAY_VER_FULL(to_i915(encoder->base.dev)) == IP_VER(14, 1)) + if (DISPLAY_VERx100(to_i915(encoder->base.dev)) == 1401) return 1350000; return 2000000; @@ -770,8 +766,8 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp) intel_dp_link_config_init(intel_dp); } -static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int 
link_rate, - u8 lane_count) +bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, + u8 lane_count) { /* * FIXME: we need to synchronize the current link parameters with @@ -865,36 +861,74 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p return bits_per_pixel; } -static -u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915, - u32 mode_clock, u32 mode_hdisplay, - bool bigjoiner) +static int bigjoiner_interface_bits(struct intel_display *display) +{ + return DISPLAY_VER(display) >= 14 ? 36 : 24; +} + +static u32 bigjoiner_bw_max_bpp(struct intel_display *display, u32 mode_clock, + int num_joined_pipes) +{ + u32 max_bpp; + /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */ + int ppc = 2; + int num_big_joiners = num_joined_pipes / 2; + + max_bpp = display->cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits(display) / + intel_dp_mode_to_fec_clock(mode_clock); + + max_bpp *= num_big_joiners; + + return max_bpp; + +} + +static u32 small_joiner_ram_max_bpp(struct intel_display *display, + u32 mode_hdisplay, + int num_joined_pipes) { - u32 max_bpp_small_joiner_ram; + struct drm_i915_private *i915 = to_i915(display->drm); + u32 max_bpp; /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ - max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; + max_bpp = small_joiner_ram_size_bits(i915) / mode_hdisplay; - if (bigjoiner) { - int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24; - /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */ - int ppc = 2; - u32 max_bpp_bigjoiner = - i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits / - intel_dp_mode_to_fec_clock(mode_clock); + max_bpp *= num_joined_pipes; - max_bpp_small_joiner_ram *= 2; + return max_bpp; +} - return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner); - } +static int ultrajoiner_ram_bits(void) +{ + return 4 * 72 * 512; +} + +static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay) +{ + return ultrajoiner_ram_bits() / mode_hdisplay; +} + +static +u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915, + u32 mode_clock, u32 mode_hdisplay, + int num_joined_pipes) +{ + struct intel_display *display = to_intel_display(&i915->drm); + u32 max_bpp = small_joiner_ram_max_bpp(display, mode_hdisplay, num_joined_pipes); + + if (num_joined_pipes > 1) + max_bpp = min(max_bpp, bigjoiner_bw_max_bpp(display, mode_clock, + num_joined_pipes)); + if (num_joined_pipes == 4) + max_bpp = min(max_bpp, ultrajoiner_ram_max_bpp(mode_hdisplay)); - return max_bpp_small_joiner_ram; + return max_bpp; } u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay, - bool bigjoiner, + int num_joined_pipes, enum intel_output_format output_format, u32 pipe_bpp, u32 timeslots) @@ -940,7 +974,7 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, intel_dp_mode_to_fec_clock(mode_clock)); joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock, - mode_hdisplay, bigjoiner); + mode_hdisplay, num_joined_pipes); bits_per_pixel = min(bits_per_pixel, joiner_max_bpp); bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp); @@ -950,7 +984,7 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, int mode_clock, int mode_hdisplay, - bool bigjoiner) + int 
num_joined_pipes) { struct drm_i915_private *i915 = to_i915(connector->base.dev); u8 min_slice_count, i; @@ -984,14 +1018,18 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, /* Find the closest match to the valid slice count values */ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { - u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; + u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes; if (test_slice_count > drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false)) break; - /* big joiner needs small joiner to be enabled */ - if (bigjoiner && test_slice_count < 4) + /* + * Bigjoiner needs small joiner to be enabled. + * So there should be at least 2 dsc slices per pipe, + * whenever bigjoiner is enabled. + */ + if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2) continue; if (min_slice_count <= test_slice_count) @@ -1270,17 +1308,45 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, return MODE_OK; } -bool intel_dp_need_joiner(struct intel_dp *intel_dp, - struct intel_connector *connector, - int hdisplay, int clock) +static +bool intel_dp_needs_joiner(struct intel_dp *intel_dp, + struct intel_connector *connector, + int hdisplay, int clock, + int num_joined_pipes) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int hdisplay_limit; if (!intel_dp_has_joiner(intel_dp)) return false; - return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 || - connector->force_bigjoiner_enable; + num_joined_pipes /= 2; + + hdisplay_limit = DISPLAY_VER(i915) >= 30 ? 6144 : 5120; + + return clock > num_joined_pipes * i915->display.cdclk.max_dotclk_freq || + hdisplay > num_joined_pipes * hdisplay_limit; +} + +int intel_dp_num_joined_pipes(struct intel_dp *intel_dp, + struct intel_connector *connector, + int hdisplay, int clock) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (connector->force_joined_pipes) + return connector->force_joined_pipes; + + if (HAS_ULTRAJOINER(i915) && + intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4)) + return 4; + + if ((HAS_BIGJOINER(i915) || HAS_UNCOMPRESSED_JOINER(i915)) && + intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2)) + return 2; + + return 1; } bool intel_dp_has_dsc(const struct intel_connector *connector) @@ -1317,7 +1383,8 @@ intel_dp_mode_valid(struct drm_connector *_connector, u16 dsc_max_compressed_bpp = 0; u8 dsc_slice_count = 0; enum drm_mode_status status; - bool dsc = false, joiner = false; + bool dsc = false; + int num_joined_pipes; status = intel_cpu_transcoder_mode_valid(dev_priv, mode); if (status != MODE_OK) @@ -1338,11 +1405,10 @@ intel_dp_mode_valid(struct drm_connector *_connector, target_clock = fixed_mode->clock; } - if (intel_dp_need_joiner(intel_dp, connector, - mode->hdisplay, target_clock)) { - joiner = true; - max_dotclk *= 2; - } + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, + mode->hdisplay, target_clock); + max_dotclk *= num_joined_pipes; + if (target_clock > max_dotclk) return MODE_CLOCK_HIGH; @@ -1386,20 +1452,20 @@ intel_dp_mode_valid(struct drm_connector *_connector, max_lanes, target_clock, mode->hdisplay, - joiner, + num_joined_pipes, output_format, pipe_bpp, 64); dsc_slice_count = intel_dp_dsc_get_slice_count(connector, target_clock, mode->hdisplay, - joiner); + num_joined_pipes); } dsc = dsc_max_compressed_bpp && dsc_slice_count; } - if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) + if 
(intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) return MODE_CLOCK_HIGH; if (mode_rate > max_rate && !dsc) @@ -1409,7 +1475,7 @@ intel_dp_mode_valid(struct drm_connector *_connector, if (status != MODE_OK) return status; - return intel_mode_valid_max_plane_size(dev_priv, mode, joiner); + return intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes); } bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) @@ -1632,45 +1698,6 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp, return bpp; } -/* Adjust link config limits based on compliance test requests. */ -void -intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct link_config_limits *limits) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - /* For DP Compliance we override the computed bpp for the pipe */ - if (intel_dp->compliance.test_data.bpc != 0) { - int bpp = 3 * intel_dp->compliance.test_data.bpc; - - limits->pipe.min_bpp = limits->pipe.max_bpp = bpp; - pipe_config->dither_force_disable = bpp == 6 * 3; - - drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); - } - - /* Use values requested by Compliance Test Request */ - if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { - int index; - - /* Validate the compliance test data since max values - * might have changed due to link train fallback. - */ - if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, - intel_dp->compliance.test_lane_count)) { - index = intel_dp_rate_index(intel_dp->common_rates, - intel_dp->num_common_rates, - intel_dp->compliance.test_link_rate); - if (index >= 0) - limits->min_rate = limits->max_rate = - intel_dp->compliance.test_link_rate; - limits->min_lane_count = limits->max_lane_count = - intel_dp->compliance.test_lane_count; - } - } -} - static bool has_seamless_m_n(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); @@ -2109,6 +2136,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; int dsc_joiner_max_bpp; + int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config); dsc_src_min_bpp = dsc_src_min_compressed_bpp(); dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); @@ -2123,7 +2151,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock, adjusted_mode->hdisplay, - pipe_config->joiner_pipes); + num_joined_pipes); dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp); dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16)); @@ -2308,11 +2336,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config); int ret; + /* + * Though eDP v1.5 supports FEC with DSC, unlike DP it is optional. + * Since FEC is a bandwidth overhead, continue to not enable it for + * eDP until there is a good reason to do so. 
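+ * Note that UHBR link rates use 128b/132b channel encoding, which has
+ * FEC built in, which is why intel_dp_is_uhbr() also gates fec_enable
+ * below.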
+ */ pipe_config->fec_enable = pipe_config->fec_enable || (!intel_dp_is_edp(intel_dp) && - intel_dp_supports_fec(intel_dp, connector, pipe_config)); + intel_dp_supports_fec(intel_dp, connector, pipe_config) && + !intel_dp_is_uhbr(pipe_config)); if (!intel_dp_supports_dsc(connector, pipe_config)) return -EINVAL; @@ -2357,7 +2392,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, intel_dp_dsc_get_slice_count(connector, adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay, - pipe_config->joiner_pipes); + num_joined_pipes); if (!dsc_dp_slice_count) { drm_dbg_kms(&dev_priv->drm, "Compressed Slice Count not supported\n"); @@ -2445,7 +2480,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, encoder->base.base.id, encoder->base.name, crtc->base.base.id, crtc->base.name, adjusted_mode->crtc_clock, - dsc ? "on" : "off", + str_on_off(dsc), limits->max_lane_count, limits->max_rate, limits->pipe.max_bpp, @@ -2488,7 +2523,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_rate = limits->max_rate; } - intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); + intel_dp_test_compute_config(intel_dp, crtc_state, limits); return intel_dp_compute_config_link_bpp_limits(intel_dp, crtc_state, @@ -2507,14 +2542,17 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); } -bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner) +bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, + int num_joined_pipes) { /* * Pipe joiner needs compression up to display 12 due to bandwidth * limitation. DG2 onwards pipe joiner can be enabled without * compression. + * Ultrajoiner always needs compression. 
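+ * In other words: two joined pipes need DSC only on platforms without
+ * an uncompressed joiner, while four joined pipes (ultrajoiner) need
+ * DSC unconditionally.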
*/ - return DISPLAY_VER(i915) < 13 && use_joiner; + return (!HAS_UNCOMPRESSED_JOINER(i915) && num_joined_pipes == 2) || + num_joined_pipes == 4; } static int @@ -2532,18 +2570,20 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; bool dsc_needed, joiner_needs_dsc; + int num_joined_pipes; int ret = 0; if (pipe_config->fec_enable && !intel_dp_supports_fec(intel_dp, connector, pipe_config)) return -EINVAL; - if (intel_dp_need_joiner(intel_dp, connector, - adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_clock)) - pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, + adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_clock); + if (num_joined_pipes > 1) + pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe); - joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->joiner_pipes); + joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, num_joined_pipes); dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || !intel_dp_compute_config_limits(intel_dp, pipe_config, @@ -2742,7 +2782,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp, as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC; as_sdp->length = 0x9; as_sdp->duration_incr_ms = 0; - as_sdp->duration_incr_ms = 0; if (crtc_state->cmrr.enable) { as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED; @@ -3365,30 +3404,43 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, } static void -intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) +intel_dp_init_source_oui(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 oui[] = { 0x00, 0xaa, 0x01 }; u8 buf[3] = {}; + if (READ_ONCE(intel_dp->oui_valid)) + return; + + WRITE_ONCE(intel_dp->oui_valid, true); + /* * During driver init, we want to be careful and avoid changing the source OUI if it's * already set to what we want, so as to avoid clearing any state by accident */ - if (careful) { - if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) - drm_err(&i915->drm, "Failed to read source OUI\n"); + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) + drm_err(&i915->drm, "Failed to read source OUI\n"); - if (memcmp(oui, buf, sizeof(oui)) == 0) - return; + if (memcmp(oui, buf, sizeof(oui)) == 0) { + /* Assume the OUI was written now. 
*/ + intel_dp->last_oui_write = jiffies; + return; } - if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) - drm_err(&i915->drm, "Failed to write source OUI\n"); + if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) { + drm_info(&i915->drm, "Failed to write source OUI\n"); + WRITE_ONCE(intel_dp->oui_valid, false); + } intel_dp->last_oui_write = jiffies; } +void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp) +{ + WRITE_ONCE(intel_dp->oui_valid, false); +} + void intel_dp_wait_source_oui(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; @@ -3424,8 +3476,7 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) lspcon_resume(dp_to_dig_port(intel_dp)); /* Write the source OUI as early as possible */ - if (intel_dp_is_edp(intel_dp)) - intel_edp_init_source_oui(intel_dp, false); + intel_dp_init_source_oui(intel_dp); /* * When turning on, we need to retry for 1ms to give the sink @@ -3900,7 +3951,7 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, str_enable_disable(tmp)); } -bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) +static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { u8 dprx = 0; @@ -3963,6 +4014,23 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector * intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd); } +static void +intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ + if (!HAS_DSC(i915)) + return; + + if (intel_dp_is_edp(intel_dp)) + intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], + connector); + else + intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], + connector); +} + static void intel_edp_mso_mode_fixup(struct intel_connector *connector, struct drm_display_mode *mode) { @@ -4051,6 +4119,45 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp) intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; } +static void +intel_edp_set_sink_rates(struct intel_dp *intel_dp) +{ + intel_dp->num_sink_rates = 0; + + if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + int i; + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + + for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { + int val = le16_to_cpu(sink_rates[i]); + + if (val == 0) + break; + + /* Value read multiplied by 200kHz gives the per-lane + * link rate in kHz. The source rates are, however, + * stored in terms of LS_Clk kHz. The full conversion + * back to symbols is + * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) + */ + intel_dp->sink_rates[i] = (val * 200) / 10; + } + intel_dp->num_sink_rates = i; + } + + /* + * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, + * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
+ */ + if (intel_dp->num_sink_rates) + intel_dp->use_rate_select = true; + else + intel_dp_set_sink_rates(intel_dp); +} + static bool intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) { @@ -4090,59 +4197,22 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector } /* + * If needed, program our source OUI so we can make various Intel-specific AUX services + * available (such as HDR backlight controls) + */ + intel_dp_init_source_oui(intel_dp); + + /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] */ intel_psr_init_dpcd(intel_dp); - /* Clear the default sink rates */ - intel_dp->num_sink_rates = 0; - - /* Read the eDP 1.4+ supported link rates. */ - if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { - __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; - int i; - - drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, - sink_rates, sizeof(sink_rates)); - - for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { - int val = le16_to_cpu(sink_rates[i]); - - if (val == 0) - break; - - /* Value read multiplied by 200kHz gives the per-lane - * link rate in kHz. The source rates are, however, - * stored in terms of LS_Clk kHz. The full conversion - * back to symbols is - * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) - */ - intel_dp->sink_rates[i] = (val * 200) / 10; - } - intel_dp->num_sink_rates = i; - } - - /* - * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, - * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. - */ - if (intel_dp->num_sink_rates) - intel_dp->use_rate_select = true; - else - intel_dp_set_sink_rates(intel_dp); + intel_edp_set_sink_rates(intel_dp); intel_dp_set_max_sink_lane_count(intel_dp); /* Read the eDP DSC DPCD registers */ - if (HAS_DSC(dev_priv)) - intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], - connector); - - /* - * If needed, program our source OUI so we can make various Intel-specific AUX services - * available (such as HDR backlight controls) - */ - intel_edp_init_source_oui(intel_dp, true); + intel_dp_detect_dsc_caps(intel_dp, connector); return true; } @@ -4771,328 +4841,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder, } } -static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - int status = 0; - int test_link_rate; - u8 test_lane_count, test_link_bw; - /* (DP CTS 1.2) - * 4.3.1.11 - */ - /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, - &test_lane_count); - - if (status <= 0) { - drm_dbg_kms(&i915->drm, "Lane count read failed\n"); - return DP_TEST_NAK; - } - test_lane_count &= DP_MAX_LANE_COUNT_MASK; - - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, - &test_link_bw); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); - return DP_TEST_NAK; - } - test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); - - /* Validate the requested link rate and lane count */ - if (!intel_dp_link_params_valid(intel_dp, test_link_rate, - test_lane_count)) - return DP_TEST_NAK; - - intel_dp->compliance.test_lane_count = test_lane_count; - intel_dp->compliance.test_link_rate = test_link_rate; - - return DP_TEST_ACK; -} - -static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 test_pattern; - u8 test_misc; - __be16 h_width, v_height; - int status = 
0; - - /* Read the TEST_PATTERN (DP CTS 3.1.5) */ - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, - &test_pattern); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); - return DP_TEST_NAK; - } - if (test_pattern != DP_COLOR_RAMP) - return DP_TEST_NAK; - - status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, - &h_width, 2); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "H Width read failed\n"); - return DP_TEST_NAK; - } - - status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, - &v_height, 2); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "V Height read failed\n"); - return DP_TEST_NAK; - } - - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, - &test_misc); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); - return DP_TEST_NAK; - } - if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) - return DP_TEST_NAK; - if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) - return DP_TEST_NAK; - switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { - case DP_TEST_BIT_DEPTH_6: - intel_dp->compliance.test_data.bpc = 6; - break; - case DP_TEST_BIT_DEPTH_8: - intel_dp->compliance.test_data.bpc = 8; - break; - default: - return DP_TEST_NAK; - } - - intel_dp->compliance.test_data.video_pattern = test_pattern; - intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); - intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); - /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = true; - - return DP_TEST_ACK; -} - -static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 test_result = DP_TEST_ACK; - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_connector *connector = &intel_connector->base; - - if (intel_connector->detect_edid == NULL || - connector->edid_corrupt || - intel_dp->aux.i2c_defer_count > 6) { - /* Check EDID read for NACKs, DEFERs and corruption - * (DP CTS 1.2 Core r1.1) - * 4.2.2.4 : Failed EDID read, I2C_NAK - * 4.2.2.5 : Failed EDID read, I2C_DEFER - * 4.2.2.6 : EDID corruption detected - * Use failsafe mode for all cases - */ - if (intel_dp->aux.i2c_nack_count > 0 || - intel_dp->aux.i2c_defer_count > 0) - drm_dbg_kms(&i915->drm, - "EDID read had %d NACKs, %d DEFERs\n", - intel_dp->aux.i2c_nack_count, - intel_dp->aux.i2c_defer_count); - intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; - } else { - /* FIXME: Get rid of drm_edid_raw() */ - const struct edid *block = drm_edid_raw(intel_connector->detect_edid); - - /* We have to write the checksum of the last block read */ - block += block->extensions; - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, - block->checksum) <= 0) - drm_dbg_kms(&i915->drm, - "Failed to write EDID checksum\n"); - - test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; - intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; - } - - /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = true; - - return test_result; -} - -static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = - to_i915(dp_to_dig_port(intel_dp)->base.base.dev); - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_encoder *encoder = 
&dp_to_dig_port(intel_dp)->base; - enum pipe pipe = crtc->pipe; - u32 pattern_val; - - switch (data->phy_pattern) { - case DP_LINK_QUAL_PATTERN_DISABLE: - drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); - if (DISPLAY_VER(dev_priv) >= 10) - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), - DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, - DP_TP_CTL_LINK_TRAIN_NORMAL); - break; - case DP_LINK_QUAL_PATTERN_D10_2: - drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); - break; - case DP_LINK_QUAL_PATTERN_ERROR_RATE: - drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | - DDI_DP_COMP_CTL_SCRAMBLED_0); - break; - case DP_LINK_QUAL_PATTERN_PRBS7: - drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); - break; - case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM: - /* - * FIXME: Ideally pattern should come from DPCD 0x250. As - * current firmware of DPR-100 could not set it, so hardcoding - * now for complaince test. - */ - drm_dbg_kms(&dev_priv->drm, - "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); - pattern_val = 0x3e0f83e0; - intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); - pattern_val = 0x0f83e0f8; - intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); - pattern_val = 0x0000f83e; - intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | - DDI_DP_COMP_CTL_CUSTOM80); - break; - case DP_LINK_QUAL_PATTERN_CP2520_PAT_1: - /* - * FIXME: Ideally pattern should come from DPCD 0x24A. As - * current firmware of DPR-100 could not set it, so hardcoding - * now for complaince test. 
- */ - drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n"); - pattern_val = 0xFB; - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | - pattern_val); - break; - case DP_LINK_QUAL_PATTERN_CP2520_PAT_3: - if (DISPLAY_VER(dev_priv) < 10) { - drm_warn(&dev_priv->drm, "Platform does not support TPS4\n"); - break; - } - drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), - DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, - DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4); - break; - default: - drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n"); - } -} - -static void intel_dp_process_phy_request(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - u8 link_status[DP_LINK_STATUS_SIZE]; - - if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, - link_status) < 0) { - drm_dbg_kms(&i915->drm, "failed to get link status\n"); - return; - } - - /* retrieve vswing & pre-emphasis setting */ - intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, - link_status); - - intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); - - intel_dp_phy_pattern_update(intel_dp, crtc_state); - - drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, - intel_dp->train_set, crtc_state->lane_count); - - drm_dp_set_phy_test_pattern(&intel_dp->aux, data, - intel_dp->dpcd[DP_DPCD_REV]); -} - -static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - - if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { - drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); - return DP_TEST_NAK; - } - - /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = true; - - return DP_TEST_ACK; -} - -static void intel_dp_handle_test_request(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 response = DP_TEST_NAK; - u8 request = 0; - int status; - - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); - if (status <= 0) { - drm_dbg_kms(&i915->drm, - "Could not read test request from sink\n"); - goto update_status; - } - - switch (request) { - case DP_TEST_LINK_TRAINING: - drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); - response = intel_dp_autotest_link_training(intel_dp); - break; - case DP_TEST_LINK_VIDEO_PATTERN: - drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); - response = intel_dp_autotest_video_pattern(intel_dp); - break; - case DP_TEST_LINK_EDID_READ: - drm_dbg_kms(&i915->drm, "EDID test requested\n"); - response = intel_dp_autotest_edid(intel_dp); - break; - case DP_TEST_LINK_PHY_TEST_PATTERN: - drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); - response = intel_dp_autotest_phy_pattern(intel_dp); - break; - default: - drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", - request); - break; - } - - if (response & DP_TEST_ACK) - intel_dp->compliance.test_type = request; - -update_status: - status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); - if (status <= 0) - drm_dbg_kms(&i915->drm, - "Could not write test 
response to sink\n"); -} - static bool intel_dp_link_ok(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) { @@ -5290,11 +5038,12 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) return true; /* Retrain if link not ok */ - return !intel_dp_link_ok(intel_dp, link_status); + return !intel_dp_link_ok(intel_dp, link_status) && + !intel_psr_link_ok(intel_dp); } -static bool intel_dp_has_connector(struct intel_dp *intel_dp, - const struct drm_connector_state *conn_state) +bool intel_dp_has_connector(struct intel_dp *intel_dp, + const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder; @@ -5318,6 +5067,21 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp, return false; } +static void wait_for_connector_hw_done(const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_display *display = to_intel_display(connector); + + drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex); + + if (!conn_state->commit) + return; + + drm_WARN_ON(display->drm, + !wait_for_completion_timeout(&conn_state->commit->hw_done, + msecs_to_jiffies(5000))); +} + int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, u8 *pipe_mask) @@ -5354,10 +5118,7 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp, if (!crtc_state->hw.active) continue; - if (conn_state->commit) - drm_WARN_ON(&i915->drm, - !wait_for_completion_timeout(&conn_state->commit->hw_done, - msecs_to_jiffies(5000))); + wait_for_connector_hw_done(conn_state); *pipe_mask |= BIT(crtc->pipe); } @@ -5366,6 +5127,11 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp, return ret; } +void intel_dp_flush_connector_commits(struct intel_connector *connector) +{ + wait_for_connector_hw_done(connector->base.state); +} + static bool intel_dp_is_connected(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; @@ -5445,118 +5211,6 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp) intel_encoder_link_check_queue_work(encoder, 0); } -static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, - struct drm_modeset_acquire_ctx *ctx, - u8 *pipe_mask) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct drm_connector_list_iter conn_iter; - struct intel_connector *connector; - int ret = 0; - - *pipe_mask = 0; - - drm_connector_list_iter_begin(&i915->drm, &conn_iter); - for_each_intel_connector_iter(connector, &conn_iter) { - struct drm_connector_state *conn_state = - connector->base.state; - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc; - - if (!intel_dp_has_connector(intel_dp, conn_state)) - continue; - - crtc = to_intel_crtc(conn_state->crtc); - if (!crtc) - continue; - - ret = drm_modeset_lock(&crtc->base.mutex, ctx); - if (ret) - break; - - crtc_state = to_intel_crtc_state(crtc->base.state); - - drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); - - if (!crtc_state->hw.active) - continue; - - if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done)) - continue; - - *pipe_mask |= BIT(crtc->pipe); - } - drm_connector_list_iter_end(&conn_iter); - - return ret; -} - -static int intel_dp_do_phy_test(struct intel_encoder *encoder, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct 
intel_crtc *crtc; - u8 pipe_mask; - int ret; - - ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, - ctx); - if (ret) - return ret; - - ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); - if (ret) - return ret; - - if (pipe_mask == 0) - return 0; - - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", - encoder->base.base.id, encoder->base.name); - - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - /* test on the MST master transcoder */ - if (DISPLAY_VER(dev_priv) >= 12 && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && - !intel_dp_mst_is_master_trans(crtc_state)) - continue; - - intel_dp_process_phy_request(intel_dp, crtc_state); - break; - } - - return 0; -} - -void intel_dp_phy_test(struct intel_encoder *encoder) -{ - struct drm_modeset_acquire_ctx ctx; - int ret; - - drm_modeset_acquire_init(&ctx, 0); - - for (;;) { - ret = intel_dp_do_phy_test(encoder, &ctx); - - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - continue; - } - - break; - } - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - drm_WARN(encoder->base.dev, ret, - "Acquiring modeset locks failed with %i\n", ret); -} - static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -5572,7 +5226,7 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); if (val & DP_AUTOMATED_TEST_REQUEST) - intel_dp_handle_test_request(intel_dp); + intel_dp_test_request(intel_dp); if (val & DP_CP_IRQ) intel_hdcp_handle_cp_irq(intel_dp->attached_connector); @@ -5625,16 +5279,11 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp) static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 old_sink_count = intel_dp->sink_count; bool reprobe_needed = false; bool ret; - /* - * Clearing compliance test variables to allow capturing - * of values for next automated test request. - */ - memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + intel_dp_test_reset(intel_dp); /* * Now read the DPCD to see if it's actually running @@ -5659,24 +5308,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) intel_psr_short_pulse(intel_dp); - switch (intel_dp->compliance.test_type) { - case DP_TEST_LINK_TRAINING: - drm_dbg_kms(&dev_priv->drm, - "Link Training Compliance Test requested\n"); - /* Send a Hotplug Uevent to userspace to start modeset */ - drm_kms_helper_hotplug_event(&dev_priv->drm); - break; - case DP_TEST_LINK_PHY_TEST_PATTERN: - drm_dbg_kms(&dev_priv->drm, - "PHY test pattern Compliance Test requested\n"); - /* - * Schedule long hpd to do the test - * - * FIXME get rid of the ad-hoc phy test modeset code - * and properly incorporate it into the normal modeset. 
- */ + if (intel_dp_test_short_pulse(intel_dp)) reprobe_needed = true; - } return !reprobe_needed; } @@ -5962,23 +5595,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) } static void -intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ - if (!HAS_DSC(i915)) - return; - - if (intel_dp_is_edp(intel_dp)) - intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], - connector); - else - intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], - connector); -} - -static void intel_dp_detect_sdp_caps(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -6012,6 +5628,10 @@ intel_dp_detect(struct drm_connector *connector, if (!intel_display_driver_check_access(dev_priv)) return connector->status; + intel_dp_flush_connector_commits(intel_connector); + + intel_pps_vdd_on(intel_dp); + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); @@ -6033,7 +5653,7 @@ intel_dp_detect(struct drm_connector *connector, status = connector_status_disconnected; if (status == connector_status_disconnected) { - memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + intel_dp_test_reset(intel_dp); memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); intel_dp->psr.sink_panel_replay_support = false; intel_dp->psr.sink_panel_replay_su_support = false; @@ -6042,12 +5662,17 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_tunnel_disconnect(intel_dp); - goto out; + goto out_unset_edid; } + intel_dp_init_source_oui(intel_dp); + ret = intel_dp_tunnel_detect(intel_dp, ctx); - if (ret == -EDEADLK) - return ret; + if (ret == -EDEADLK) { + status = ret; + + goto out_vdd_off; + } if (ret == 1) intel_connector->base.epoch_counter++; @@ -6075,7 +5700,7 @@ intel_dp_detect(struct drm_connector *connector, * with EDID on it */ status = connector_status_disconnected; - goto out; + goto out_unset_edid; } /* @@ -6104,7 +5729,7 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_check_device_service_irq(intel_dp); -out: +out_unset_edid: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); @@ -6113,6 +5738,9 @@ out: status, intel_dp->dpcd, intel_dp->downstream_ports); +out_vdd_off: + intel_pps_vdd_off(intel_dp); + return status; } @@ -6471,7 +6099,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) u8 dpcd[DP_RECEIVER_CAP_SIZE]; if (dig_port->base.type == INTEL_OUTPUT_EDP && - (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { + (long_hpd || + intel_runtime_pm_suspended(&i915->runtime_pm) || + !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which * would require vdd on to handle it, and thus we @@ -6504,6 +6134,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) if (long_hpd) { intel_dp->reset_link_params = true; + intel_dp_invalidate_source_oui(intel_dp); + return IRQ_NONE; } @@ -6620,20 +6252,8 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp, struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum pipe pipe = INVALID_PIPE; - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - /* - * Figure out the current pipe for the initial backlight setup. - * If the current pipe isn't valid, try the PPS pipe, and if that - * fails just assume pipe A. 
- */ - pipe = vlv_active_pipe(intel_dp); - - if (pipe != PIPE_A && pipe != PIPE_B) - pipe = intel_dp->pps.pps_pipe; - - if (pipe != PIPE_A && pipe != PIPE_B) - pipe = PIPE_A; - } + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + pipe = vlv_pps_backlight_initial_pipe(intel_dp); intel_backlight_setup(connector, pipe); } @@ -6801,6 +6421,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, out_vdd_off: intel_pps_vdd_off_sync(intel_dp); + intel_bios_fini_panel(&intel_connector->panel); return false; } @@ -6840,6 +6461,7 @@ bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { + struct intel_display *display = to_intel_display(dig_port); struct drm_connector *connector = &intel_connector->base; struct intel_dp *intel_dp = &dig_port->dp; struct intel_encoder *intel_encoder = &dig_port->base; @@ -6858,8 +6480,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, return false; intel_dp->reset_link_params = true; - intel_dp->pps.pps_pipe = INVALID_PIPE; - intel_dp->pps.active_pipe = INVALID_PIPE; /* Preserve the current hw state. */ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); @@ -6867,10 +6487,11 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { /* - * Currently we don't support eDP on TypeC ports, although in - * theory it could work on TypeC legacy ports. + * Currently we don't support eDP on TypeC ports for DISPLAY_VER < 30, + * although in theory it could work on TypeC legacy ports. */ - drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder)); + drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder) && + DISPLAY_VER(dev_priv) < 30); type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; @@ -6887,7 +6508,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_set_default_max_sink_lane_count(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); + vlv_pps_pipe_init(intel_dp); intel_dp_aux_init(intel_dp); intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; @@ -6904,7 +6525,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12) connector->interlace_allowed = true; - intel_connector->polled = DRM_CONNECTOR_POLL_HPD; + if (type != DRM_MODE_CONNECTOR_eDP) + intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_connector->base.polled = intel_connector->polled; intel_connector_attach_encoder(intel_connector, intel_encoder); @@ -6930,7 +6552,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_add_properties(intel_dp, connector); - if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { + if (is_hdcp_supported(display, port) && !intel_dp_is_edp(intel_dp)) { int ret = intel_dp_hdcp_init(dig_port, intel_connector); if (ret) drm_dbg_kms(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 1b9aaddd8c35..48f10876be65 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -37,9 +37,6 @@ struct link_config_limits { }; void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp); -void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct link_config_limits *limits); bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 
const struct drm_connector_state *conn_state); int intel_dp_min_bpp(enum intel_output_format output_format); @@ -57,6 +54,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, u8 *pipe_mask); +void intel_dp_flush_connector_commits(struct intel_connector *connector); void intel_dp_link_check(struct intel_encoder *encoder); void intel_dp_check_link_state(struct intel_dp *intel_dp); void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); @@ -117,13 +115,13 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, bool intel_dp_source_supports_tps3(struct drm_i915_private *i915); bool intel_dp_source_supports_tps4(struct drm_i915_private *i915); -bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead); int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, int max_dprx_rate, int max_dprx_lanes); -bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner); +bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, + int num_joined_pipes); bool intel_dp_has_joiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -142,7 +140,7 @@ int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector, u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay, - bool bigjoiner, + int num_joined_pipes, enum intel_output_format output_format, u32 pipe_bpp, u32 timeslots); @@ -152,10 +150,10 @@ int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector int bpc); u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, int mode_clock, int mode_hdisplay, - bool bigjoiner); -bool intel_dp_need_joiner(struct intel_dp *intel_dp, - struct intel_connector *connector, - int hdisplay, int clock); + int num_joined_pipes); +int intel_dp_num_joined_pipes(struct intel_dp *intel_dp, + struct intel_connector *connector, + int hdisplay, int clock); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { @@ -190,8 +188,8 @@ void intel_dp_sync_state(struct intel_encoder *encoder, void intel_dp_check_frl_training(struct intel_dp *intel_dp); void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); -void intel_dp_phy_test(struct intel_encoder *encoder); +void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp); void intel_dp_wait_source_oui(struct intel_dp *intel_dp); int intel_dp_output_bpp(enum intel_output_format output_format, int bpp); @@ -204,4 +202,9 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector); bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder); +bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, + u8 lane_count); +bool intel_dp_has_connector(struct intel_dp *intel_dp, + const struct drm_connector_state *conn_state); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 3425b3643143..00c493cc8a4b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -19,6 +19,7 
@@ #include "intel_dp_hdcp.h" #include "intel_hdcp.h" #include "intel_hdcp_regs.h" +#include "intel_hdcp_shim.h" static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder) { @@ -57,7 +58,7 @@ static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); u8 aksv[DRM_HDCP_KSV_LEN] = {}; ssize_t dpcd_ret; @@ -65,7 +66,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, an, DRM_HDCP_AN_LEN); if (dpcd_ret != DRM_HDCP_AN_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to write An over DP/AUX (%zd)\n", dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; @@ -81,7 +82,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV, aksv, DRM_HDCP_KSV_LEN); if (dpcd_ret != DRM_HDCP_KSV_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to write Aksv over DP/AUX (%zd)\n", dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; @@ -92,13 +93,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret != DRM_HDCP_KSV_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read Bksv from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -108,7 +109,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; /* @@ -119,7 +120,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret != DRM_HDCP_BSTATUS_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -128,7 +129,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, static int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux, - struct drm_i915_private *i915, + struct intel_display *display, u8 *bcaps) { ssize_t ret; @@ -136,7 +137,7 @@ int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux, ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bcaps from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? 
-EIO : ret; } @@ -148,11 +149,11 @@ static int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, display, &bcaps); if (ret) return ret; @@ -164,13 +165,14 @@ static int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret != DRM_HDCP_RI_LEN) { - drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", + drm_dbg_kms(display->drm, + "Read Ri' from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -181,14 +183,14 @@ static int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -200,7 +202,7 @@ static int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; int i; @@ -212,7 +214,7 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, ksv_fifo + i * DRM_HDCP_KSV_LEN, len); if (ret != len) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read ksv[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? -EIO : ret; @@ -225,7 +227,7 @@ static int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) @@ -235,7 +237,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, DP_AUX_HDCP_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret != DRM_HDCP_V_PRIME_PART_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? 
-EIO : ret; } @@ -255,14 +257,14 @@ static bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return false; } @@ -274,11 +276,11 @@ static int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port, bool *hdcp_capable) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, display, &bcaps); if (ret) return ret; @@ -341,7 +343,7 @@ static int intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, u8 *rx_status) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; @@ -350,7 +352,7 @@ intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_DP_RXSTATUS_LEN); if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -396,7 +398,7 @@ static ssize_t intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; @@ -429,7 +431,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, } if (ret) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "msg_id %d, ret %d, timeout(mSec): %d\n", hdcp2_msg_data->msg_id, ret, timeout); @@ -513,8 +515,8 @@ static int intel_dp_hdcp2_read_msg(struct intel_connector *connector, u8 msg_id, void *buf, size_t size) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct drm_dp_aux *aux = &dig_port->dp.aux; struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; @@ -567,7 +569,7 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, ret = drm_dp_dpcd_read(aux, offset, (void *)byte, len); if (ret < 0) { - drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", + drm_dbg_kms(display->drm, "msg_id %d, ret %zd\n", msg_id, ret); return ret; } @@ -580,7 +582,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, if (hdcp2_msg_data->msg_read_timeout > 0) { msg_expired = ktime_after(ktime_get_raw(), msg_end); if (msg_expired) { - drm_dbg_kms(&i915->drm, "msg_id %d, entire msg read timeout(mSec): %d\n", + drm_dbg_kms(display->drm, + "msg_id %d, entire msg read timeout(mSec): %d\n", msg_id, hdcp2_msg_data->msg_read_timeout); return -ETIMEDOUT; } @@ -695,7 +698,7 @@ int 
intel_dp_hdcp_get_remote_capability(struct intel_connector *connector, bool *hdcp_capable, bool *hdcp2_capable) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_dp_aux *aux; u8 bcaps; int ret; @@ -708,10 +711,10 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector, aux = &connector->port->aux; ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable); if (ret) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP2 DPCD capability read failed err: %d\n", ret); - ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(aux, display, &bcaps); if (ret) return ret; @@ -744,8 +747,8 @@ static int intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector, bool enable) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; @@ -753,7 +756,7 @@ intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector, hdcp->stream_transcoder, enable, TRANS_DDI_HDCP_SELECT); if (ret) - drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n", + drm_err(display->drm, "%s HDCP stream select failed (%d)\n", enable ? "Enable" : "Disable", ret); return ret; } @@ -762,8 +765,8 @@ static int intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, bool enable) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->stream_transcoder; @@ -779,11 +782,11 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, return -EINVAL; /* Wait for encryption confirmation */ - if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port), + if (intel_de_wait(display, HDCP_STATUS(display, cpu_transcoder, port), stream_enc_status, enable ? stream_enc_status : 0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", - transcoder_name(cpu_transcoder), enable ? 
"enabled" : "disabled"); + drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", + transcoder_name(cpu_transcoder), str_enabled_disabled(enable)); return -ETIMEDOUT; } @@ -794,8 +797,8 @@ static int intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, bool enable) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->stream_transcoder; @@ -803,8 +806,8 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, enum port port = dig_port->base.port; int ret; - drm_WARN_ON(&i915->drm, enable && - !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port)) + drm_WARN_ON(display->drm, enable && + !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port)) & AUTH_STREAM_TYPE) != data->streams[0].stream_type); ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable); @@ -812,12 +815,12 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, return ret; /* Wait for encryption confirmation */ - if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe), + if (intel_de_wait(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe), STREAM_ENCRYPTION_STATUS, enable ? STREAM_ENCRYPTION_STATUS : 0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", - transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled"); + drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", + transcoder_name(cpu_transcoder), str_enabled_disabled(enable)); return -ETIMEDOUT; } @@ -872,13 +875,12 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { int intel_dp_hdcp_init(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { - struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_encoder *intel_encoder = &dig_port->base; enum port port = intel_encoder->port; struct intel_dp *intel_dp = &dig_port->dp; - if (!is_hdcp_supported(dev_priv, port)) + if (!is_hdcp_supported(display, port)) return 0; if (intel_connector->mst_port) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 40bedc31d6bf..397cc4ebae52 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -21,6 +21,8 @@ * IN THE SOFTWARE. 
*/ +#include <linux/debugfs.h> + #include <drm/display/drm_dp_helper.h> #include "i915_drv.h" @@ -208,8 +210,10 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd); - for (i = 0; i < lttpr_count; i++) + for (i = 0; i < lttpr_count; i++) { intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i)); + drm_dp_dump_lttpr_desc(&intel_dp->aux, DP_PHY_LTTPR(i)); + } return lttpr_count; } @@ -1677,19 +1681,11 @@ void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n"); } -static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *connector) -{ - if (connector->mst_port) - return connector->mst_port; - else - return enc_to_intel_dp(intel_attached_encoder(connector)); -} - static int i915_dp_force_link_rate_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int current_rate = -1; int force_rate; int err; @@ -1760,7 +1756,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file, struct seq_file *m = file->private_data; struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int rate; int err; @@ -1787,7 +1783,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int current_lane_count = -1; int force_lane_count; int err; @@ -1862,7 +1858,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file, struct seq_file *m = file->private_data; struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int lane_count; int err; @@ -1889,7 +1885,7 @@ static int i915_dp_max_link_rate_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1908,7 +1904,7 @@ static int i915_dp_max_lane_count_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1927,7 +1923,7 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = 
to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1945,7 +1941,7 @@ static int i915_dp_force_link_training_failure_write(void *data, u64 val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; if (val > 2) @@ -1969,7 +1965,7 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1987,7 +1983,7 @@ static int i915_dp_force_link_retrain_write(void *data, u64 val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -2010,7 +2006,7 @@ static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index eeaedd979354..5bba078c00d8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -41,9 +41,10 @@ #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_hdcp.h" +#include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_dp_tunnel.h" -#include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -152,7 +153,7 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int num_joined_pipes = crtc_state->joiner_pipes; + int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state); return intel_dp_dsc_get_slice_count(connector, adjusted_mode->clock, @@ -559,7 +560,7 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp, */ limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24); - intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); + intel_dp_test_compute_config(intel_dp, crtc_state, limits); if (!intel_dp_compute_config_link_bpp_limits(intel_dp, crtc_state, @@ -588,6 +589,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct link_config_limits limits; bool dsc_needed, joiner_needs_dsc; + int num_joined_pipes; int ret = 0; if (pipe_config->fec_enable && @@ -597,16 +599,17 @@ static int 
intel_dp_mst_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; - if (intel_dp_need_joiner(intel_dp, connector, - adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_clock)) - pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, + adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_clock); + if (num_joined_pipes > 1) + pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe); pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->joiner_pipes); + joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes); dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || !intel_dp_mst_compute_config_limits(intel_dp, @@ -1005,6 +1008,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; @@ -1021,6 +1025,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_crtc *pipe_crtc; bool last_mst_stream; + int i; intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; @@ -1028,8 +1033,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, DISPLAY_VER(dev_priv) >= 12 && last_mst_stream && !intel_dp_mst_is_master_trans(old_crtc_state)); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -1053,8 +1057,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_ddi_disable_transcoder_func(old_crtc_state); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -1263,6 +1266,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; @@ -1273,7 +1277,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, enum transcoder trans = pipe_config->cpu_transcoder; bool first_mst_stream = intel_dp->active_mst_links == 1; struct intel_crtc *pipe_crtc; - int ret; + int ret, i; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); @@ -1320,8 +1324,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, intel_enable_transcoder(pipe_config); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(pipe_config)) 
{ + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -1442,10 +1445,11 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq; int max_rate, mode_rate, max_lanes, max_link_clock; int ret; - bool dsc = false, joiner = false; + bool dsc = false; u16 dsc_max_compressed_bpp = 0; u8 dsc_slice_count = 0; int target_clock = mode->clock; + int num_joined_pipes; if (drm_connector_is_unregistered(connector)) { *status = MODE_ERROR; @@ -1485,11 +1489,9 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, * corresponding link capabilities of the sink) in case the * stream is uncompressed for it by the last branch device. */ - if (intel_dp_need_joiner(intel_dp, intel_connector, - mode->hdisplay, target_clock)) { - joiner = true; - max_dotclk *= 2; - } + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector, + mode->hdisplay, target_clock); + max_dotclk *= num_joined_pipes; ret = drm_modeset_lock(&mgr->base.lock, ctx); if (ret) @@ -1515,20 +1517,20 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, max_lanes, target_clock, mode->hdisplay, - joiner, + num_joined_pipes, INTEL_OUTPUT_FORMAT_RGB, pipe_bpp, 64); dsc_slice_count = intel_dp_dsc_get_slice_count(intel_connector, target_clock, mode->hdisplay, - joiner); + num_joined_pipes); } dsc = dsc_max_compressed_bpp && dsc_slice_count; } - if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) { + if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) { *status = MODE_CLOCK_HIGH; return 0; } @@ -1538,7 +1540,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, return 0; } - *status = intel_mode_valid_max_plane_size(dev_priv, mode, joiner); + *status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes); return 0; } @@ -1571,6 +1573,8 @@ intel_dp_mst_detect(struct drm_connector *connector, if (!intel_display_driver_check_access(i915)) return connector->status; + intel_dp_flush_connector_commits(intel_connector); + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, intel_connector->port); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c new file mode 100644 index 000000000000..e05819300d77 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_test.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include <linux/debugfs.h> + +#include <drm/display/drm_dp.h> +#include <drm/display/drm_dp_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_ddi.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_dp_mst.h" +#include "intel_dp_test.h" + +void intel_dp_test_reset(struct intel_dp *intel_dp) +{ + /* + * Clearing compliance test variables to allow capturing + * of values for next automated test request. + */ + memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); +} + +/* Adjust link config limits based on compliance test requests. 
*/ +void intel_dp_test_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits) +{ + struct intel_display *display = to_intel_display(intel_dp); + + /* For DP Compliance we override the computed bpp for the pipe */ + if (intel_dp->compliance.test_data.bpc != 0) { + int bpp = 3 * intel_dp->compliance.test_data.bpc; + + limits->pipe.min_bpp = bpp; + limits->pipe.max_bpp = bpp; + pipe_config->dither_force_disable = bpp == 6 * 3; + + drm_dbg_kms(display->drm, "Setting pipe_bpp to %d\n", bpp); + } + + /* Use values requested by Compliance Test Request */ + if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { + int index; + + /* Validate the compliance test data since max values + * might have changed due to link train fallback. + */ + if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, + intel_dp->compliance.test_lane_count)) { + index = intel_dp_rate_index(intel_dp->common_rates, + intel_dp->num_common_rates, + intel_dp->compliance.test_link_rate); + if (index >= 0) { + limits->min_rate = intel_dp->compliance.test_link_rate; + limits->max_rate = intel_dp->compliance.test_link_rate; + } + limits->min_lane_count = intel_dp->compliance.test_lane_count; + limits->max_lane_count = intel_dp->compliance.test_lane_count; + } + } +} + +/* Compliance test status bits */ +#define INTEL_DP_RESOLUTION_PREFERRED 1 +#define INTEL_DP_RESOLUTION_STANDARD 2 +#define INTEL_DP_RESOLUTION_FAILSAFE 3 + +static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + int status = 0; + int test_link_rate; + u8 test_lane_count, test_link_bw; + /* (DP CTS 1.2) + * 4.3.1.11 + */ + /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */ + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, + &test_lane_count); + + if (status <= 0) { + drm_dbg_kms(display->drm, "Lane count read failed\n"); + return DP_TEST_NAK; + } + test_lane_count &= DP_MAX_LANE_COUNT_MASK; + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, + &test_link_bw); + if (status <= 0) { + drm_dbg_kms(display->drm, "Link Rate read failed\n"); + return DP_TEST_NAK; + } + test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); + + /* Validate the requested link rate and lane count */ + if (!intel_dp_link_params_valid(intel_dp, test_link_rate, + test_lane_count)) + return DP_TEST_NAK; + + intel_dp->compliance.test_lane_count = test_lane_count; + intel_dp->compliance.test_link_rate = test_link_rate; + + return DP_TEST_ACK; +} + +static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + u8 test_pattern; + u8 test_misc; + __be16 h_width, v_height; + int status = 0; + + /* Read the TEST_PATTERN (DP CTS 3.1.5) */ + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, + &test_pattern); + if (status <= 0) { + drm_dbg_kms(display->drm, "Test pattern read failed\n"); + return DP_TEST_NAK; + } + if (test_pattern != DP_COLOR_RAMP) + return DP_TEST_NAK; + + status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, + &h_width, 2); + if (status <= 0) { + drm_dbg_kms(display->drm, "H Width read failed\n"); + return DP_TEST_NAK; + } + + status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, + &v_height, 2); + if (status <= 0) { + drm_dbg_kms(display->drm, "V Height read failed\n"); + return DP_TEST_NAK; + } + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, +
&test_misc); + if (status <= 0) { + drm_dbg_kms(display->drm, "TEST MISC read failed\n"); + return DP_TEST_NAK; + } + if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) + return DP_TEST_NAK; + if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) + return DP_TEST_NAK; + switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { + case DP_TEST_BIT_DEPTH_6: + intel_dp->compliance.test_data.bpc = 6; + break; + case DP_TEST_BIT_DEPTH_8: + intel_dp->compliance.test_data.bpc = 8; + break; + default: + return DP_TEST_NAK; + } + + intel_dp->compliance.test_data.video_pattern = test_pattern; + intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); + intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; + + return DP_TEST_ACK; +} + +static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + u8 test_result = DP_TEST_ACK; + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_connector *connector = &intel_connector->base; + + if (!intel_connector->detect_edid || connector->edid_corrupt || + intel_dp->aux.i2c_defer_count > 6) { + /* Check EDID read for NACKs, DEFERs and corruption + * (DP CTS 1.2 Core r1.1) + * 4.2.2.4 : Failed EDID read, I2C_NAK + * 4.2.2.5 : Failed EDID read, I2C_DEFER + * 4.2.2.6 : EDID corruption detected + * Use failsafe mode for all cases + */ + if (intel_dp->aux.i2c_nack_count > 0 || + intel_dp->aux.i2c_defer_count > 0) + drm_dbg_kms(display->drm, + "EDID read had %d NACKs, %d DEFERs\n", + intel_dp->aux.i2c_nack_count, + intel_dp->aux.i2c_defer_count); + intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; + } else { + /* FIXME: Get rid of drm_edid_raw() */ + const struct edid *block = drm_edid_raw(intel_connector->detect_edid); + + /* We have to write the checksum of the last block read */ + block += block->extensions; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, + block->checksum) <= 0) + drm_dbg_kms(display->drm, + "Failed to write EDID checksum\n"); + + test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; + intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; + } + + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; + + return test_result; +} + +static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum pipe pipe = crtc->pipe; + u32 pattern_val; + + switch (data->phy_pattern) { + case DP_LINK_QUAL_PATTERN_DISABLE: + drm_dbg_kms(display->drm, "Disable Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), 0x0); + if (DISPLAY_VER(display) >= 10) + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_NORMAL); + break; + case DP_LINK_QUAL_PATTERN_D10_2: + drm_dbg_kms(display->drm, "Set D10.2 Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); + break; + case DP_LINK_QUAL_PATTERN_ERROR_RATE: + drm_dbg_kms(display->drm, + "Set Error 
Count Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | + DDI_DP_COMP_CTL_SCRAMBLED_0); + break; + case DP_LINK_QUAL_PATTERN_PRBS7: + drm_dbg_kms(display->drm, "Set PRBS7 Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); + break; + case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM: + /* + * FIXME: Ideally pattern should come from DPCD 0x250. As + * current firmware of DPR-100 could not set it, so hardcoding + * now for compliance test. + */ + drm_dbg_kms(display->drm, + "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); + pattern_val = 0x3e0f83e0; + intel_de_write(display, DDI_DP_COMP_PAT(pipe, 0), pattern_val); + pattern_val = 0x0f83e0f8; + intel_de_write(display, DDI_DP_COMP_PAT(pipe, 1), pattern_val); + pattern_val = 0x0000f83e; + intel_de_write(display, DDI_DP_COMP_PAT(pipe, 2), pattern_val); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | + DDI_DP_COMP_CTL_CUSTOM80); + break; + case DP_LINK_QUAL_PATTERN_CP2520_PAT_1: + /* + * FIXME: Ideally pattern should come from DPCD 0x24A. As + * current firmware of DPR-100 could not set it, so hardcoding + * now for compliance test. + */ + drm_dbg_kms(display->drm, + "Set HBR2 compliance Phy Test Pattern\n"); + pattern_val = 0xFB; + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | + pattern_val); + break; + case DP_LINK_QUAL_PATTERN_CP2520_PAT_3: + if (DISPLAY_VER(display) < 10) { + drm_warn(display->drm, + "Platform does not support TPS4\n"); + break; + } + drm_dbg_kms(display->drm, + "Set TPS4 compliance Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), 0x0); + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4); + break; + default: + drm_warn(display->drm, "Invalid Phy Test Pattern\n"); + } +} + +static void intel_dp_process_phy_request(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, + link_status) < 0) { + drm_dbg_kms(display->drm, "failed to get link status\n"); + return; + } + + /* retrieve vswing & pre-emphasis setting */ + intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, + link_status); + + intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); + + intel_dp_phy_pattern_update(intel_dp, crtc_state); + + drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, + intel_dp->train_set, crtc_state->lane_count); + + drm_dp_set_phy_test_pattern(&intel_dp->aux, data, + intel_dp->dpcd[DP_DPCD_REV]); +} + +static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + + if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { + drm_dbg_kms(display->drm, + "DP Phy Test pattern AUX read failure\n"); + return DP_TEST_NAK; + } + + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; + + return DP_TEST_ACK; +} + +void intel_dp_test_request(struct intel_dp *intel_dp) +{ + struct intel_display *display
= to_intel_display(intel_dp); + u8 response = DP_TEST_NAK; + u8 request = 0; + int status; + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); + if (status <= 0) { + drm_dbg_kms(display->drm, + "Could not read test request from sink\n"); + goto update_status; + } + + switch (request) { + case DP_TEST_LINK_TRAINING: + drm_dbg_kms(display->drm, "LINK_TRAINING test requested\n"); + response = intel_dp_autotest_link_training(intel_dp); + break; + case DP_TEST_LINK_VIDEO_PATTERN: + drm_dbg_kms(display->drm, "TEST_PATTERN test requested\n"); + response = intel_dp_autotest_video_pattern(intel_dp); + break; + case DP_TEST_LINK_EDID_READ: + drm_dbg_kms(display->drm, "EDID test requested\n"); + response = intel_dp_autotest_edid(intel_dp); + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + drm_dbg_kms(display->drm, "PHY_PATTERN test requested\n"); + response = intel_dp_autotest_phy_pattern(intel_dp); + break; + default: + drm_dbg_kms(display->drm, "Invalid test request '%02x'\n", + request); + break; + } + + if (response & DP_TEST_ACK) + intel_dp->compliance.test_type = request; + +update_status: + status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); + if (status <= 0) + drm_dbg_kms(display->drm, + "Could not write test response to sink\n"); +} + +/* phy test */ + +static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u8 *pipe_mask) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; + int ret = 0; + + *pipe_mask = 0; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state = + connector->base.state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (!intel_dp_has_connector(intel_dp, conn_state)) + continue; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + continue; + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + break; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + drm_WARN_ON(display->drm, + !intel_crtc_has_dp_encoder(crtc_state)); + + if (!crtc_state->hw.active) + continue; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + continue; + + *pipe_mask |= BIT(crtc->pipe); + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static int intel_dp_do_phy_test(struct intel_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_display *display = to_intel_display(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_crtc *crtc; + u8 pipe_mask; + int ret; + + ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, + ctx); + if (ret) + return ret; + + ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); + if (ret) + return ret; + + if (pipe_mask == 0) + return 0; + + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] PHY test\n", + encoder->base.base.id, encoder->base.name); + + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + /* test on the MST master transcoder */ + if (DISPLAY_VER(display) >= 12 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && + !intel_dp_mst_is_master_trans(crtc_state)) + continue; + + intel_dp_process_phy_request(intel_dp, crtc_state); + break; + } + + return 0; +} + +bool intel_dp_test_phy(struct intel_dp *intel_dp) +{ 
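Editor's aside: intel_dp_test_phy(), which opens above, is built on the standard DRM modeset-acquire retry idiom — every lock is taken through one drm_modeset_acquire_ctx, and on -EDEADLK the context backs off (dropping all held locks) before the whole sequence is retried. The skeleton of that idiom, with a hypothetical do_locked() standing in for intel_dp_do_phy_test():

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		/* do_locked() must take all locks via drm_modeset_lock(..., &ctx) */
		ret = do_locked(&ctx);
		if (ret == -EDEADLK) {
			/* deadlock detected: drop every held lock, wait, retry */
			drm_modeset_backoff(&ctx);
			continue;
		}
		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);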
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; + struct drm_modeset_acquire_ctx ctx; + int ret; + + if (!intel_dp->compliance.test_active || + intel_dp->compliance.test_type != DP_TEST_LINK_PHY_TEST_PATTERN) + return false; + + drm_modeset_acquire_init(&ctx, 0); + + for (;;) { + ret = intel_dp_do_phy_test(encoder, &ctx); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + continue; + } + + break; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_WARN(encoder->base.dev, ret, + "Acquiring modeset locks failed with %i\n", ret); + + return true; +} + +bool intel_dp_test_short_pulse(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + bool reprobe_needed = false; + + switch (intel_dp->compliance.test_type) { + case DP_TEST_LINK_TRAINING: + drm_dbg_kms(display->drm, + "Link Training Compliance Test requested\n"); + /* Send a Hotplug Uevent to userspace to start modeset */ + drm_kms_helper_hotplug_event(display->drm); + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + drm_dbg_kms(display->drm, + "PHY test pattern Compliance Test requested\n"); + /* + * Schedule long hpd to do the test + * + * FIXME get rid of the ad-hoc phy test modeset code + * and properly incorporate it into the normal modeset. + */ + reprobe_needed = true; + } + + return reprobe_needed; +} + +static ssize_t i915_displayport_test_active_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_display *display = m->private; + char *input_buffer; + int status = 0; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + int val = 0; + + if (len == 0) + return 0; + + input_buffer = memdup_user_nul(ubuf, len); + if (IS_ERR(input_buffer)) + return PTR_ERR(input_buffer); + + drm_dbg_kms(display->drm, "Copied %d bytes from user\n", (unsigned int)len); + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) + break; + drm_dbg_kms(display->drm, "Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + intel_dp->compliance.test_active = true; + else + intel_dp->compliance.test_active = false; + } + } + drm_connector_list_iter_end(&conn_iter); + kfree(input_buffer); + if (status < 0) + return status; + + *offp += len; + return len; +} + +static int i915_displayport_test_active_show(struct seq_file *m, void *data) +{ + struct intel_display *display = m->private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == 
INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->compliance.test_active) + seq_puts(m, "1"); + else + seq_puts(m, "0"); + } else { + seq_puts(m, "0"); + } + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} + +static int i915_displayport_test_active_open(struct inode *inode, + struct file *file) +{ + return single_open(file, i915_displayport_test_active_show, + inode->i_private); +} + +static const struct file_operations i915_displayport_test_active_fops = { + .owner = THIS_MODULE, + .open = i915_displayport_test_active_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_displayport_test_active_write +}; + +static int i915_displayport_test_data_show(struct seq_file *m, void *data) +{ + struct intel_display *display = m->private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->compliance.test_type == + DP_TEST_LINK_EDID_READ) + seq_printf(m, "%lx", + intel_dp->compliance.test_data.edid); + else if (intel_dp->compliance.test_type == + DP_TEST_LINK_VIDEO_PATTERN) { + seq_printf(m, "hdisplay: %d\n", + intel_dp->compliance.test_data.hdisplay); + seq_printf(m, "vdisplay: %d\n", + intel_dp->compliance.test_data.vdisplay); + seq_printf(m, "bpc: %u\n", + intel_dp->compliance.test_data.bpc); + } else if (intel_dp->compliance.test_type == + DP_TEST_LINK_PHY_TEST_PATTERN) { + seq_printf(m, "pattern: %d\n", + intel_dp->compliance.test_data.phytest.phy_pattern); + seq_printf(m, "Number of lanes: %d\n", + intel_dp->compliance.test_data.phytest.num_lanes); + seq_printf(m, "Link Rate: %d\n", + intel_dp->compliance.test_data.phytest.link_rate); + seq_printf(m, "level: %02x\n", + intel_dp->train_set[0]); + } + } else { + seq_puts(m, "0"); + } + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); + +static int i915_displayport_test_type_show(struct seq_file *m, void *data) +{ + struct intel_display *display = m->private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); + } else { + seq_puts(m, "0"); + } + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); + +static const struct { + const char *name; + const struct file_operations *fops; +} intel_display_debugfs_files[] = { + {"i915_dp_test_data", &i915_displayport_test_data_fops}, + 
{"i915_dp_test_type", &i915_displayport_test_type_fops}, + {"i915_dp_test_active", &i915_displayport_test_active_fops}, +}; + +void intel_dp_test_debugfs_register(struct intel_display *display) +{ + struct drm_minor *minor = display->drm->primary; + int i; + + for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { + debugfs_create_file(intel_display_debugfs_files[i].name, + 0644, + minor->debugfs_root, + display, + intel_display_debugfs_files[i].fops); + } +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.h b/drivers/gpu/drm/i915/display/intel_dp_test.h new file mode 100644 index 000000000000..dcc167e4c7f6 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_test.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_DP_TEST_H__ +#define __INTEL_DP_TEST_H__ + +#include <linux/types.h> + +struct intel_crtc_state; +struct intel_display; +struct intel_dp; +struct link_config_limits; + +void intel_dp_test_reset(struct intel_dp *intel_dp); +void intel_dp_test_request(struct intel_dp *intel_dp); +void intel_dp_test_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits); +bool intel_dp_test_phy(struct intel_dp *intel_dp); +bool intel_dp_test_short_pulse(struct intel_dp *intel_dp); +void intel_dp_test_debugfs_register(struct intel_display *display); + +#endif /* __INTEL_DP_TEST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h index a0c00b7d3303..e9314cf25a19 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h @@ -20,7 +20,7 @@ struct intel_dp; struct intel_encoder; struct intel_link_bw_limits; -#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915) +#if IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915) int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx); void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index d20e4e9cf7f7..0f12f2c3467c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -219,8 +219,10 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = { }; static const struct bxt_dpio_phy_info * -bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) +bxt_get_phy_list(struct intel_display *display, int *count) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (IS_GEMINILAKE(dev_priv)) { *count = ARRAY_SIZE(glk_dpio_phy_info); return glk_dpio_phy_info; @@ -231,22 +233,22 @@ bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) } static const struct bxt_dpio_phy_info * -bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) +bxt_get_phy_info(struct intel_display *display, enum dpio_phy phy) { int count; const struct bxt_dpio_phy_info *phy_list = - bxt_get_phy_list(dev_priv, &count); + bxt_get_phy_list(display, &count); return &phy_list[phy]; } -void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, +void bxt_port_to_phy_channel(struct intel_display *display, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { const struct bxt_dpio_phy_info *phy_info, *phys; int i, count; - phys = bxt_get_phy_list(dev_priv, &count); + phys = bxt_get_phy_list(display, &count); for (i = 0; i < count; i++) { phy_info = &phys[i]; @@ 
-265,7 +267,7 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, } } - drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c", + drm_WARN(display->drm, 1, "PHY not found for PORT %c", port_name(port)); *phy = DPIO_PHY0; *ch = DPIO_CH0; @@ -275,16 +277,16 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, * Like intel_de_rmw() but reads from a single per-lane register and * writes to the group register to write the same value to all the lanes. */ -static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915, +static u32 bxt_dpio_phy_rmw_grp(struct intel_display *display, i915_reg_t reg_single, i915_reg_t reg_group, u32 clear, u32 set) { u32 old, val; - old = intel_de_read(i915, reg_single); + old = intel_de_read(display, reg_single); val = (old & ~clear) | set; - intel_de_write(i915, reg_group, val); + intel_de_write(display, reg_group, val); return old; } @@ -292,30 +294,30 @@ static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915, void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; enum dpio_channel ch; enum dpio_phy phy; int lane, n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; - bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch); + bxt_port_to_phy_channel(display, encoder->port, &phy, &ch); /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. */ - bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch), + bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch), BXT_PORT_PCS_DW10_GRP(phy, ch), TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0); for (lane = 0; lane < crtc_state->lane_count; lane++) { int level = intel_ddi_level(encoder, crtc_state, lane); - intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW2_LN(phy, ch, lane), MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK, MARGIN_000(trans->entries[level].bxt.margin) | UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale)); @@ -325,50 +327,50 @@ void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, int level = intel_ddi_level(encoder, crtc_state, lane); u32 val; - intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW3_LN(phy, ch, lane), SCALE_DCOMP_METHOD, trans->entries[level].bxt.enable ? 
SCALE_DCOMP_METHOD : 0); - val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane)); + val = intel_de_read(display, BXT_PORT_TX_DW3_LN(phy, ch, lane)); if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Disabled scaling while ouniqetrangenmethod was set"); } for (lane = 0; lane < crtc_state->lane_count; lane++) { int level = intel_ddi_level(encoder, crtc_state, lane); - intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW4_LN(phy, ch, lane), DE_EMPHASIS_MASK, DE_EMPHASIS(trans->entries[level].bxt.deemphasis)); } - bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch), + bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch), BXT_PORT_PCS_DW10_GRP(phy, ch), 0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT); } -bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, +bool bxt_dpio_phy_is_enabled(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); - if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) + if (!(intel_de_read(display, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) return false; - if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) & + if ((intel_de_read(display, BXT_PORT_CL1CM_DW0(phy)) & (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) { - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "DDI PHY %d powered, but power hasn't settled\n", phy); return false; } - if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { - drm_dbg(&dev_priv->drm, + if (!(intel_de_read(display, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { + drm_dbg(display->drm, "DDI PHY %d powered, but still in reset\n", phy); return false; @@ -377,47 +379,44 @@ bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, return true; } -static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy) { - u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy)); + u32 val = intel_de_read(display, BXT_PORT_REF_DW6(phy)); return REG_FIELD_GET(GRC_CODE_MASK, val); } -static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, +static void bxt_phy_wait_grc_done(struct intel_display *display, enum dpio_phy phy) { - if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy), - GRC_DONE, 10)) - drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n", - phy); + if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10)) + drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy); } -static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; u32 val; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); - if (bxt_dpio_phy_is_enabled(dev_priv, phy)) { + if (bxt_dpio_phy_is_enabled(display, phy)) { /* Still read out the GRC value for state verification */ if (phy_info->rcomp_phy != -1) - dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy); + display->state.bxt_phy_grc = bxt_get_grc(display, phy); - if (bxt_dpio_phy_verify_state(dev_priv, phy)) { - drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, " + if (bxt_dpio_phy_verify_state(display, phy)) { + drm_dbg(display->drm, "DDI PHY %d 
already enabled, " "won't reprogram it\n", phy); return; } - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "DDI PHY %d enabled with invalid state, " "force reprogramming it\n", phy); } - intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); + intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); /* * The PHY registers start out inaccessible and respond to reads with @@ -427,92 +426,91 @@ static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv, * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. */ - if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy), + if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy), PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) - drm_err(&dev_priv->drm, "timeout during PHY%d power on\n", + drm_err(display->drm, "timeout during PHY%d power on\n", phy); /* Program PLL Rcomp code offset */ - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), + intel_de_rmw(display, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4)); - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), + intel_de_rmw(display, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4)); /* Program power gating */ - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, + intel_de_rmw(display, BXT_PORT_CL1CM_DW28(phy), 0, OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG); if (phy_info->dual_channel) - intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0, + intel_de_rmw(display, BXT_PORT_CL2CM_DW6(phy), 0, DW6_OLDO_DYN_PWR_DOWN_EN); if (phy_info->rcomp_phy != -1) { u32 grc_code; - bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); + bxt_phy_wait_grc_done(display, phy_info->rcomp_phy); /* * PHY0 isn't connected to an RCOMP resistor so copy over * the corresponding calibrated value from PHY1, and disable * the automatic calibration on PHY0. 
*/ - val = bxt_get_grc(dev_priv, phy_info->rcomp_phy); - dev_priv->display.state.bxt_phy_grc = val; + val = bxt_get_grc(display, phy_info->rcomp_phy); + display->state.bxt_phy_grc = val; grc_code = GRC_CODE_FAST(val) | GRC_CODE_SLOW(val) | GRC_CODE_NOM(val); - intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); - intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), + intel_de_write(display, BXT_PORT_REF_DW6(phy), grc_code); + intel_de_rmw(display, BXT_PORT_REF_DW8(phy), 0, GRC_DIS | GRC_RDY_OVRD); } if (phy_info->reset_delay) udelay(phy_info->reset_delay); - intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); + intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); } -void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) +void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); - intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); + intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); - intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); + intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); } -void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) { - const struct bxt_dpio_phy_info *phy_info = - bxt_get_phy_info(dev_priv, phy); + const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(display, phy); enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; - lockdep_assert_held(&dev_priv->display.power.domains.lock); + lockdep_assert_held(&display->power.domains.lock); was_enabled = true; if (rcomp_phy != -1) - was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy); + was_enabled = bxt_dpio_phy_is_enabled(display, rcomp_phy); /* * We need to copy the GRC calibration value from rcomp_phy, * so make sure it's powered up. */ if (!was_enabled) - _bxt_dpio_phy_init(dev_priv, rcomp_phy); + _bxt_dpio_phy_init(display, rcomp_phy); - _bxt_dpio_phy_init(dev_priv, phy); + _bxt_dpio_phy_init(display, phy); if (!was_enabled) - bxt_dpio_phy_uninit(dev_priv, rcomp_phy); + bxt_dpio_phy_uninit(display, rcomp_phy); } static bool __printf(6, 7) -__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, +__phy_reg_verify_state(struct intel_display *display, enum dpio_phy phy, i915_reg_t reg, u32 mask, u32 expected, const char *reg_fmt, ...) 
{ @@ -520,7 +518,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, va_list args; u32 val; - val = intel_de_read(dev_priv, reg); + val = intel_de_read(display, reg); if ((val & mask) == expected) return true; @@ -528,7 +526,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, vaf.fmt = reg_fmt; vaf.va = &args; - drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: " + drm_dbg(display->drm, "DDI PHY %d reg %pV [%08x] state mismatch: " "current %08x, expected %08x (mask %08x)\n", phy, &vaf, reg.reg, val, (val & ~mask) | expected, mask); @@ -538,20 +536,20 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, return false; } -bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, +bool bxt_dpio_phy_verify_state(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; u32 mask; bool ok; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); #define _CHK(reg, mask, exp, fmt, ...) \ - __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ + __phy_reg_verify_state(display, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) - if (!bxt_dpio_phy_is_enabled(dev_priv, phy)) + if (!bxt_dpio_phy_is_enabled(display, phy)) return false; ok = true; @@ -575,7 +573,7 @@ bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, "BXT_PORT_CL2CM_DW6(%d)", phy); if (phy_info->rcomp_phy != -1) { - u32 grc_code = dev_priv->display.state.bxt_phy_grc; + u32 grc_code = display->state.bxt_phy_grc; grc_code = GRC_CODE_FAST(grc_code) | GRC_CODE_SLOW(grc_code) | @@ -614,20 +612,20 @@ bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count) void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, u8 lane_lat_optim_mask) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum dpio_phy phy; enum dpio_channel ch; int lane; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); for (lane = 0; lane < 4; lane++) { /* * Note that on CHV this flag is called UPAR, but has * the same function. */ - intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW14_LN(phy, ch, lane), LATENCY_OPTIM, lane_lat_optim_mask & BIT(lane) ? 
LATENCY_OPTIM : 0); } @@ -636,18 +634,18 @@ void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum dpio_phy phy; enum dpio_channel ch; int lane; u8 mask; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); mask = 0; for (lane = 0; lane < 4; lane++) { - u32 val = intel_de_read(dev_priv, + u32 val = intel_de_read(display, BXT_PORT_TX_DW14_LN(phy, ch, lane)); if (val & LATENCY_OPTIM) diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index 226994dcb89b..a82939165546 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -10,9 +10,9 @@ enum pipe; enum port; -struct drm_i915_private; struct intel_crtc_state; struct intel_digital_port; +struct intel_display; struct intel_encoder; enum dpio_channel { @@ -27,15 +27,15 @@ enum dpio_phy { }; #ifdef I915 -void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, +void bxt_port_to_phy_channel(struct intel_display *display, enum port port, enum dpio_phy *phy, enum dpio_channel *ch); void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); -void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); -void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); -bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, +void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy); +void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy); +bool bxt_dpio_phy_is_enabled(struct intel_display *display, enum dpio_phy phy); -bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, +bool bxt_dpio_phy_verify_state(struct intel_display *display, enum dpio_phy phy); u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count); void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, @@ -73,7 +73,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state); #else -static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, +static inline void bxt_port_to_phy_channel(struct intel_display *display, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { } @@ -81,18 +81,18 @@ static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { } -static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static inline void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) { } -static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static inline void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy) { } -static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, +static inline bool bxt_dpio_phy_is_enabled(struct intel_display *display, enum dpio_phy phy) { return false; } -static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, +static inline bool bxt_dpio_phy_verify_state(struct intel_display *display, enum dpio_phy phy) { return true; diff 
--git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 340dfce480b8..198ceda790d2 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -589,11 +589,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv, if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) return false; - if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv)) + if (!IS_PINEVIEW(dev_priv) && + !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) if (clock->m1 <= clock->m2) return false; - if (!IS_LP(dev_priv)) { + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) { if (clock->p < limit->p.min || limit->p.max < clock->p) return false; if (clock->m < limit->m.min || limit->m.max < clock->m) @@ -780,7 +783,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, max_n = limit->n.max; /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { - /* based on hardware requirement, prefere larger m1,m2 */ + /* based on hardware requirement, prefer larger m1,m2 */ for (clock.m1 = limit->m1.max; clock.m1 >= limit->m1.min; clock.m1--) { for (clock.m2 = limit->m2.max; @@ -1000,6 +1003,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; @@ -1058,7 +1062,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv)) + intel_panel_use_ssc(display)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -1092,6 +1096,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; @@ -1128,7 +1133,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, dpll |= DPLL_DVO_2X_MODE; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv)) + intel_panel_use_ssc(display)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -1234,11 +1239,12 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state, static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) || + ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915)))) return 25; @@ -1268,6 +1274,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { + struct intel_display *display = to_intel_display(crtc_state); struct 
intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; @@ -1329,7 +1336,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, WARN_ON(reduced_clock->p2 != clock->p2); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv)) + intel_panel_use_ssc(display)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -1353,6 +1360,7 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, static int ilk_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1365,7 +1373,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, return 0; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", dev_priv->display.vbt.lvds_ssc_freq); @@ -1529,6 +1537,7 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state, static int g4x_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1536,7 +1545,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -1578,6 +1587,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, static int pnv_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1585,7 +1595,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -1616,6 +1626,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1623,7 +1634,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -1656,6 +1667,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, static int 
i8xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1663,7 +1675,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 48000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -2212,7 +2224,8 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); struct intel_crtc_state *crtc_state; crtc_state = intel_crtc_state_alloc(crtc); @@ -2318,12 +2331,13 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) static void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; - cur_state = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE; - I915_STATE_WARN(dev_priv, cur_state != state, - "PLL state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "PLL state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index f490b2157828..e60497bb8a94 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -173,18 +173,19 @@ void assert_shared_dpll(struct drm_i915_private *i915, struct intel_shared_dpll *pll, bool state) { + struct intel_display *display = &i915->display; bool cur_state; struct intel_dpll_hw_state hw_state; - if (drm_WARN(&i915->drm, !pll, + if (drm_WARN(display->drm, !pll, "asserting DPLL %s with no DPLL\n", str_on_off(state))) return; cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state); - I915_STATE_WARN(i915, cur_state != state, - "%s assertion failure (expected %s, current %s)\n", - pll->info->name, str_on_off(state), - str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "%s assertion failure (expected %s, current %s)\n", + pll->info->name, str_on_off(state), + str_on_off(cur_state)); } static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id) @@ -545,14 +546,15 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915, static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; u32 val; bool enabled; - val = intel_de_read(i915, PCH_DREF_CONTROL); + val = intel_de_read(display, PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); - I915_STATE_WARN(i915, !enabled, - "PCH refclk assertion failure, should be active but is disabled\n"); + INTEL_DISPLAY_STATE_WARN(display, 
!enabled, + "PCH refclk assertion failure, should be active but is disabled\n"); } static void ibx_pch_dpll_enable(struct drm_i915_private *i915, @@ -2035,13 +2037,14 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *dpll_hw_state) { + struct intel_display *display = &i915->display; const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; u32 temp; - bxt_port_to_phy_channel(i915, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); /* Non-SSC reference */ intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); @@ -2157,6 +2160,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *dpll_hw_state) { + struct intel_display *display = &i915->display; struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ intel_wakeref_t wakeref; @@ -2165,7 +2169,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915, u32 val; bool ret; - bxt_port_to_phy_channel(i915, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); @@ -4619,6 +4623,7 @@ verify_single_dpll_state(struct drm_i915_private *i915, struct intel_crtc *crtc, const struct intel_crtc_state *new_crtc_state) { + struct intel_display *display = &i915->display; struct intel_dpll_hw_state dpll_hw_state = {}; u8 pipe_mask; bool active; @@ -4626,22 +4631,22 @@ verify_single_dpll_state(struct drm_i915_private *i915, active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state); if (!pll->info->always_on) { - I915_STATE_WARN(i915, !pll->on && pll->active_mask, - "%s: pll in active use but not on in sw tracking\n", - pll->info->name); - I915_STATE_WARN(i915, pll->on && !pll->active_mask, - "%s: pll is on but not used by any active pipe\n", - pll->info->name); - I915_STATE_WARN(i915, pll->on != active, - "%s: pll on state mismatch (expected %i, found %i)\n", - pll->info->name, pll->on, active); + INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask, + "%s: pll in active use but not on in sw tracking\n", + pll->info->name); + INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask, + "%s: pll is on but not used by any active pipe\n", + pll->info->name); + INTEL_DISPLAY_STATE_WARN(display, pll->on != active, + "%s: pll on state mismatch (expected %i, found %i)\n", + pll->info->name, pll->on, active); } if (!crtc) { - I915_STATE_WARN(i915, - pll->active_mask & ~pll->state.pipe_mask, - "%s: more active pll users than references: 0x%x vs 0x%x\n", - pll->info->name, pll->active_mask, pll->state.pipe_mask); + INTEL_DISPLAY_STATE_WARN(display, + pll->active_mask & ~pll->state.pipe_mask, + "%s: more active pll users than references: 0x%x vs 0x%x\n", + pll->info->name, pll->active_mask, pll->state.pipe_mask); return; } @@ -4649,23 +4654,23 @@ verify_single_dpll_state(struct drm_i915_private *i915, pipe_mask = BIT(crtc->pipe); if (new_crtc_state->hw.active) - I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask), - "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n", - pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask), + "%s: pll active mismatch (expected pipe %c in active mask 
0x%x)\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); else - I915_STATE_WARN(i915, pll->active_mask & pipe_mask, - "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", - pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask, + "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask), - "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", - pll->info->name, pipe_mask, pll->state.pipe_mask); + INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask), + "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", + pll->info->name, pipe_mask, pll->state.pipe_mask); - I915_STATE_WARN(i915, - pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, - sizeof(dpll_hw_state)), - "%s: pll hw state mismatch\n", - pll->info->name); + INTEL_DISPLAY_STATE_WARN(display, + pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, + sizeof(dpll_hw_state)), + "%s: pll hw state mismatch\n", + pll->info->name); } static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll, @@ -4678,6 +4683,7 @@ static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll, void intel_shared_dpll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -4693,16 +4699,16 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state, u8 pipe_mask = BIT(crtc->pipe); struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; - I915_STATE_WARN(i915, pll->active_mask & pipe_mask, - "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", - pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask, + "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); /* TC ports have both MG/TC and TBT PLL referenced simultaneously */ - I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll, - new_crtc_state->shared_dpll) && - pll->state.pipe_mask & pipe_mask, - "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n", - pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask); + INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll, + new_crtc_state->shared_dpll) && + pll->state.pipe_mask & pipe_mask, + "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n", + pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 3a6d99044828..ce8c76e44e6a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -242,7 +242,7 @@ void intel_dpt_suspend(struct drm_i915_private *i915) struct i915_address_space * intel_dpt_create(struct intel_framebuffer *fb) { - struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; + struct drm_gem_object *obj = intel_fb_bo(&fb->base); struct drm_i915_private *i915 = to_i915(obj->dev); struct drm_i915_gem_object *dpt_obj; struct i915_address_space *vm; diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c 
b/drivers/gpu/drm/i915/display/intel_drrs.c index 3ca29afa5422..bb39eb96e812 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <linux/debugfs.h> + #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index da24e041d269..b7b44399adaa 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -4,6 +4,8 @@ * */ +#include <drm/drm_vblank.h> + #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" @@ -37,9 +39,16 @@ struct intel_dsb { unsigned int free_pos; /* - * ins_start_offset will help to store start dword of the dsb - * instuction and help in identifying the batch of auto-increment - * register. + * Previously emitted DSB instruction. Used to + * identify/adjust the instruction for indexed + * register writes. + */ + u32 ins[2]; + + /* + * Start of the previously emitted DSB instruction. + * Used to adjust the instruction for indexed + * register writes. */ unsigned int ins_start_offset; @@ -119,6 +128,12 @@ pre_commit_crtc_state(struct intel_atomic_state *state, return old_crtc_state; } +static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state) +{ + return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) - + intel_mode_vdisplay(&crtc_state->hw.adjusted_mode); +} + static int dsb_vtotal(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -215,9 +230,11 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw) dsb->free_pos = ALIGN(dsb->free_pos, 2); dsb->ins_start_offset = dsb->free_pos; + dsb->ins[0] = ldw; + dsb->ins[1] = udw; - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw); - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw); + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]); + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]); } static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb, @@ -233,10 +250,8 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb, if (dsb->free_pos == 0) return false; - prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf, - dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK; - prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf, - dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK; + prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK; + prev_reg = dsb->ins[1] & DSB_REG_VALUE_MASK; return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg); } @@ -269,8 +284,6 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) { - u32 old_val; - /* * For example the buffer will look like below for 3 dwords for auto * increment register: @@ -299,23 +312,27 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, /* convert to indexed write? 
*/ if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) { - u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf, - dsb->ins_start_offset + 0); + u32 prev_val = dsb->ins[0]; + + dsb->ins[0] = 1; /* count */ + dsb->ins[1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | + i915_mmio_reg_offset(reg); - intel_dsb_buffer_write(&dsb->dsb_buf, - dsb->ins_start_offset + 0, 1); /* count */ + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0, + dsb->ins[0]); intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1, - (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | - i915_mmio_reg_offset(reg)); - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val); + dsb->ins[1]); + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, + prev_val); dsb->free_pos++; } intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val); /* Update the count */ - old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset); - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1); + dsb->ins[0]++; + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0, + dsb->ins[0]); /* if number of data words is odd, then the last dword should be 0.*/ if (dsb->free_pos & 0x1) @@ -370,6 +387,24 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb) intel_dsb_noop(dsb, 4); } +void intel_dsb_interrupt(struct intel_dsb *dsb) +{ + intel_dsb_emit(dsb, 0, + DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT); +} + +void intel_dsb_wait_usec(struct intel_dsb *dsb, int count) +{ + intel_dsb_emit(dsb, count, + DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT); +} + +void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count) +{ + intel_dsb_emit(dsb, count, + DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT); +} + static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb, u32 opcode, int lower, int upper) { @@ -510,6 +545,31 @@ static u32 dsb_error_int_en(struct intel_display *display) return errors; } +void intel_dsb_vblank_evade(struct intel_atomic_state *state, + struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = dsb->crtc; + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + /* FIXME calibrate sensibly */ + int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20); + int vblank_delay = dsb_vblank_delay(crtc_state); + int start, end; + + if (pre_commit_is_vrr_active(state, crtc)) { + end = intel_vrr_vmin_vblank_start(crtc_state); + start = end - vblank_delay - latency; + intel_dsb_wait_scanline_out(state, dsb, start, end); + + end = intel_vrr_vmax_vblank_start(crtc_state); + start = end - vblank_delay - latency; + intel_dsb_wait_scanline_out(state, dsb, start, end); + } else { + end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode); + start = end - vblank_delay - latency; + intel_dsb_wait_scanline_out(state, dsb, start, end); + } +} + static void _intel_dsb_chain(struct intel_atomic_state *state, struct intel_dsb *dsb, struct intel_dsb *chained_dsb, @@ -535,7 +595,7 @@ static void _intel_dsb_chain(struct intel_atomic_state *state, intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id), dsb_error_int_status(display) | DSB_PROG_INT_STATUS | - dsb_error_int_en(display)); + dsb_error_int_en(display) | DSB_PROG_INT_EN); if (ctrl & DSB_WAIT_FOR_VBLANK) { int dewake_scanline = dsb_dewake_scanline_start(state, crtc); @@ -577,6 +637,17 @@ void intel_dsb_chain(struct intel_atomic_state *state, wait_for_vblank ? 
DSB_WAIT_FOR_VBLANK : 0); } +void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state, + struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = dsb->crtc; + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode, + dsb_vblank_delay(crtc_state)) + 1; + + intel_dsb_wait_usec(dsb, usecs); +} + static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, int hw_dewake_scanline) { @@ -603,7 +674,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id), dsb_error_int_status(display) | DSB_PROG_INT_STATUS | - dsb_error_int_en(display)); + dsb_error_int_en(display) | DSB_PROG_INT_EN); intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id), intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf)); @@ -671,6 +742,9 @@ void intel_dsb_wait(struct intel_dsb *dsb) /* Attempt to reset it */ dsb->free_pos = 0; dsb->ins_start_offset = 0; + dsb->ins[0] = 0; + dsb->ins[1] = 0; + intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0); intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id), @@ -706,10 +780,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, if (!i915->display.params.enable_dsb) return NULL; - /* TODO: DSB is broken in Xe KMD, so disabling it until fixed */ - if (!IS_ENABLED(I915)) - return NULL; - dsb = kzalloc(sizeof(*dsb), GFP_KERNEL); if (!dsb) goto out; @@ -727,8 +797,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, dsb->id = dsb_id; dsb->crtc = crtc; dsb->size = size / 4; /* in dwords */ - dsb->free_pos = 0; - dsb->ins_start_offset = 0; dsb->chicken = dsb_chicken(state, crtc); dsb->hw_dewake_scanline = @@ -763,12 +831,29 @@ void intel_dsb_cleanup(struct intel_dsb *dsb) void intel_dsb_irq_handler(struct intel_display *display, enum pipe pipe, enum intel_dsb_id dsb_id) { - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); u32 tmp, errors; tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id)); intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp); + if (tmp & DSB_PROG_INT_STATUS) { + spin_lock(&display->drm->event_lock); + + if (crtc->dsb_event) { + /* + * Update vblank counter/timestamp in case it + * hasn't been done yet for this frame.
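The dsb->ins[] cache added in the intel_dsb hunks above replaces two intel_dsb_buffer_read() round trips with a CPU-side copy of the previously emitted instruction, which matters because the DSB command buffer may live in write-combined memory where read-backs are slow. Below is a minimal user-space sketch of the same coalescing scheme; the opcode values, register mask and flat uint32_t buffer are invented for illustration and are not the real DSB instruction encoding.

#include <stdint.h>
#include <stdio.h>

#define OP_SHIFT		24
#define OP_MMIO_WRITE		0x1u	/* made-up opcodes, not the hw encoding */
#define OP_INDEXED_WRITE	0x9u
#define REG_MASK		0x000fffffu

static uint32_t buf[64];	/* stands in for the (write-combined) DSB buffer */
static unsigned int free_pos;	/* next free dword */
static unsigned int ins_start;	/* start of the previously emitted instruction */
static uint32_t ins[2];		/* cached copy of it, so buf[] is never read back */

static void emit(uint32_t ldw, uint32_t udw)
{
	free_pos = (free_pos + 1) & ~1u;	/* instructions start on even dwords */
	ins_start = free_pos;
	ins[0] = ldw;
	ins[1] = udw;
	buf[free_pos++] = ldw;
	buf[free_pos++] = udw;
}

static int prev_ins_is(uint32_t opcode, uint32_t reg)
{
	return free_pos != 0 &&
	       (ins[1] & ~REG_MASK) == (opcode << OP_SHIFT) &&
	       (ins[1] & REG_MASK) == reg;
}

static void reg_write(uint32_t reg, uint32_t val)
{
	if (!prev_ins_is(OP_INDEXED_WRITE, reg)) {
		if (!prev_ins_is(OP_MMIO_WRITE, reg)) {
			emit(val, (OP_MMIO_WRITE << OP_SHIFT) | reg);
			return;
		}

		/* Second write to the same register: rewrite the previous
		 * plain write in place as an indexed write of one value. */
		uint32_t prev_val = ins[0];

		ins[0] = 1;	/* value count */
		ins[1] = (OP_INDEXED_WRITE << OP_SHIFT) | reg;
		buf[ins_start + 0] = ins[0];
		buf[ins_start + 1] = ins[1];
		buf[ins_start + 2] = prev_val;
		free_pos++;
	}

	buf[free_pos++] = val;
	buf[ins_start] = ++ins[0];	/* bump the value count */

	if (free_pos & 1)
		buf[free_pos] = 0;	/* odd data gets a zero pad dword */
}

int main(void)
{
	reg_write(0x70180, 0x11);	/* three writes to one register collapse */
	reg_write(0x70180, 0x22);	/* into a single 5-dword indexed        */
	reg_write(0x70180, 0x33);	/* instruction instead of three writes  */

	for (unsigned int i = 0; i < free_pos; i++)
		printf("%2u: 0x%08x\n", i, buf[i]);
	return 0;
}

This mirrors what the removed comment called the batch of auto-increment register writes: repeated writes to the same register become one indexed instruction whose leading dword is the value count. Resuming the diff: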
+ */ + drm_crtc_accurate_vblank_count(&crtc->base); + + drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event); + crtc->dsb_event = NULL; + } + + spin_unlock(&display->drm->event_lock); + } + errors = tmp & dsb_error_int_status(display); if (errors) drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n", diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index c352c12aa59f..33e0fc2ab380 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -39,12 +39,19 @@ void intel_dsb_reg_write_masked(struct intel_dsb *dsb, void intel_dsb_noop(struct intel_dsb *dsb, int count); void intel_dsb_nonpost_start(struct intel_dsb *dsb); void intel_dsb_nonpost_end(struct intel_dsb *dsb); +void intel_dsb_interrupt(struct intel_dsb *dsb); +void intel_dsb_wait_usec(struct intel_dsb *dsb, int count); +void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count); +void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state, + struct intel_dsb *dsb); void intel_dsb_wait_scanline_in(struct intel_atomic_state *state, struct intel_dsb *dsb, int lower, int upper); void intel_dsb_wait_scanline_out(struct intel_atomic_state *state, struct intel_dsb *dsb, int lower, int upper); +void intel_dsb_vblank_evade(struct intel_atomic_state *state, + struct intel_dsb *dsb); void intel_dsb_chain(struct intel_atomic_state *state, struct intel_dsb *dsb, struct intel_dsb *chained_dsb, diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index bd5888ce4852..0be46c6c9611 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -76,7 +76,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, if (fixed_mode->clock > max_dotclk) return MODE_CLOCK_HIGH; - return intel_mode_valid_max_plane_size(dev_priv, mode, false); + return intel_mode_valid_max_plane_size(dev_priv, mode, 1); } struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index f0e3be0fe420..e8129a720210 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -323,6 +323,7 @@ enum { static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, int gpio, bool value) { + struct intel_display *display = &dev_priv->display; int index; if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) @@ -367,7 +368,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, case MIPI_AVEE_EN_2: index = gpio == MIPI_AVEE_EN_1 ? 1 : 2; - intel_de_rmw(dev_priv, GPIO(dev_priv, index), + intel_de_rmw(display, GPIO(display, index), GPIO_CLOCK_VAL_OUT, GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0)); @@ -376,7 +377,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, case MIPI_VIO_EN_2: index = gpio == MIPI_VIO_EN_1 ? 1 : 2; - intel_de_rmw(dev_priv, GPIO(dev_priv, index), + intel_de_rmw(display, GPIO(display, index), GPIO_DATA_VAL_OUT, GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | GPIO_DATA_VAL_MASK | (value ? 
GPIO_DATA_VAL_OUT : 0)); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 12e7628cbecf..2d5ffb37eac9 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -31,6 +31,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -416,6 +417,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, struct intel_dvo *intel_dvo, const struct intel_dvo_device *dvo) { + struct intel_display *display = &dev_priv->display; struct i2c_adapter *i2c; u32 dpll[I915_MAX_PIPES]; enum pipe pipe; @@ -427,7 +429,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * special cases, but otherwise default to what's defined * in the spec. */ - if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio)) + if (intel_gmbus_is_valid_pin(display, dvo->gpio)) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) gpio = GMBUS_PIN_SSC; @@ -439,7 +441,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - i2c = intel_gmbus_get_adapter(dev_priv, gpio); + i2c = intel_gmbus_get_adapter(display, gpio); intel_dvo->dev = *dvo; @@ -488,6 +490,7 @@ static bool intel_dvo_probe(struct drm_i915_private *i915, void intel_dvo_init(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_connector *connector; struct intel_encoder *encoder; struct intel_dvo *intel_dvo; @@ -548,7 +551,7 @@ void intel_dvo_init(struct drm_i915_private *i915) drm_connector_init_with_ddc(&i915->drm, &connector->base, &intel_dvo_connector_funcs, intel_dvo_connector_type(&intel_dvo->dev), - intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC)); + intel_gmbus_get_adapter(display, GMBUS_PIN_DPC)); drm_connector_helper_add(&connector->base, &intel_dvo_connector_helper_funcs); diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 35557d98d7a7..6a7060889f40 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -3,15 +3,16 @@ * Copyright © 2021 Intel Corporation */ -#include <drm/drm_blend.h> -#include <drm/drm_modeset_helper.h> - #include <linux/dma-fence.h> #include <linux/dma-resv.h> -#include "gem/i915_gem_object.h" +#include <drm/drm_blend.h> +#include <drm/drm_gem.h> +#include <drm/drm_modeset_helper.h> + #include "i915_drv.h" #include "intel_atomic_plane.h" +#include "intel_bo.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dpt.h" @@ -44,6 +45,14 @@ static const struct drm_format_info skl_ccs_formats[] = { .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, }; /* @@ -66,6 +75,30 @@ static const struct drm_format_info 
gen12_ccs_formats[] = { { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_YUYV, .num_planes = 2, .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, @@ -101,31 +134,79 @@ static const struct drm_format_info gen12_ccs_formats[] = { */ static const struct drm_format_info gen12_ccs_cc_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, 
.num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, }; static const struct drm_format_info gen12_flat_ccs_cc_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, }; @@ -268,7 +349,7 @@ static const struct intel_modifier_desc intel_modifiers[] = { .plane_caps = INTEL_PLANE_CAP_TILING_Y, }, { .modifier = I915_FORMAT_MOD_X_TILED, - .display_ver = DISPLAY_VER_ALL, + .display_ver = { 0, 
29 }, .plane_caps = INTEL_PLANE_CAP_TILING_X, }, { .modifier = DRM_FORMAT_MOD_LINEAR, @@ -1237,7 +1318,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane, int plane_width, int *x, int *y) { - struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); int ret; ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane); @@ -1261,7 +1342,7 @@ static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int co * fb layout agrees with the fence layout. We already check that the * fb stride matches the fence stride elsewhere. */ - if (color_plane == 0 && i915_gem_object_is_tiled(obj) && + if (color_plane == 0 && intel_bo_is_tiled(obj) && (*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) { drm_dbg_kms(fb->base.dev, "bad fb plane %d offset: 0x%x\n", @@ -1581,7 +1662,7 @@ static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb) int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb) { - struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); u32 gtt_offset_rotated = 0; u32 gtt_offset_remapped = 0; unsigned int max_size = 0; @@ -1654,10 +1735,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * max_size = max(max_size, offset + size); } - if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) { + if (mul_u32_u32(max_size, tile_size) > obj->size) { drm_dbg_kms(&i915->drm, "fb too big for bo (need %llu bytes, have %zu bytes)\n", - mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size); + mul_u32_u32(max_size, tile_size), obj->size); return -EINVAL; } @@ -1881,7 +1962,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) intel_frontbuffer_put(intel_fb->frontbuffer); - intel_fb_bo_framebuffer_fini(intel_fb_obj(fb)); + intel_fb_bo_framebuffer_fini(intel_fb_bo(fb)); kfree(intel_fb); } @@ -1890,16 +1971,16 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file, unsigned int *handle) { - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev); + struct drm_gem_object *obj = intel_fb_bo(fb); + struct intel_display *display = to_intel_display(obj->dev); - if (i915_gem_object_is_userptr(obj)) { - drm_dbg(&i915->drm, + if (intel_bo_is_userptr(obj)) { + drm_dbg(display->drm, "attempting to use a userptr for a framebuffer, denied\n"); return -EINVAL; } - return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle); + return drm_gem_handle_create(file, obj, handle); } struct frontbuffer_fence_cb { @@ -1923,7 +2004,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, struct drm_clip_rect *clips, unsigned int num_clips) { - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *obj = intel_fb_bo(fb); struct intel_frontbuffer *front = to_intel_frontbuffer(fb); struct dma_fence *fence; struct frontbuffer_fence_cb *cb; @@ -1932,10 +2013,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, if (!atomic_read(&front->bits)) return 0; - if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false))) + if (dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(false))) goto flush; - ret = 
dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false), + ret = dma_resv_get_singleton(obj->resv, dma_resv_usage_rw(false), &fence); if (ret || !fence) goto flush; @@ -1962,7 +2043,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, return ret; flush: - i915_gem_object_flush_if_display(obj); + intel_bo_flush_if_display(obj); intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); return ret; } @@ -1974,10 +2055,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { }; int intel_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev); + struct drm_i915_private *dev_priv = to_i915(obj->dev); struct drm_framebuffer *fb = &intel_fb->base; u32 max_stride; int ret = -EINVAL; @@ -2053,7 +2134,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } } - fb->obj[i] = intel_bo_to_drm_bo(obj); + fb->obj[i] = obj; } ret = intel_fill_fb_info(dev_priv, intel_fb); @@ -2097,7 +2178,7 @@ intel_user_framebuffer_create(struct drm_device *dev, const struct drm_mode_fb_cmd2 *user_mode_cmd) { struct drm_framebuffer *fb; - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; struct drm_i915_private *i915 = to_i915(dev); @@ -2106,13 +2187,13 @@ intel_user_framebuffer_create(struct drm_device *dev, return ERR_CAST(obj); fb = intel_framebuffer_create(obj, &mode_cmd); - drm_gem_object_put(intel_bo_to_drm_bo(obj)); + drm_gem_object_put(obj); return fb; } struct drm_framebuffer * -intel_framebuffer_create(struct drm_i915_gem_object *obj, +intel_framebuffer_create(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { struct intel_framebuffer *intel_fb; @@ -2132,3 +2213,8 @@ err: kfree(intel_fb); return ERR_PTR(ret); } + +struct drm_gem_object *intel_fb_bo(const struct drm_framebuffer *fb) +{ + return fb ? 
fb->obj[0] : NULL; +} diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 827be3f7934c..d78993e5eb62 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -12,6 +12,7 @@ struct drm_device; struct drm_file; struct drm_framebuffer; +struct drm_gem_object; struct drm_i915_gem_object; struct drm_i915_private; struct drm_mode_fb_cmd2; @@ -85,9 +86,12 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio int intel_plane_compute_gtt(struct intel_plane_state *plane_state); int intel_framebuffer_init(struct intel_framebuffer *ifb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * +intel_framebuffer_create(struct drm_gem_object *obj, + struct drm_mode_fb_cmd2 *mode_cmd); +struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd); @@ -97,4 +101,6 @@ bool intel_fb_uses_dpt(const struct drm_framebuffer *fb); unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier); +struct drm_gem_object *intel_fb_bo(const struct drm_framebuffer *fb); + #endif /* __INTEL_FB_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c index 4be09541e509..810ca6ff8640 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_bo.c +++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c @@ -11,15 +11,16 @@ #include "intel_fb.h" #include "intel_fb_bo.h" -void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj) +void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj) { /* Nothing to do for i915 */ } int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *_obj, struct drm_mode_fb_cmd2 *mode_cmd) { + struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned int tiling, stride; @@ -74,7 +75,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, return 0; } -struct drm_i915_gem_object * +struct drm_gem_object * intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, struct drm_file *filp, const struct drm_mode_fb_cmd2 *mode_cmd) @@ -93,5 +94,5 @@ intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, return ERR_PTR(-EREMOTE); } - return obj; + return intel_bo_to_drm_bo(obj); } diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h index 232bf898b013..e71acd1bcb24 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_bo.h +++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h @@ -7,18 +7,18 @@ #define __INTEL_FB_BO_H__ struct drm_file; -struct drm_mode_fb_cmd2; -struct drm_i915_gem_object; +struct drm_gem_object; struct drm_i915_private; +struct drm_mode_fb_cmd2; struct intel_framebuffer; -void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj); +void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj); int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -struct drm_i915_gem_object * +struct drm_gem_object * intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd); diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 575b271e012b..d3a86f9c6bc8 
100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -26,7 +26,8 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb, { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *_obj = intel_fb_bo(fb); + struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct i915_gem_ww_ctx ww; struct i915_vma *vma; int ret; @@ -111,7 +112,8 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *_obj = intel_fb_bo(fb); + struct drm_i915_gem_object *obj = to_intel_bo(_obj); intel_wakeref_t wakeref; struct i915_gem_ww_ctx ww; struct i915_vma *vma; @@ -274,9 +276,11 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state) * will trigger might_sleep() even if it won't actually sleep, * which is the case when the fb has already been pinned. */ - if (intel_plane_needs_physical(plane)) - plane_state->phys_dma_addr = - i915_gem_object_get_dma_address(intel_fb_obj(&fb->base), 0); + if (intel_plane_needs_physical(plane)) { + struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base)); + + plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0); + } } else { unsigned int alignment = intel_plane_fb_min_alignment(plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 52b79bacef4d..df05904bac8a 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -38,6 +38,7 @@ * forcibly disable it to allow proper screen updates. 
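One detail worth calling out from the intel_fill_fb_info() hunk earlier in this diff: the fb-too-big check compares mul_u32_u32(max_size, tile_size) against obj->size, and the helper exists precisely so the tile count times the tile size is widened to 64 bits before it can wrap. A stand-alone sketch of that guard, where fb_fits_bo() and the demo numbers are invented:

#include <stdint.h>
#include <stdio.h>

/* Same contract as the kernel helper: widen the operands before
 * multiplying so the product cannot wrap in 32-bit arithmetic. */
static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

static int fb_fits_bo(uint32_t max_tiles, uint32_t tile_size, uint64_t bo_size)
{
	return mul_u32_u32(max_tiles, tile_size) <= bo_size;
}

int main(void)
{
	uint32_t tiles = 0x200000, tile_size = 4096;	/* true product: 8 GiB */

	/* a naive 32-bit multiply wraps to 0 and would accept the bogus fb */
	printf("wrapped product: %u\n", (unsigned int)(tiles * tile_size));
	/* the widened check correctly rejects it against a 256 MiB object */
	printf("fits: %d\n", fb_fits_bo(tiles, tile_size, 256ull << 20));
	return 0;
}

Back to the diff: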
*/ +#include <linux/debugfs.h> #include <linux/string_helpers.h> #include <drm/drm_blend.h> @@ -1346,7 +1347,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, /* Wa_14016291713 */ if ((IS_DISPLAY_VER(display, 12, 13) || - IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) && + IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) && crtc_state->has_psr && !crtc_state->has_panel_replay) { plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; return 0; @@ -1792,7 +1793,6 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) { struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); mutex_lock(&fbc->lock); @@ -1805,7 +1805,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) intel_fbc_deactivate(fbc, "FIFO underrun"); if (!fbc->flip_pending) - intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe)); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, fbc->state.plane->pipe)); __intel_fbc_disable(fbc); out: mutex_unlock(&fbc->lock); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 49a1ac4f5491..00852ff5b247 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -41,12 +41,11 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> -#include "gem/i915_gem_mman.h" -#include "gem/i915_gem_object.h" - #include "i915_drv.h" +#include "intel_bo.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" @@ -129,10 +128,9 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var, static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct intel_fbdev *fbdev = to_intel_fbdev(info->par); - struct drm_gem_object *bo = drm_gem_fb_get_obj(&fbdev->fb->base, 0); - struct drm_i915_gem_object *obj = to_intel_bo(bo); + struct drm_gem_object *obj = drm_gem_fb_get_obj(&fbdev->fb->base, 0); - return i915_gem_fb_mmap(obj, vma); + return intel_bo_fb_mmap(obj, vma); } static void intel_fbdev_fb_destroy(struct fb_info *info) @@ -187,7 +185,7 @@ static int intelfb_create(struct drm_fb_helper *helper, struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; int ret; mutex_lock(&ifbdev->hpd_lock); @@ -209,7 +207,7 @@ static int intelfb_create(struct drm_fb_helper *helper, drm_framebuffer_put(&fb->base); fb = NULL; } - if (!fb || drm_WARN_ON(dev, !intel_fb_obj(&fb->base))) { + if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) { drm_dbg_kms(&dev_priv->drm, "no BIOS fb, allocating a new one\n"); fb = intel_fbdev_fb_alloc(helper, sizes); @@ -247,7 +245,7 @@ static int intelfb_create(struct drm_fb_helper *helper, info->fbops = &intelfb_ops; - obj = intel_fb_obj(&fb->base); + obj = intel_fb_bo(&fb->base); ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma); if (ret) @@ -259,7 +257,7 @@ static int intelfb_create(struct drm_fb_helper *helper, * If the object is stolen however, it will be full of whatever * garbage was left in there. 
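The intelfb_create() hunk above keeps the BIOS-provided framebuffer only while it is at least as large as the requested mode, and the comment it carries explains why a buffer that is not shmem-backed (stolen memory, say) must be cleared by hand. A toy model of that reuse-or-allocate-and-clear flow, with invented types standing in for the real framebuffer and GEM objects:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_fb {
	unsigned int width, height;
	bool shmem_backed;	/* false here models e.g. stolen memory */
	unsigned char *pixels;
};

static struct toy_fb *alloc_fb(unsigned int w, unsigned int h, bool shmem)
{
	struct toy_fb *fb = malloc(sizeof(*fb));

	if (!fb)
		return NULL;
	fb->width = w;
	fb->height = h;
	fb->shmem_backed = shmem;
	fb->pixels = malloc((size_t)w * h * 4);
	if (!fb->pixels) {
		free(fb);
		return NULL;
	}
	return fb;
}

static struct toy_fb *create_fbdev_fb(struct toy_fb *bios_fb,
				      unsigned int w, unsigned int h)
{
	struct toy_fb *fb = bios_fb;
	bool prealloc = fb != NULL;

	/* Reuse the firmware fb only when it covers the requested mode. */
	if (fb && (fb->width < w || fb->height < h)) {
		printf("BIOS fb too small (%ux%u), allocating a new one\n",
		       fb->width, fb->height);
		fb = NULL;
	}
	if (!fb) {
		/* pretend the fresh buffer also comes from stolen memory
		 * so the clear below has something to do */
		fb = alloc_fb(w, h, false);
		prealloc = false;
	}

	/* Shmem pages come back zeroed, and a preallocated fb holds the
	 * boot splash we want to keep; anything else is old garbage. */
	if (fb && !fb->shmem_backed && !prealloc)
		memset(fb->pixels, 0, (size_t)fb->width * fb->height * 4);

	return fb;
}

int main(void)
{
	struct toy_fb *bios = alloc_fb(640, 480, false);
	struct toy_fb *fb = create_fbdev_fb(bios, 1920, 1080);

	if (fb)
		printf("using %ux%u fbdev fb\n", fb->width, fb->height);
	if (fb && fb != bios) {
		free(fb->pixels);
		free(fb);
	}
	if (bios) {
		free(bios->pixels);
		free(bios);
	}
	return 0;
}

Continuing with the diff: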
*/ - if (!i915_gem_object_is_shmem(obj) && !prealloc) + if (!intel_bo_is_shmem(obj) && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -323,8 +321,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - struct drm_i915_gem_object *obj = - intel_fb_obj(plane_state->uapi.fb); + struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb); if (!crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, @@ -340,12 +337,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, continue; } - if (intel_bo_to_drm_bo(obj)->size > max_size) { + if (obj->size > max_size) { drm_dbg_kms(&i915->drm, "found possible fb from [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); fb = to_intel_framebuffer(plane_state->uapi.fb); - max_size = intel_bo_to_drm_bo(obj)->size; + max_size = obj->size; } } @@ -533,7 +530,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous * full of whatever garbage was left in there. */ if (state == FBINFO_STATE_RUNNING && - !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base))) + !intel_bo_is_shmem(intel_fb_bo(&ifbdev->fb->base))) memset_io(info->screen_base, 0, info->screen_size); drm_fb_helper_set_suspend(&ifbdev->helper, state); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c index 497525ef9668..4991c35a2632 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c @@ -9,6 +9,7 @@ #include "i915_drv.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_fbdev_fb.h" struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, @@ -60,15 +61,16 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, return ERR_PTR(-ENOMEM); } - fb = intel_framebuffer_create(obj, &mode_cmd); + fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd); i915_gem_object_put(obj); return to_intel_framebuffer(fb); } int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma) + struct drm_gem_object *_obj, struct i915_vma *vma) { + struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct i915_gem_ww_ctx ww; void __iomem *vaddr; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h index 4832fe688fbf..e502ae375fc0 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h @@ -8,7 +8,7 @@ struct drm_fb_helper; struct drm_fb_helper_surface_size; -struct drm_i915_gem_object; +struct drm_gem_object; struct drm_i915_private; struct fb_info; struct i915_vma; @@ -16,6 +16,6 @@ struct i915_vma; struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes); int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma); + struct drm_gem_object *obj, struct i915_vma *vma); #endif diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 222cd0e1a2bc..98e1a3606227 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -26,9 +26,10 @@ struct intel_fdi_funcs { static void 
assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; - if (HAS_DDI(dev_priv)) { + if (HAS_DDI(display)) { /* * DDI does not have a specific FDI_TX register. * @@ -36,14 +37,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, * so pipe->transcoder cast is fine here. */ enum transcoder cpu_transcoder = (enum transcoder)pipe; - cur_state = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; + cur_state = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; } else { - cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; + cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; } - I915_STATE_WARN(dev_priv, cur_state != state, - "FDI TX state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "FDI TX state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe) @@ -59,12 +60,13 @@ void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe) static void assert_fdi_rx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; - cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; - I915_STATE_WARN(dev_priv, cur_state != state, - "FDI RX state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "FDI RX state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe) @@ -80,6 +82,7 @@ void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe) void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) { + struct intel_display *display = &i915->display; bool cur_state; /* ILK FDI PLL is always enabled */ @@ -87,23 +90,24 @@ void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ - if (HAS_DDI(i915)) + if (HAS_DDI(display)) return; - cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; - I915_STATE_WARN(i915, !cur_state, - "FDI TX PLL assertion failure, should be active but is disabled\n"); + cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, !cur_state, + "FDI TX PLL assertion failure, should be active but is disabled\n"); } static void assert_fdi_rx_pll(struct drm_i915_private *i915, enum pipe pipe, bool state) { + struct intel_display *display = &i915->display; bool cur_state; - cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; - I915_STATE_WARN(i915, cur_state != state, - "FDI RX PLL assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "FDI RX PLL assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, 
enum pipe pipe) @@ -137,6 +141,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc, */ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state; const struct intel_crtc_state *new_crtc_state; @@ -145,7 +150,7 @@ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state) if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3) return 0; - crtc = intel_crtc_for_pipe(i915, PIPE_C); + crtc = intel_crtc_for_pipe(display, PIPE_C); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state) return 0; @@ -157,7 +162,7 @@ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state) if (!old_crtc_state->fdi_lanes) return 0; - crtc = intel_crtc_for_pipe(i915, PIPE_B); + crtc = intel_crtc_for_pipe(display, PIPE_B); new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(new_crtc_state)) return PTR_ERR(new_crtc_state); @@ -184,6 +189,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc_state *pipe_config, enum pipe *pipe_to_reduce) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state = pipe_config->uapi.state; struct intel_crtc *other_crtc; @@ -223,7 +229,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, if (pipe_config->fdi_lanes <= 2) return 0; - other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); + other_crtc = intel_crtc_for_pipe(display, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -244,7 +250,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); + other_crtc = intel_crtc_for_pipe(display, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 8949fbb1cc60..cda1daf4cdea 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -57,6 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; enum pipe pipe; @@ -64,7 +65,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); if (crtc->cpu_fifo_underrun_disabled) return false; @@ -75,6 +76,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) static bool cpt_can_enable_serr_int(struct drm_device *dev) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; @@ -82,7 +84,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); if (crtc->pch_fifo_underrun_disabled) return false; @@ -93,6 +95,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) static void i9xx_check_fifo_underruns(struct 
intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); i915_reg_t reg = PIPESTAT(dev_priv, crtc->pipe); u32 enable_mask; @@ -106,7 +109,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc) intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); intel_de_posting_read(dev_priv, reg); - trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe); + trace_intel_cpu_fifo_underrun(display, crtc->pipe); drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe)); } @@ -147,6 +150,7 @@ static void ilk_set_fifo_underrun_reporting(struct drm_device *dev, static void ivb_check_fifo_underruns(struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT); @@ -159,7 +163,7 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc) intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); intel_de_posting_read(dev_priv, GEN7_ERR_INT); - trace_intel_cpu_fifo_underrun(dev_priv, pipe); + trace_intel_cpu_fifo_underrun(display, pipe); drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe)); } @@ -188,35 +192,15 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, } } -static u32 -icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv) -{ - u32 mask = PIPE_STATUS_UNDERRUN; - - if (DISPLAY_VER(dev_priv) >= 13) - mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD | - PIPE_STATUS_HARD_UNDERRUN_XELPD | - PIPE_STATUS_PORT_UNDERRUN_XELPD; - - return mask; -} - static void bdw_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - u32 mask = gen8_de_pipe_underrun_mask(dev_priv); - if (enable) { - if (DISPLAY_VER(dev_priv) >= 11) - intel_de_write(dev_priv, - ICL_PIPESTATUS(dev_priv, pipe), - icl_pipe_status_underrun_mask(dev_priv)); - - bdw_enable_pipe_irq(dev_priv, pipe, mask); - } else { - bdw_disable_pipe_irq(dev_priv, pipe, mask); - } + if (enable) + bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + else + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, @@ -235,6 +219,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pch_transcoder = crtc->pipe; u32 serr_int = intel_de_read(dev_priv, SERR_INT); @@ -248,7 +233,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); intel_de_posting_read(dev_priv, SERR_INT); - trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); + trace_intel_pch_fifo_underrun(display, pch_transcoder); drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n", pipe_name(pch_transcoder)); } @@ -282,8 +267,9 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 
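A note on the ivb_can_enable_err_int()/cpt_can_enable_serr_int() helpers above, which only gained an intel_display local in this diff: the underlying scheme is that one error interrupt is shared by every pipe (GEN7_ERR_INT on the CPU side, SERR_INT on the PCH side), so it may be unmasked only while no pipe has opted out of underrun reporting. A compressed sketch of that gating, with the pipe count and bookkeeping invented:

#include <stdbool.h>
#include <stdio.h>

#define NUM_PIPES 3

/* one flag per pipe, mirroring crtc->cpu_fifo_underrun_disabled */
static bool underrun_disabled[NUM_PIPES];

/* The shared error interrupt may only be unmasked when every pipe
 * still wants underrun reporting; one opted-out pipe masks it all. */
static bool can_enable_err_int(void)
{
	for (int pipe = 0; pipe < NUM_PIPES; pipe++)
		if (underrun_disabled[pipe])
			return false;
	return true;
}

static void set_underrun_reporting(int pipe, bool enable)
{
	underrun_disabled[pipe] = !enable;
	printf("pipe %c reporting %s -> shared ERR_INT %s\n",
	       'A' + pipe, enable ? "on" : "off",
	       can_enable_err_int() ? "unmasked" : "masked");
}

int main(void)
{
	set_underrun_reporting(0, true);
	set_underrun_reporting(1, false);	/* one pipe opts out: masked */
	set_underrun_reporting(1, true);	/* all pipes on again: unmasked */
	return 0;
}

The diff continues: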
bool old; lockdep_assert_held(&dev_priv->irq_lock); @@ -351,8 +337,9 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pch_transcoder, bool enable) { + struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, pch_transcoder); + intel_crtc_for_pipe(display, pch_transcoder); unsigned long flags; bool old; @@ -395,8 +382,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); - u32 underruns = 0; + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); /* We may be called too early in init, thanks BIOS! */ if (crtc == NULL) @@ -407,37 +394,10 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, crtc->cpu_fifo_underrun_disabled) return; - /* - * Starting with display version 11, the PIPE_STAT register records - * whether an underrun has happened, and on XELPD+, it will also record - * whether the underrun was soft/hard and whether it was triggered by - * the downstream port logic. We should clear these bits (which use - * write-1-to-clear logic) too. - * - * Note that although the IIR gives us the same underrun and soft/hard - * information, PIPE_STAT is the only place we can find out whether - * the underrun was caused by the downstream port. - */ - if (DISPLAY_VER(dev_priv) >= 11) { - underruns = intel_de_read(dev_priv, - ICL_PIPESTATUS(dev_priv, pipe)) & - icl_pipe_status_underrun_mask(dev_priv); - intel_de_write(dev_priv, ICL_PIPESTATUS(dev_priv, pipe), - underruns); - } - if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) { - trace_intel_cpu_fifo_underrun(dev_priv, pipe); - - if (DISPLAY_VER(dev_priv) >= 11) - drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n", - pipe_name(pipe), - underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "", - underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "", - underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "", - underruns & PIPE_STATUS_UNDERRUN ? "transcoder," : ""); - else - drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); + trace_intel_cpu_fifo_underrun(display, pipe); + + drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); } intel_fbc_handle_fifo_underrun_irq(&dev_priv->display); @@ -455,9 +415,11 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pch_transcoder) { + struct intel_display *display = &dev_priv->display; + if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, false)) { - trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); + trace_intel_pch_fifo_underrun(display, pch_transcoder); drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n", pipe_name(pch_transcoder)); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index af4576dee92a..6ed5f726ee60 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -55,9 +55,11 @@ * cancelled as soon as busyness is detected. 
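The intel_frontbuffer hunks that follow rework how the tracking code reaches its lock and trace points, but the busy-bit scheme itself is unchanged: an ORIGIN_CS invalidate marks bits busy when rendering starts, and a later flush is first filtered down to the bits still marked busy so it cannot clear tracking for work that began afterwards. A sketch of that filtering, with C11 atomics standing in for the fb_tracking.lock serialization the driver actually relies on:

#include <stdatomic.h>
#include <stdio.h>

/* one bit per plane/pipe slot, like the INTEL_FRONTBUFFER_* bits */
static atomic_uint busy_bits;

/* rendering starts: mark the frontbuffer bits busy (ORIGIN_CS) */
static void fb_invalidate(unsigned int bits)
{
	atomic_fetch_or(&busy_bits, bits);
}

/* rendering finishes: flush only bits that were still busy, so a
 * flush racing a newer invalidate cannot clear the fresh tracking */
static void fb_flush(unsigned int bits)
{
	bits &= atomic_load(&busy_bits);
	atomic_fetch_and(&busy_bits, ~bits);
	if (bits)
		printf("flush frontbuffer bits 0x%x\n", bits);
	else
		printf("nothing to flush\n");
}

int main(void)
{
	fb_invalidate(0x3);	/* planes 0 and 1 start rendering */
	fb_flush(0x1);		/* plane 0 finishes: flushed */
	fb_flush(0x4);		/* plane 2 was never busy: no-op */
	fb_flush(0x2);		/* plane 1 finishes: flushed */
	return 0;
}

Resuming the diff: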
*/ -#include "gem/i915_gem_object_frontbuffer.h" +#include <drm/drm_gem.h> + #include "i915_active.h" #include "i915_drv.h" +#include "intel_bo.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dp.h" @@ -93,7 +95,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915, if (!frontbuffer_bits) return; - trace_intel_frontbuffer_flush(i915, frontbuffer_bits, origin); + trace_intel_frontbuffer_flush(display, frontbuffer_bits, origin); might_sleep(); intel_td_flush(i915); @@ -173,17 +175,17 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); - struct intel_display *display = &i915->display; + struct intel_display *display = to_intel_display(front->obj->dev); + struct drm_i915_private *i915 = to_i915(display->drm); if (origin == ORIGIN_CS) { - spin_lock(&i915->display.fb_tracking.lock); - i915->display.fb_tracking.busy_bits |= frontbuffer_bits; - i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&i915->display.fb_tracking.lock); + spin_lock(&display->fb_tracking.lock); + display->fb_tracking.busy_bits |= frontbuffer_bits; + display->fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&display->fb_tracking.lock); } - trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin); + trace_intel_frontbuffer_invalidate(display, frontbuffer_bits, origin); might_sleep(); intel_psr_invalidate(display, frontbuffer_bits, origin); @@ -195,14 +197,15 @@ void __intel_fb_flush(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); + struct intel_display *display = to_intel_display(front->obj->dev); + struct drm_i915_private *i915 = to_i915(display->drm); if (origin == ORIGIN_CS) { - spin_lock(&i915->display.fb_tracking.lock); + spin_lock(&display->fb_tracking.lock); /* Filter out new bits since rendering started. 
*/ - frontbuffer_bits &= i915->display.fb_tracking.busy_bits; - i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->display.fb_tracking.lock); + frontbuffer_bits &= display->fb_tracking.busy_bits; + display->fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&display->fb_tracking.lock); } if (frontbuffer_bits) @@ -214,7 +217,7 @@ static void intel_frontbuffer_flush_work(struct work_struct *work) struct intel_frontbuffer *front = container_of(work, struct intel_frontbuffer, flush_work); - i915_gem_object_flush_if_display(front->obj); + intel_bo_flush_if_display(front->obj); intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); intel_frontbuffer_put(front); } @@ -255,31 +258,32 @@ static void frontbuffer_retire(struct i915_active *ref) } static void frontbuffer_release(struct kref *ref) - __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock) + __releases(&to_intel_display(front->obj->dev)->fb_tracking.lock) { struct intel_frontbuffer *ret, *front = container_of(ref, typeof(*front), ref); - struct drm_i915_gem_object *obj = front->obj; + struct drm_gem_object *obj = front->obj; + struct intel_display *display = to_intel_display(obj->dev); - drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits)); + drm_WARN_ON(display->drm, atomic_read(&front->bits)); - i915_ggtt_clear_scanout(obj); + i915_ggtt_clear_scanout(to_intel_bo(obj)); - ret = i915_gem_object_set_frontbuffer(obj, NULL); - drm_WARN_ON(&intel_bo_to_i915(obj)->drm, ret); - spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock); + ret = intel_bo_set_frontbuffer(obj, NULL); + drm_WARN_ON(display->drm, ret); + spin_unlock(&display->fb_tracking.lock); i915_active_fini(&front->write); kfree_rcu(front, rcu); } struct intel_frontbuffer * -intel_frontbuffer_get(struct drm_i915_gem_object *obj) +intel_frontbuffer_get(struct drm_gem_object *obj) { - struct drm_i915_private *i915 = intel_bo_to_i915(obj); + struct drm_i915_private *i915 = to_i915(obj->dev); struct intel_frontbuffer *front, *cur; - front = i915_gem_object_get_frontbuffer(obj); + front = intel_bo_get_frontbuffer(obj); if (front) return front; @@ -297,7 +301,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work); spin_lock(&i915->display.fb_tracking.lock); - cur = i915_gem_object_set_frontbuffer(obj, front); + cur = intel_bo_set_frontbuffer(obj, front); spin_unlock(&i915->display.fb_tracking.lock); if (cur != front) kfree(front); @@ -308,7 +312,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front) { kref_put_lock(&front->ref, frontbuffer_release, - &intel_bo_to_i915(front->obj)->display.fb_tracking.lock); + &to_intel_display(front->obj->dev)->fb_tracking.lock); } /** @@ -337,13 +341,17 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { - drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm, + struct intel_display *display = to_intel_display(old->obj->dev); + + drm_WARN_ON(display->drm, !(atomic_read(&old->bits) & frontbuffer_bits)); atomic_andnot(frontbuffer_bits, &old->bits); } if (new) { - drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm, + struct intel_display *display = to_intel_display(new->obj->dev); + + drm_WARN_ON(display->drm, atomic_read(&new->bits) & frontbuffer_bits); atomic_or(frontbuffer_bits, &new->bits); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index abb51e8bb920..6237780a9f68 100644 --- 
a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -30,6 +30,7 @@ #include "i915_active_types.h" +struct drm_gem_object; struct drm_i915_private; enum fb_op_origin { @@ -44,7 +45,7 @@ struct intel_frontbuffer { struct kref ref; atomic_t bits; struct i915_active write; - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; struct rcu_head rcu; struct work_struct flush_work; @@ -77,7 +78,7 @@ void intel_frontbuffer_flip(struct drm_i915_private *i915, void intel_frontbuffer_put(struct intel_frontbuffer *front); struct intel_frontbuffer * -intel_frontbuffer_get(struct drm_i915_gem_object *obj); +intel_frontbuffer_get(struct drm_gem_object *obj); void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 6470f75106bd..e3d938c7f83e 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -48,7 +48,7 @@ struct intel_gmbus { u32 reg0; i915_reg_t gpio_reg; struct i2c_algo_bit_data bit_algo; - struct drm_i915_private *i915; + struct intel_display *display; }; enum gmbus_gpio { @@ -149,9 +149,10 @@ static const struct gmbus_pin gmbus_pins_mtp[] = { [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, }; -static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, +static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display, unsigned int pin) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct gmbus_pin *pins; size_t size; @@ -173,7 +174,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { pins = gmbus_pins_bxt; size = ARRAY_SIZE(gmbus_pins_bxt); - } else if (DISPLAY_VER(i915) == 9) { + } else if (DISPLAY_VER(display) == 9) { pins = gmbus_pins_skl; size = ARRAY_SIZE(gmbus_pins_skl); } else if (IS_BROADWELL(i915)) { @@ -190,9 +191,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, return &pins[pin]; } -bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915, unsigned int pin) +bool intel_gmbus_is_valid_pin(struct intel_display *display, unsigned int pin) { - return get_gmbus_pin(i915, pin); + return get_gmbus_pin(display, pin); } /* Intel GPIO access functions */ @@ -206,42 +207,45 @@ to_intel_gmbus(struct i2c_adapter *i2c) } void -intel_gmbus_reset(struct drm_i915_private *i915) +intel_gmbus_reset(struct intel_display *display) { - intel_de_write(i915, GMBUS0(i915), 0); - intel_de_write(i915, GMBUS4(i915), 0); + intel_de_write(display, GMBUS0(display), 0); + intel_de_write(display, GMBUS4(display), 0); } -static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, +static void pnv_gmbus_clock_gating(struct intel_display *display, bool enable) { /* When using bit bashing for I2C, this bit needs to be set to 1 */ - intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, + intel_de_rmw(display, DSPCLK_GATE_D(display), + PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } -static void pch_gmbus_clock_gating(struct drm_i915_private *i915, +static void pch_gmbus_clock_gating(struct intel_display *display, bool enable) { - intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, + intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, + PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? 
PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } -static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, +static void bxt_gmbus_clock_gating(struct intel_display *display, bool enable) { - intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, + intel_de_rmw(display, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, !enable ? BXT_GMBUS_GATING_DIS : 0); } static u32 get_reserved(struct intel_gmbus *bus) { - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ if (!IS_I830(i915) && !IS_I845G(i915)) - reserved = intel_de_read_notrace(i915, bus->gpio_reg) & + reserved = intel_de_read_notrace(display, bus->gpio_reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); return reserved; @@ -250,31 +254,31 @@ static u32 get_reserved(struct intel_gmbus *bus) static int get_clock(void *data) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); - intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); - intel_de_write_notrace(i915, bus->gpio_reg, reserved); + intel_de_write_notrace(display, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); + intel_de_write_notrace(display, bus->gpio_reg, reserved); - return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; + return (intel_de_read_notrace(display, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); - intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); - intel_de_write_notrace(i915, bus->gpio_reg, reserved); + intel_de_write_notrace(display, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); + intel_de_write_notrace(display, bus->gpio_reg, reserved); - return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; + return (intel_de_read_notrace(display, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); u32 clock_bits; @@ -284,14 +288,14 @@ static void set_clock(void *data, int state_high) clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; - intel_de_write_notrace(i915, bus->gpio_reg, reserved | clock_bits); - intel_de_posting_read(i915, bus->gpio_reg); + intel_de_write_notrace(display, bus->gpio_reg, reserved | clock_bits); + intel_de_posting_read(display, bus->gpio_reg); } static void set_data(void *data, int state_high) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); u32 data_bits; @@ -301,20 +305,21 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - intel_de_write_notrace(i915, bus->gpio_reg, reserved | data_bits); - intel_de_posting_read(i915, bus->gpio_reg); + intel_de_write_notrace(display, bus->gpio_reg, reserved | data_bits); + intel_de_posting_read(display, bus->gpio_reg); } static int intel_gpio_pre_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - 
struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); - intel_gmbus_reset(i915); + intel_gmbus_reset(display); if (IS_PINEVIEW(i915)) - pnv_gmbus_clock_gating(i915, false); + pnv_gmbus_clock_gating(display, false); set_data(bus, 1); set_clock(bus, 1); @@ -326,13 +331,14 @@ static void intel_gpio_post_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); set_data(bus, 1); set_clock(bus, 1); if (IS_PINEVIEW(i915)) - pnv_gmbus_clock_gating(i915, true); + pnv_gmbus_clock_gating(display, true); } static void @@ -355,16 +361,17 @@ intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg) algo->data = bus; } -static bool has_gmbus_irq(struct drm_i915_private *i915) +static bool has_gmbus_irq(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); /* * encoder->shutdown() may want to use GMBUS * after irqs have already been disabled. */ - return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); + return HAS_GMBUS_IRQ(display) && intel_irqs_enabled(i915); } -static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) +static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en) { DEFINE_WAIT(wait); u32 gmbus2; @@ -374,21 +381,21 @@ static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ - if (!has_gmbus_irq(i915)) + if (!has_gmbus_irq(display)) irq_en = 0; - add_wait_queue(&i915->display.gmbus.wait_queue, &wait); - intel_de_write_fw(i915, GMBUS4(i915), irq_en); + add_wait_queue(&display->gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), irq_en); status |= GMBUS_SATOER; - ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, + ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status, 2); if (ret) - ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, + ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status, 50); - intel_de_write_fw(i915, GMBUS4(i915), 0); - remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), 0); + remove_wait_queue(&display->gmbus.wait_queue, &wait); if (gmbus2 & GMBUS_SATOER) return -ENXIO; @@ -397,7 +404,7 @@ static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) } static int -gmbus_wait_idle(struct drm_i915_private *i915) +gmbus_wait_idle(struct intel_display *display) { DEFINE_WAIT(wait); u32 irq_enable; @@ -405,33 +412,33 @@ gmbus_wait_idle(struct drm_i915_private *i915) /* Important: The hw handles only the first bit, so set only one! 
*/ irq_enable = 0; - if (has_gmbus_irq(i915)) + if (has_gmbus_irq(display)) irq_enable = GMBUS_IDLE_EN; - add_wait_queue(&i915->display.gmbus.wait_queue, &wait); - intel_de_write_fw(i915, GMBUS4(i915), irq_enable); + add_wait_queue(&display->gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), irq_enable); - ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10); + ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10); - intel_de_write_fw(i915, GMBUS4(i915), 0); - remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), 0); + remove_wait_queue(&display->gmbus.wait_queue, &wait); return ret; } -static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915) +static unsigned int gmbus_max_xfer_size(struct intel_display *display) { - return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : + return DISPLAY_VER(display) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : GMBUS_BYTE_COUNT_MAX; } static int -gmbus_xfer_read_chunk(struct drm_i915_private *i915, +gmbus_xfer_read_chunk(struct intel_display *display, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus0_reg, u32 gmbus1_index) { unsigned int size = len; - bool burst_read = len > gmbus_max_xfer_size(i915); + bool burst_read = len > gmbus_max_xfer_size(display); bool extra_byte_added = false; if (burst_read) { @@ -444,21 +451,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, len++; } size = len % 256 + 256; - intel_de_write_fw(i915, GMBUS0(i915), + intel_de_write_fw(display, GMBUS0(display), gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); } - intel_de_write_fw(i915, GMBUS1(i915), + intel_de_write_fw(display, GMBUS1(display), gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; u32 val, loop = 0; - ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; - val = intel_de_read_fw(i915, GMBUS3(i915)); + val = intel_de_read_fw(display, GMBUS3(display)); do { if (extra_byte_added && len == 1) break; @@ -469,7 +476,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, if (burst_read && len == size - 4) /* Reset the override bit */ - intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg); + intel_de_write_fw(display, GMBUS0(display), gmbus0_reg); } return 0; @@ -486,9 +493,10 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, #define INTEL_GMBUS_BURST_READ_MAX_LEN 767U static int -gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, +gmbus_xfer_read(struct intel_display *display, struct i2c_msg *msg, u32 gmbus0_reg, u32 gmbus1_index) { + struct drm_i915_private *i915 = to_i915(display->drm); u8 *buf = msg->buf; unsigned int rx_size = msg->len; unsigned int len; @@ -498,9 +506,9 @@ gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, if (HAS_GMBUS_BURST_READ(i915)) len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); else - len = min(rx_size, gmbus_max_xfer_size(i915)); + len = min(rx_size, gmbus_max_xfer_size(display)); - ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len, + ret = gmbus_xfer_read_chunk(display, msg->addr, buf, len, gmbus0_reg, gmbus1_index); if (ret) return ret; @@ -513,7 +521,7 @@ gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, } static int -gmbus_xfer_write_chunk(struct drm_i915_private *i915, +gmbus_xfer_write_chunk(struct intel_display *display, unsigned short addr, u8 *buf, unsigned int len, 
u32 gmbus1_index) { @@ -526,8 +534,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915, len -= 1; } - intel_de_write_fw(i915, GMBUS3(i915), val); - intel_de_write_fw(i915, GMBUS1(i915), + intel_de_write_fw(display, GMBUS3(display), val); + intel_de_write_fw(display, GMBUS1(display), gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -537,9 +545,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915, val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); - intel_de_write_fw(i915, GMBUS3(i915), val); + intel_de_write_fw(display, GMBUS3(display), val); - ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; } @@ -548,7 +556,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915, } static int -gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, +gmbus_xfer_write(struct intel_display *display, struct i2c_msg *msg, u32 gmbus1_index) { u8 *buf = msg->buf; @@ -557,9 +565,9 @@ gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, int ret; do { - len = min(tx_size, gmbus_max_xfer_size(i915)); + len = min(tx_size, gmbus_max_xfer_size(display)); - ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len, + ret = gmbus_xfer_write_chunk(display, msg->addr, buf, len, gmbus1_index); if (ret) return ret; @@ -586,7 +594,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) } static int -gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, +gmbus_index_xfer(struct intel_display *display, struct i2c_msg *msgs, u32 gmbus0_reg) { u32 gmbus1_index = 0; @@ -602,17 +610,17 @@ gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, /* GMBUS5 holds 16-bit index */ if (gmbus5) - intel_de_write_fw(i915, GMBUS5(i915), gmbus5); + intel_de_write_fw(display, GMBUS5(display), gmbus5); if (msgs[1].flags & I2C_M_RD) - ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg, + ret = gmbus_xfer_read(display, &msgs[1], gmbus0_reg, gmbus1_index); else - ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index); + ret = gmbus_xfer_write(display, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) - intel_de_write_fw(i915, GMBUS5(i915), 0); + intel_de_write_fw(display, GMBUS5(display), 0); return ret; } @@ -622,34 +630,35 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, u32 gmbus0_source) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); int i = 0, inc, try = 0; int ret = 0; /* Display WA #0868: skl,bxt,kbl,cfl,glk */ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - bxt_gmbus_clock_gating(i915, false); + bxt_gmbus_clock_gating(display, false); else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) - pch_gmbus_clock_gating(i915, false); + pch_gmbus_clock_gating(display, false); retry: - intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0); + intel_de_write_fw(display, GMBUS0(display), gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { - ret = gmbus_index_xfer(i915, &msgs[i], + ret = gmbus_index_xfer(display, &msgs[i], gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { - ret = gmbus_xfer_read(i915, &msgs[i], + ret = 
gmbus_xfer_read(display, &msgs[i], gmbus0_source | bus->reg0, 0); } else { - ret = gmbus_xfer_write(i915, &msgs[i], 0); + ret = gmbus_xfer_write(display, &msgs[i], 0); } if (!ret) - ret = gmbus_wait(i915, + ret = gmbus_wait(display, GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN); if (ret == -ETIMEDOUT) goto timeout; @@ -661,19 +670,19 @@ retry: * a STOP on the very first cycle. To simplify the code we * unconditionally generate the STOP condition with an additional gmbus * cycle. */ - intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + intel_de_write_fw(display, GMBUS1(display), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. */ - if (gmbus_wait_idle(i915)) { - drm_dbg_kms(&i915->drm, + if (gmbus_wait_idle(display)) { + drm_dbg_kms(display->drm, "GMBUS [%s] timed out waiting for idle\n", adapter->name); ret = -ETIMEDOUT; } - intel_de_write_fw(i915, GMBUS0(i915), 0); + intel_de_write_fw(display, GMBUS0(display), 0); ret = ret ?: i; goto out; @@ -692,8 +701,8 @@ clear_err: * it's slow responding and only answers on the 2nd retry. */ ret = -ENXIO; - if (gmbus_wait_idle(i915)) { - drm_dbg_kms(&i915->drm, + if (gmbus_wait_idle(display)) { + drm_dbg_kms(display->drm, "GMBUS [%s] timed out after NAK\n", adapter->name); ret = -ETIMEDOUT; @@ -703,11 +712,11 @@ clear_err: * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the target's NAK. */ - intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT); - intel_de_write_fw(i915, GMBUS1(i915), 0); - intel_de_write_fw(i915, GMBUS0(i915), 0); + intel_de_write_fw(display, GMBUS1(display), GMBUS_SW_CLR_INT); + intel_de_write_fw(display, GMBUS1(display), 0); + intel_de_write_fw(display, GMBUS0(display), 0); - drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", + drm_dbg_kms(display->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); @@ -718,7 +727,7 @@ clear_err: * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. */ if (ret == -ENXIO && i == 0 && try++ == 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "GMBUS [%s] NAK on first message, retry\n", adapter->name); goto retry; @@ -727,10 +736,10 @@ clear_err: goto out; timeout: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "GMBUS [%s] timed out, falling back to bit banging on pin %d\n", bus->adapter.name, bus->reg0 & 0xff); - intel_de_write_fw(i915, GMBUS0(i915), 0); + intel_de_write_fw(display, GMBUS0(display), 0); /* * Hardware may not support GMBUS over these pins? 
Try GPIO bitbanging @@ -741,9 +750,9 @@ timeout: out: /* Display WA #0868: skl,bxt,kbl,cfl,glk */ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - bxt_gmbus_clock_gating(i915, true); + bxt_gmbus_clock_gating(display, true); else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) - pch_gmbus_clock_gating(i915, true); + pch_gmbus_clock_gating(display, true); return ret; } @@ -752,7 +761,8 @@ static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; int ret; @@ -776,7 +786,8 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = {}; struct i2c_msg msgs[] = { @@ -797,7 +808,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) int ret; wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); - mutex_lock(&i915->display.gmbus.mutex); + mutex_lock(&display->gmbus.mutex); /* * In order to output Aksv to the receiver, use an indexed write to @@ -806,7 +817,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) */ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); - mutex_unlock(&i915->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; @@ -830,27 +841,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - mutex_lock(&i915->display.gmbus.mutex); + mutex_lock(&display->gmbus.mutex); } static int gmbus_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - return mutex_trylock(&i915->display.gmbus.mutex); + return mutex_trylock(&display->gmbus.mutex); } static void gmbus_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - mutex_unlock(&i915->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); } static const struct i2c_lock_operations gmbus_lock_ops = { @@ -861,31 +872,32 @@ static const struct i2c_lock_operations gmbus_lock_ops = { /** * intel_gmbus_setup - instantiate all Intel i2c GMBuses - * @i915: i915 device private + * @display: display device */ -int intel_gmbus_setup(struct drm_i915_private *i915) +int intel_gmbus_setup(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct drm_i915_private *i915 = to_i915(display->drm); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); unsigned int pin; int ret; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) - i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE; - else if (!HAS_GMCH(i915)) + display->gmbus.mmio_base = VLV_DISPLAY_BASE; + else if (!HAS_GMCH(display)) /* * Broxton uses the same PCH offsets for South Display Engine, * even though it 
doesn't have a PCH. */ - i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE; + display->gmbus.mmio_base = PCH_DISPLAY_BASE; - mutex_init(&i915->display.gmbus.mutex); - init_waitqueue_head(&i915->display.gmbus.wait_queue); + mutex_init(&display->gmbus.mutex); + init_waitqueue_head(&display->gmbus.wait_queue); - for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(display->gmbus.bus); pin++) { const struct gmbus_pin *gmbus_pin; struct intel_gmbus *bus; - gmbus_pin = get_gmbus_pin(i915, pin); + gmbus_pin = get_gmbus_pin(display, pin); if (!gmbus_pin) continue; @@ -901,7 +913,7 @@ int intel_gmbus_setup(struct drm_i915_private *i915) "i915 gmbus %s", gmbus_pin->name); bus->adapter.dev.parent = &pdev->dev; - bus->i915 = i915; + bus->display = display; bus->adapter.algo = &gmbus_algorithm; bus->adapter.lock_ops = &gmbus_lock_ops; @@ -919,7 +931,7 @@ int intel_gmbus_setup(struct drm_i915_private *i915) if (IS_I830(i915)) bus->force_bit = 1; - intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio)); + intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio)); ret = i2c_add_adapter(&bus->adapter); if (ret) { @@ -927,43 +939,43 @@ int intel_gmbus_setup(struct drm_i915_private *i915) goto err; } - i915->display.gmbus.bus[pin] = bus; + display->gmbus.bus[pin] = bus; } - intel_gmbus_reset(i915); + intel_gmbus_reset(display); return 0; err: - intel_gmbus_teardown(i915); + intel_gmbus_teardown(display); return ret; } -struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915, +struct i2c_adapter *intel_gmbus_get_adapter(struct intel_display *display, unsigned int pin) { - if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) || - !i915->display.gmbus.bus[pin])) + if (drm_WARN_ON(display->drm, pin >= ARRAY_SIZE(display->gmbus.bus) || + !display->gmbus.bus[pin])) return NULL; - return &i915->display.gmbus.bus[pin]->adapter; + return &display->gmbus.bus[pin]->adapter; } void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - mutex_lock(&i915->display.gmbus.mutex); + mutex_lock(&display->gmbus.mutex); bus->force_bit += force_bit ? 1 : -1; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "%sabling bit-banging on %s. force bit now %d\n", force_bit ? 
"en" : "dis", adapter->name, bus->force_bit); - mutex_unlock(&i915->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); } bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) @@ -973,25 +985,25 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) return bus->force_bit; } -void intel_gmbus_teardown(struct drm_i915_private *i915) +void intel_gmbus_teardown(struct intel_display *display) { unsigned int pin; - for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(display->gmbus.bus); pin++) { struct intel_gmbus *bus; - bus = i915->display.gmbus.bus[pin]; + bus = display->gmbus.bus[pin]; if (!bus) continue; i2c_del_adapter(&bus->adapter); kfree(bus); - i915->display.gmbus.bus[pin] = NULL; + display->gmbus.bus[pin] = NULL; } } -void intel_gmbus_irq_handler(struct drm_i915_private *i915) +void intel_gmbus_irq_handler(struct intel_display *display) { - wake_up_all(&i915->display.gmbus.wait_queue); + wake_up_all(&display->gmbus.wait_queue); } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index 8111eb23e2af..35a200a9efc0 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -8,8 +8,8 @@ #include <linux/types.h> -struct drm_i915_private; struct i2c_adapter; +struct intel_display; #define GMBUS_PIN_DISABLED 0 #define GMBUS_PIN_SSC 1 @@ -34,18 +34,17 @@ struct i2c_adapter; #define GMBUS_NUM_PINS 15 /* including 0 */ -int intel_gmbus_setup(struct drm_i915_private *dev_priv); -void intel_gmbus_teardown(struct drm_i915_private *dev_priv); -bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, - unsigned int pin); +int intel_gmbus_setup(struct intel_display *display); +void intel_gmbus_teardown(struct intel_display *display); +bool intel_gmbus_is_valid_pin(struct intel_display *display, unsigned int pin); int intel_gmbus_output_aksv(struct i2c_adapter *adapter); struct i2c_adapter * -intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); +intel_gmbus_get_adapter(struct intel_display *display, unsigned int pin); void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter); -void intel_gmbus_reset(struct drm_i915_private *dev_priv); +void intel_gmbus_reset(struct intel_display *display); -void intel_gmbus_irq_handler(struct drm_i915_private *i915); +void intel_gmbus_irq_handler(struct intel_display *display); #endif /* __INTEL_GMBUS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h index 53aacbda983c..59bad1dda6d6 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h @@ -8,9 +8,9 @@ #include "i915_reg_defs.h" -#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base) +#define __GMBUS_MMIO_BASE(__display) ((__display)->gmbus.mmio_base) -#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio)) +#define GPIO(__display, gpio) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5010 + 4 * (gpio)) #define GPIO_CLOCK_DIR_MASK (1 << 0) #define GPIO_CLOCK_DIR_IN (0 << 1) #define GPIO_CLOCK_DIR_OUT (1 << 1) @@ -27,7 +27,7 @@ #define GPIO_DATA_PULLUP_DISABLE (1 << 13) /* clock/port select */ -#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100) +#define GMBUS0(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5100) #define GMBUS_AKSV_SELECT (1 << 11) #define GMBUS_RATE_100KHZ (0 << 
8) #define GMBUS_RATE_50KHZ (1 << 8) @@ -37,7 +37,7 @@ #define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) /* command/status */ -#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104) +#define GMBUS1(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5104) #define GMBUS_SW_CLR_INT (1 << 31) #define GMBUS_SW_RDY (1 << 30) #define GMBUS_ENT (1 << 29) /* enable timeout */ @@ -54,7 +54,7 @@ #define GMBUS_SLAVE_WRITE (0 << 0) /* status */ -#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108) +#define GMBUS2(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5108) #define GMBUS_INUSE (1 << 15) #define GMBUS_HW_WAIT_PHASE (1 << 14) #define GMBUS_STALL_TIMEOUT (1 << 13) @@ -64,10 +64,10 @@ #define GMBUS_ACTIVE (1 << 9) /* data buffer bytes 3-0 */ -#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c) +#define GMBUS3(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x510c) /* interrupt mask (Pineview+) */ -#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110) +#define GMBUS4(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5110) #define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) #define GMBUS_NAK_EN (1 << 3) #define GMBUS_IDLE_EN (1 << 2) @@ -75,7 +75,7 @@ #define GMBUS_HW_RDY_EN (1 << 0) /* byte index */ -#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120) +#define GMBUS5(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5120) #define GMBUS_2BYTE_INDEX_EN (1 << 31) #endif /* __INTEL_GMBUS_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 377939de0ff4..f6d42ec6949e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -25,6 +25,7 @@ #include "intel_hdcp.h" #include "intel_hdcp_gsc.h" #include "intel_hdcp_regs.h" +#include "intel_hdcp_shim.h" #include "intel_pcode.h" #define KEY_LOAD_TRIES 5 @@ -35,20 +36,20 @@ static void intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder, struct intel_hdcp *hdcp) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); /* Here we assume HDMI is in TMDS mode of operation */ if (encoder->type != INTEL_OUTPUT_HDMI) return; - if (DISPLAY_VER(dev_priv) >= 14) { - if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER)) - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), + if (DISPLAY_VER(display) >= 14) { + if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) + intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), 0, HDCP_LINE_REKEY_DISABLE); - else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) || - IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER)) - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder), + else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) || + IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder), 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE); } } @@ -95,10 +96,10 @@ static int intel_hdcp_required_content_stream(struct intel_atomic_state *state, struct intel_digital_port *dig_port) { + struct intel_display *display = to_intel_display(state); struct drm_connector_list_iter conn_iter; struct intel_digital_port *conn_dig_port; struct intel_connector *connector; - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; bool enforce_type0 = false; int 
k; @@ -111,7 +112,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state, if (!dig_port->hdcp_mst_type1_capable) enforce_type0 = true; - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->base.status == connector_status_disconnected) continue; @@ -133,7 +134,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state, } drm_connector_list_iter_end(&conn_iter); - if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) + if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0)) return -EINVAL; /* @@ -181,7 +182,7 @@ static int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret, i, tries = 2; /* HDCP spec states that we must retry the bksv if it is invalid */ @@ -193,7 +194,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, break; } if (i == tries) { - drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); + drm_dbg_kms(display->drm, "Bksv is invalid\n"); return -ENODEV; } @@ -232,7 +233,7 @@ bool intel_hdcp_get_capability(struct intel_connector *connector) */ static bool intel_hdcp2_prerequisite(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; /* I915 support for HDCP2.2 */ @@ -240,18 +241,18 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector) return false; /* If MTL+ make sure gsc is loaded and proxy is setup */ - if (intel_hdcp_gsc_cs_required(i915)) { - if (!intel_hdcp_gsc_check_status(i915)) + if (intel_hdcp_gsc_cs_required(display)) { + if (!intel_hdcp_gsc_check_status(display)) return false; } /* MEI/GSC interface is solid depending on which is used */ - mutex_lock(&i915->display.hdcp.hdcp_mutex); - if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + if (!display->hdcp.comp_added || !display->hdcp.arbiter) { + mutex_unlock(&display->hdcp.hdcp_mutex); return false; } - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return true; } @@ -287,19 +288,19 @@ void intel_hdcp_get_remote_capability(struct intel_connector *connector, *hdcp2_capable = false; } -static bool intel_hdcp_in_use(struct drm_i915_private *i915, +static bool intel_hdcp_in_use(struct intel_display *display, enum transcoder cpu_transcoder, enum port port) { - return intel_de_read(i915, - HDCP_STATUS(i915, cpu_transcoder, port)) & + return intel_de_read(display, + HDCP_STATUS(display, cpu_transcoder, port)) & HDCP_STATUS_ENC; } -static bool intel_hdcp2_in_use(struct drm_i915_private *i915, +static bool intel_hdcp2_in_use(struct intel_display *display, enum transcoder cpu_transcoder, enum port port) { - return intel_de_read(i915, - HDCP2_STATUS(i915, cpu_transcoder, port)) & + return intel_de_read(display, + HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS; } @@ -324,8 +325,9 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, return 0; } -static bool hdcp_key_loadable(struct drm_i915_private *i915) +static bool hdcp_key_loadable(struct 
intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); enum i915_power_well_id id; intel_wakeref_t wakeref; bool enabled = false; @@ -352,19 +354,20 @@ static bool hdcp_key_loadable(struct drm_i915_private *i915) return enabled; } -static void intel_hdcp_clear_keys(struct drm_i915_private *i915) +static void intel_hdcp_clear_keys(struct intel_display *display) { - intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); - intel_de_write(i915, HDCP_KEY_STATUS, + intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); + intel_de_write(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); } -static int intel_hdcp_load_keys(struct drm_i915_private *i915) +static int intel_hdcp_load_keys(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); int ret; u32 val; - val = intel_de_read(i915, HDCP_KEY_STATUS); + val = intel_de_read(display, HDCP_KEY_STATUS); if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) return 0; @@ -373,7 +376,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) * out of reset. So if Key is not already loaded, its an error state. */ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) + if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) return -ENXIO; /* @@ -384,20 +387,20 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) * process from other platforms. These platforms use the GT Driver * Mailbox interface. */ - if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) { + if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) { ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to initiate HDCP key load (%d)\n", ret); return ret; } } else { - intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); + intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); } /* Wait for the keys to load (500us) */ - ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS, + ret = intel_de_wait_custom(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 10, 1, &val); if (ret) @@ -406,27 +409,27 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) return -ENXIO; /* Send Aksv over to PCH display for use in authentication */ - intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); + intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); return 0; } /* Returns updated SHA-1 index */ -static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text) +static int intel_write_sha_text(struct intel_display *display, u32 sha_text) { - intel_de_write(i915, HDCP_SHA_TEXT, sha_text); - if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { - drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n"); + intel_de_write(display, HDCP_SHA_TEXT, sha_text); + if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { + drm_err(display->drm, "Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; } return 0; } static -u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, +u32 intel_hdcp_get_repeater_ctl(struct intel_display *display, enum transcoder cpu_transcoder, enum port port) { - if (DISPLAY_VER(i915) >= 12) { + if (DISPLAY_VER(display) >= 12) { switch (cpu_transcoder) { case TRANSCODER_A: return HDCP_TRANSA_REP_PRESENT | @@ -441,7 +444,7 @@ u32 intel_hdcp_get_repeater_ctl(struct 
drm_i915_private *i915, return HDCP_TRANSD_REP_PRESENT | HDCP_TRANSD_SHA1_M0; default: - drm_err(&i915->drm, "Unknown transcoder %d\n", + drm_err(display->drm, "Unknown transcoder %d\n", cpu_transcoder); return 0; } @@ -459,7 +462,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: - drm_err(&i915->drm, "Unknown port %d\n", port); + drm_err(display->drm, "Unknown port %d\n", port); return 0; } } @@ -469,8 +472,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port = dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; @@ -481,7 +484,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, ret = shim->read_v_prime_part(dig_port, i, &vprime); if (ret) return ret; - intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime); + intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime); } /* @@ -497,8 +500,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx = 0; sha_text = 0; sha_leftovers = 0; - rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); - intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port); + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; @@ -510,14 +513,14 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_text |= ksv[j] << off; } - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; /* Programming guide writes this every 64 bytes */ sha_idx += sizeof(sha_text); if (!(sha_idx % 64)) - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Store the leftover bytes from the ksv in sha_text */ @@ -534,7 +537,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, if (sizeof(sha_text) > sha_leftovers) continue; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_leftovers = 0; @@ -550,73 +553,73 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, */ if (sha_leftovers == 0) { /* Write 16 bits of text, 16 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); - ret = intel_write_sha_text(i915, + ret = intel_write_sha_text(display, bstatus[0] << 8 | bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 16 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if 
(sha_leftovers == 1) { /* Write 24 bits of text, 8 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); sha_text |= bstatus[0] << 16 | bstatus[1] << 8; /* Only 24-bits of data, must be in the LSB */ sha_text = (sha_text & 0xffffff00) >> 8; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 24 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 2) { /* Write 32 bits of text */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0] << 8 | bstatus[1]; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 64 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); for (i = 0; i < 2; i++) { - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); @@ -626,56 +629,56 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, * Terminate the SHA-1 stream by hand. For the other leftover * cases this is appended by the hardware. 
*/ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 3) { /* Write 32 bits of text (filled from LSB) */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0]; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); - ret = intel_write_sha_text(i915, bstatus[1]); + ret = intel_write_sha_text(display, bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else { - drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n", + drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n", sha_leftovers); return -EINVAL; } - intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ while ((sha_idx % 64) < (64 - sizeof(sha_text))) { - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); @@ -687,20 +690,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, * - 10 bytes for BINFO/BSTATUS(2), M0(8) */ sha_text = (num_downstream * 5 + 10) * 8; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; /* Tell the HW we're done with the hash and wait for it to ACK */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); - if (intel_de_wait_for_set(i915, HDCP_REP_CTL, + if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, 1)) { - drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n"); + drm_err(display->drm, "Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } - if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { - drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n"); + if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { + drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n"); return -ENXIO; } @@ -711,15 +714,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, static int intel_hdcp_auth_downstream(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct intel_hdcp_shim *shim = connector->hdcp.shim; u8 bstatus[2], 
num_downstream, *ksv_fifo; int ret, i, tries = 3; ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "KSV list failed to become ready (%d)\n", ret); return ret; } @@ -730,7 +733,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n"); + drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n"); return -EPERM; } @@ -743,14 +746,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) */ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); if (num_downstream == 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Repeater with zero downstream devices\n"); return -EINVAL; } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); if (!ksv_fifo) { - drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n"); + drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n"); return -ENOMEM; } @@ -758,9 +761,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (ret) goto err; - if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo, + if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo, num_downstream) > 0) { - drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n"); + drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n"); ret = -EPERM; goto err; } @@ -778,12 +781,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) } if (i == tries) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "V Prime validation failed.(%d)\n", ret); goto err; } - drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n", + drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n", num_downstream); ret = 0; err: @@ -794,8 +797,8 @@ err: /* Implements Part 1 of the HDCP authorization procedure */ static int intel_hdcp_auth(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; @@ -827,7 +830,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret) return ret; if (!hdcp_capable) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel is not HDCP capable\n"); return -EINVAL; } @@ -835,24 +838,24 @@ static int intel_hdcp_auth(struct intel_connector *connector) /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) - intel_de_write(i915, - HDCP_ANINIT(i915, cpu_transcoder, port), + intel_de_write(display, + HDCP_ANINIT(display, cpu_transcoder, port), get_random_u32()); - intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_de_wait_for_set(i915, - HDCP_STATUS(i915, cpu_transcoder, port), + if (intel_de_wait_for_set(display, + HDCP_STATUS(display, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { - drm_err(&i915->drm, "Timed out waiting for An\n"); + drm_err(display->drm, "Timed out waiting for An\n"); return -ETIMEDOUT; } - an.reg[0] = intel_de_read(i915, - HDCP_ANLO(i915, cpu_transcoder, port)); - an.reg[1] = intel_de_read(i915, - HDCP_ANHI(i915, cpu_transcoder, port)); + an.reg[0] = 
intel_de_read(display, + HDCP_ANLO(display, cpu_transcoder, port)); + an.reg[1] = intel_de_read(display, + HDCP_ANHI(display, cpu_transcoder, port)); ret = shim->write_an_aksv(dig_port, an.shim); if (ret) return ret; @@ -865,34 +868,34 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret < 0) return ret; - if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) { - drm_err(&i915->drm, "BKSV is revoked\n"); + if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) { + drm_err(display->drm, "BKSV is revoked\n"); return -EPERM; } - intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port), bksv.reg[0]); - intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port), bksv.reg[1]); ret = shim->repeater_present(dig_port, &repeater_present); if (ret) return ret; if (repeater_present) - intel_de_write(i915, HDCP_REP_CTL, - intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port)); + intel_de_write(display, HDCP_REP_CTL, + intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port)); ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) return ret; - intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { - drm_err(&i915->drm, "Timed out waiting for R0 ready\n"); + drm_err(display->drm, "Timed out waiting for R0 ready\n"); return -ETIMEDOUT; } @@ -918,30 +921,30 @@ static int intel_hdcp_auth(struct intel_connector *connector) ret = shim->read_ri_prime(dig_port, ri.shim); if (ret) return ret; - intel_de_write(i915, - HDCP_RPRIME(i915, cpu_transcoder, port), + intel_de_write(display, + HDCP_RPRIME(display, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Timed out waiting for Ri prime match (%x)\n", - intel_de_read(i915, - HDCP_STATUS(i915, cpu_transcoder, port))); + intel_de_read(display, + HDCP_STATUS(display, cpu_transcoder, port))); return -ETIMEDOUT; } /* Wait for encryption confirmation */ - if (intel_de_wait_for_set(i915, - HDCP_STATUS(i915, cpu_transcoder, port), + if (intel_de_wait_for_set(display, + HDCP_STATUS(display, cpu_transcoder, port), HDCP_STATUS_ENC, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, "Timed out waiting for encryption\n"); + drm_err(display->drm, "Timed out waiting for encryption\n"); return -ETIMEDOUT; } @@ -949,42 +952,42 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (shim->stream_encryption) { ret = shim->stream_encryption(connector, true); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", + drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream 
encrypted\n", transcoder_name(hdcp->stream_transcoder)); } if (repeater_present) return intel_hdcp_auth_downstream(connector); - drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n"); + drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n"); return 0; } static int _intel_hdcp_disable(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; u32 repeater_ctl; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n", connector->base.base.id, connector->base.name); if (hdcp->shim->stream_encryption) { ret = hdcp->shim->stream_encryption(connector, false); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", + drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); /* * If there are other connectors on this port using HDCP, @@ -996,51 +999,51 @@ static int _intel_hdcp_disable(struct intel_connector *connector) } hdcp->hdcp_encrypted = false; - intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0); - if (intel_de_wait_for_clear(i915, - HDCP_STATUS(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0); + if (intel_de_wait_for_clear(display, + HDCP_STATUS(display, cpu_transcoder, port), ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } - repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, + repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port); - intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0); + intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0); ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { - drm_err(&i915->drm, "Failed to disable HDCP signalling\n"); + drm_err(display->drm, "Failed to disable HDCP signalling\n"); return ret; } - drm_dbg_kms(&i915->drm, "HDCP is disabled\n"); + drm_dbg_kms(display->drm, "HDCP is disabled\n"); return 0; } static int intel_hdcp1_enable(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int i, ret, tries = 3; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n", connector->base.base.id, connector->base.name); - if (!hdcp_key_loadable(i915)) { - drm_err(&i915->drm, "HDCP key Load is not possible\n"); + if (!hdcp_key_loadable(display)) { + drm_err(display->drm, "HDCP key Load is not possible\n"); return -ENXIO; } for (i = 0; i < KEY_LOAD_TRIES; i++) { - ret = intel_hdcp_load_keys(i915); + ret = intel_hdcp_load_keys(display); if (!ret) break; - intel_hdcp_clear_keys(i915); + intel_hdcp_clear_keys(display); } 
if (ret) { - drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n", + drm_err(display->drm, "Could not load HDCP keys, (%d)\n", ret); return ret; } @@ -1053,13 +1056,13 @@ static int intel_hdcp1_enable(struct intel_connector *connector) return 0; } - drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret); + drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret); /* Ensuring HDCP encryption and signalling are stopped. */ _intel_hdcp_disable(connector); } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } @@ -1072,20 +1075,20 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) static void intel_hdcp_update_value(struct intel_connector *connector, u64 value, bool update_property) { - struct drm_device *dev = connector->base.dev; + struct intel_display *display = to_intel_display(connector); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *i915 = to_i915(connector->base.dev); - drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex)); + drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex)); if (hdcp->value == value) return; - drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex)); + drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex)); if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { - if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0)) + if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0)) dig_port->num_hdcp_streams--; } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { dig_port->num_hdcp_streams++; @@ -1102,8 +1105,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector, /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; @@ -1121,12 +1124,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (drm_WARN_ON(&i915->drm, - !intel_hdcp_in_use(i915, cpu_transcoder, port))) { - drm_err(&i915->drm, + if (drm_WARN_ON(display->drm, + !intel_hdcp_in_use(display, cpu_transcoder, port))) { + drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n", connector->base.base.id, connector->base.name, - intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); + intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port))); ret = -ENXIO; intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, @@ -1142,13 +1145,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n", connector->base.base.id, connector->base.name); ret = _intel_hdcp_disable(connector); if (ret) { - drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret); + drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); @@ -1169,9 +1172,9 @@ static void intel_hdcp_prop_work(struct work_struct *work) struct intel_hdcp 
*hdcp = container_of(work, struct intel_hdcp, prop_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); - drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); + drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); mutex_lock(&hdcp->mutex); /* @@ -1184,40 +1187,40 @@ static void intel_hdcp_prop_work(struct work_struct *work) hdcp->value); mutex_unlock(&hdcp->mutex); - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); drm_connector_put(&connector->base); } -bool is_hdcp_supported(struct drm_i915_private *i915, enum port port) +bool is_hdcp_supported(struct intel_display *display, enum port port) { - return DISPLAY_RUNTIME_INFO(i915)->has_hdcp && - (DISPLAY_VER(i915) >= 12 || port < PORT_E); + return DISPLAY_RUNTIME_INFO(display)->has_hdcp && + (DISPLAY_VER(display) >= 12 || port < PORT_E); } static int hdcp2_prepare_ake_init(struct intel_connector *connector, struct hdcp2_ake_init *ake_data) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); if (ret) - drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n", + drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1229,17 +1232,17 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, struct hdcp2_ake_no_stored_km *ek_pub_km, size_t *msg_sz) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } @@ -1247,9 +1250,9 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, rx_cert, paired, ek_pub_km, msg_sz); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n", + drm_dbg_kms(display->drm, "Verify rx_cert failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1257,24 +1260,24 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, static int hdcp2_verify_hprime(struct intel_connector *connector, struct hdcp2_ake_send_hprime *rx_hprime) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1283,25 +1286,25 @@ static int hdcp2_store_pairing_info(struct intel_connector *connector, struct hdcp2_ake_send_pairing_info *pairing_info) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); if (ret < 0) - drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n", + drm_dbg_kms(display->drm, "Store pairing info failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1310,25 +1313,25 @@ static int hdcp2_prepare_lc_init(struct intel_connector *connector, struct hdcp2_lc_init *lc_init) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); if (ret < 0) - drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n", + drm_dbg_kms(display->drm, "Prepare lc_init failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1337,25 +1340,25 @@ static int hdcp2_verify_lprime(struct intel_connector *connector, struct hdcp2_lc_send_lprime *rx_lprime) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n", + drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1363,25 +1366,25 @@ hdcp2_verify_lprime(struct intel_connector *connector, static int hdcp2_prepare_skey(struct intel_connector *connector, struct hdcp2_ske_send_eks *ske_data) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); if (ret < 0) - drm_dbg_kms(&i915->drm, "Get session key failed. %d\n", + drm_dbg_kms(display->drm, "Get session key failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1392,17 +1395,17 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, *rep_topology, struct hdcp2_rep_send_ack *rep_send_ack) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } @@ -1411,9 +1414,9 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, rep_topology, rep_send_ack); if (ret < 0) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Verify rep topology failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1422,71 +1425,71 @@ static int hdcp2_verify_mprime(struct intel_connector *connector, struct hdcp2_rep_stream_ready *stream_ready) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } static int hdcp2_authenticate_port(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); if (ret < 0) - drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n", + drm_dbg_kms(display->drm, "Enable hdcp auth failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } static int hdcp2_close_session(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, &dig_port->hdcp_port_data); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1499,7 +1502,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector) /* Authentication flow starts from here */ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_ake_init ake_init; @@ -1510,7 +1513,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) } msgs; const struct intel_hdcp_shim *shim = hdcp->shim; size_t size; - int ret; + int ret, i; /* Init for seq_num */ hdcp->seq_num_v = 0; @@ -1520,27 +1523,50 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - ret = shim->write_2_2_msg(connector, &msgs.ake_init, - sizeof(msgs.ake_init)); - if (ret < 0) - return ret; + /* + * Retry the first read and write to downstream at least 10 times + * with a 50ms delay if not hdcp2 capable(dock decides to stop advertising + * hdcp2 capability for some reason). The reason being that + * during suspend resume dock usually keeps the HDCP2 registers inaccesible + * causing AUX error. This wouldn't be a big problem if the userspace + * just kept retrying with some delay while it continues to play low + * value content but most userpace applications end up throwing an error + * when it receives one from KMD. This makes sure we give the dock + * and the sink devices to complete its power cycle and then try HDCP + * authentication. The values of 10 and delay of 50ms was decided based + * on multiple trial and errors. 
+ */ + for (i = 0; i < 10; i++) { + if (!intel_hdcp2_get_capability(connector)) { + msleep(50); + continue; + } + + ret = shim->write_2_2_msg(connector, &msgs.ake_init, + sizeof(msgs.ake_init)); + if (ret < 0) + continue; + + ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT, + &msgs.send_cert, sizeof(msgs.send_cert)); + if (ret > 0) + break; + } - ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT, - &msgs.send_cert, sizeof(msgs.send_cert)); if (ret < 0) return ret; if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { - drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n"); + drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); - if (drm_hdcp_check_ksvs_revoked(&i915->drm, + if (drm_hdcp_check_ksvs_revoked(display->drm, msgs.send_cert.cert_rx.receiver_id, 1) > 0) { - drm_err(&i915->drm, "Receiver ID is revoked\n"); + drm_err(display->drm, "Receiver ID is revoked\n"); return -EPERM; } @@ -1691,8 +1717,8 @@ out: static int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_send_receiverid_list recvid_list; @@ -1712,7 +1738,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { - drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n"); + drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n"); return -EINVAL; } @@ -1725,7 +1751,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP1.x or 2.0 Legacy Device Downstream\n"); return -EINVAL; } @@ -1735,23 +1761,23 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); if (!hdcp->hdcp2_encrypted && seq_num_v) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Non zero Seq_num_v at first RecvId_List msg\n"); return -EINVAL; } if (seq_num_v < hdcp->seq_num_v) { /* Roll over of the seq_num_v from repeater. Reauthenticate. */ - drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n"); + drm_dbg_kms(display->drm, "Seq_num_v roll over.\n"); return -EINVAL; } device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - if (drm_hdcp_check_ksvs_revoked(&i915->drm, + if (drm_hdcp_check_ksvs_revoked(display->drm, msgs.recvid_list.receiver_ids, device_cnt) > 0) { - drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n"); + drm_err(display->drm, "Revoked receiver ID(s) is in list\n"); return -EPERM; } @@ -1772,27 +1798,27 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) static int hdcp2_authenticate_sink(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; int ret; ret = hdcp2_authentication_key_exchange(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, "AKE Failed. 
Err : %d\n", ret); + drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret); return ret; } ret = hdcp2_locality_check(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Locality Check failed. Err : %d\n", ret); return ret; } ret = hdcp2_session_key_exchange(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret); + drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret); return ret; } @@ -1807,7 +1833,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) if (hdcp->is_repeater) { ret = hdcp2_authenticate_repeater_topology(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Repeater Auth Failed. Err: %d\n", ret); return ret; } @@ -1818,17 +1844,17 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) static int hdcp2_enable_stream_encryption(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; enum port port = dig_port->base.port; int ret = 0; - if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & + if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n", connector->base.base.id, connector->base.name); ret = -EPERM; goto link_recover; @@ -1837,11 +1863,11 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, true); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", + drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", transcoder_name(hdcp->stream_transcoder)); } @@ -1849,7 +1875,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) link_recover: if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); dig_port->hdcp_auth_status = false; data->k = 0; @@ -1859,35 +1885,35 @@ link_recover: static int hdcp2_enable_encryption(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - drm_WARN_ON(&i915->drm, - intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & + drm_WARN_ON(display->drm, + intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to enable HDCP signalling. 
%d\n", ret); return ret; } } - if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & + if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_AUTH_STATUS) /* Link is Authenticated. Now set for Encryption */ - intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), + intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), 0, CTL_LINK_ENCRYPTION_REQ); - ret = intel_de_wait_for_set(i915, - HDCP2_STATUS(i915, cpu_transcoder, + ret = intel_de_wait_for_set(display, + HDCP2_STATUS(display, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); @@ -1898,32 +1924,33 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) static int hdcp2_disable_encryption(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & - LINK_ENCRYPTION_STATUS)); + drm_WARN_ON(display->drm, + !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS)); - intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), + intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), CTL_LINK_ENCRYPTION_REQ, 0); - ret = intel_de_wait_for_clear(i915, - HDCP2_STATUS(i915, cpu_transcoder, + ret = intel_de_wait_for_clear(display, + HDCP2_STATUS(display, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) - drm_dbg_kms(&i915->drm, "Disable Encryption Timedout"); + drm_dbg_kms(display->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to disable HDCP signalling. 
%d\n", ret); return ret; @@ -1936,7 +1963,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) static int hdcp2_propagate_stream_management_info(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); int i, tries = 3, ret; if (!connector->hdcp.is_repeater) @@ -1949,12 +1976,12 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector) /* Lets restart the auth incase of seq_num_m roll over */ if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "seq_num_m roll over.(%d)\n", ret); break; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP2 stream management %d of %d Failed.(%d)\n", i + 1, tries, ret); } @@ -1965,8 +1992,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector) static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret = 0, i, tries = 3; for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { @@ -1974,7 +2001,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, if (!ret) { ret = intel_hdcp_prepare_streams(state, connector); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Prepare stream failed.(%d)\n", ret); break; @@ -1982,7 +2009,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, ret = hdcp2_propagate_stream_management_info(connector); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Stream management failed.(%d)\n", ret); break; @@ -1991,15 +2018,15 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, ret = hdcp2_authenticate_port(connector); if (!ret) break; - drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n", + drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n", ret); } /* Clearing the mei hdcp session */ - drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", + drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", i + 1, tries, ret); if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); } if (!ret && !dig_port->hdcp_auth_status) { @@ -2010,10 +2037,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); ret = hdcp2_enable_encryption(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Encryption Enable Failed.(%d)\n", ret); if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); } } @@ -2026,11 +2053,11 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, static int _intel_hdcp2_enable(struct intel_atomic_state *state, struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. 
Type: %d\n", connector->base.base.id, connector->base.name, hdcp->content_type); @@ -2038,12 +2065,12 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state, ret = hdcp2_authenticate_and_encrypt(state, connector); if (ret) { - drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", + drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", hdcp->content_type, ret); return ret; } - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n", connector->base.base.id, connector->base.name, hdcp->content_type); @@ -2054,23 +2081,23 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state, static int _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n", connector->base.base.id, connector->base.name); if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, false); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", + drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery) @@ -2080,7 +2107,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery ret = hdcp2_disable_encryption(connector); if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); connector->hdcp.hdcp2_encrypted = false; dig_port->hdcp_auth_status = false; @@ -2092,8 +2119,8 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery /* Implements the Link Integrity Check for HDCP2.2 */ static int intel_hdcp2_check_link(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; @@ -2110,11 +2137,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - if (drm_WARN_ON(&i915->drm, - !intel_hdcp2_in_use(i915, cpu_transcoder, port))) { - drm_err(&i915->drm, + if (drm_WARN_ON(display->drm, + !intel_hdcp2_in_use(display, cpu_transcoder, port))) { + drm_err(display->drm, "HDCP2.2 link stopped the encryption, %x\n", - intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port))); + intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port))); ret = -ENXIO; _intel_hdcp2_disable(connector, true); intel_hdcp_update_value(connector, @@ -2137,17 +2164,17 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) if 
(hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP2.2 Downstream topology change\n"); } else { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", connector->base.base.id, connector->base.name); } ret = _intel_hdcp2_disable(connector, true); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n", connector->base.base.id, connector->base.name, ret); intel_hdcp_update_value(connector, @@ -2169,7 +2196,8 @@ static void intel_hdcp_check_work(struct work_struct *work) struct intel_hdcp, check_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); + struct drm_i915_private *i915 = to_i915(display->drm); if (drm_connector_is_unregistered(&connector->base)) return; @@ -2186,13 +2214,12 @@ static int i915_hdcp_component_bind(struct device *drv_kdev, struct device *mei_kdev, void *data) { struct intel_display *display = to_intel_display(drv_kdev); - struct drm_i915_private *i915 = to_i915(display->drm); - drm_dbg(&i915->drm, "I915 HDCP comp bind\n"); - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data; - i915->display.hdcp.arbiter->hdcp_dev = mei_kdev; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg(display->drm, "I915 HDCP comp bind\n"); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data; + display->hdcp.arbiter->hdcp_dev = mei_kdev; + mutex_unlock(&display->hdcp.hdcp_mutex); return 0; } @@ -2201,12 +2228,11 @@ static void i915_hdcp_component_unbind(struct device *drv_kdev, struct device *mei_kdev, void *data) { struct intel_display *display = to_intel_display(drv_kdev); - struct drm_i915_private *i915 = to_i915(display->drm); - drm_dbg(&i915->drm, "I915 HDCP comp unbind\n"); - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.arbiter = NULL; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg(display->drm, "I915 HDCP comp unbind\n"); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = NULL; + mutex_unlock(&display->hdcp.hdcp_mutex); } static const struct component_ops i915_hdcp_ops = { @@ -2240,11 +2266,11 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; enum port port = dig_port->base.port; - if (DISPLAY_VER(i915) < 12) + if (DISPLAY_VER(display) < 12) data->hdcp_ddi = intel_get_hdcp_ddi_index(port); else /* @@ -2264,55 +2290,57 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, data->protocol = (u8)shim->protocol; if (!data->streams) - data->streams = kcalloc(INTEL_NUM_PIPES(i915), + data->streams = kcalloc(INTEL_NUM_PIPES(display), sizeof(struct hdcp2_streamid_type), GFP_KERNEL); if (!data->streams) { - drm_err(&i915->drm, "Out of Memory\n"); + drm_err(display->drm, "Out of Memory\n"); return -ENOMEM; } return 0; } -static bool is_hdcp2_supported(struct drm_i915_private *i915) +static bool is_hdcp2_supported(struct intel_display *display) { - if (intel_hdcp_gsc_cs_required(i915)) + struct drm_i915_private *i915 = 
to_i915(display->drm); + + if (intel_hdcp_gsc_cs_required(display)) return true; if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) return false; - return (DISPLAY_VER(i915) >= 10 || + return (DISPLAY_VER(display) >= 10 || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)); } -void intel_hdcp_component_init(struct drm_i915_private *i915) +void intel_hdcp_component_init(struct intel_display *display) { int ret; - if (!is_hdcp2_supported(i915)) + if (!is_hdcp2_supported(display)) return; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added); + mutex_lock(&display->hdcp.hdcp_mutex); + drm_WARN_ON(display->drm, display->hdcp.comp_added); - i915->display.hdcp.comp_added = true; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); - if (intel_hdcp_gsc_cs_required(i915)) - ret = intel_hdcp_gsc_init(i915); + display->hdcp.comp_added = true; + mutex_unlock(&display->hdcp.hdcp_mutex); + if (intel_hdcp_gsc_cs_required(display)) + ret = intel_hdcp_gsc_init(display); else - ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops, + ret = component_add_typed(display->drm->dev, &i915_hdcp_ops, I915_COMPONENT_HDCP); if (ret < 0) { - drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n", + drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n", ret); - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.comp_added = false; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.comp_added = false; + mutex_unlock(&display->hdcp.hdcp_mutex); return; } } @@ -2321,13 +2349,13 @@ static void intel_hdcp2_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret; ret = initialize_hdcp_port_data(connector, dig_port, shim); if (ret) { - drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); + drm_dbg_kms(display->drm, "Mei hdcp data init failed\n"); return; } @@ -2338,19 +2366,18 @@ int intel_hdcp_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret; if (!shim) return -EINVAL; - if (is_hdcp2_supported(i915)) + if (is_hdcp2_supported(display)) intel_hdcp2_init(connector, dig_port, shim); - ret = - drm_connector_attach_content_protection_property(&connector->base, - hdcp->hdcp2_supported); + ret = drm_connector_attach_content_protection_property(&connector->base, + hdcp->hdcp2_supported); if (ret) { hdcp->hdcp2_supported = false; kfree(dig_port->hdcp_port_data.streams); @@ -2371,7 +2398,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -2383,14 +2411,14 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, return -ENOENT; if (!connector->encoder) { - drm_err(&i915->drm, 
"[CONNECTOR:%d:%s] encoder is not initialized\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", connector->base.base.id, connector->base.name); return -ENODEV; } mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = (u8)conn_state->hdcp_content_type; @@ -2402,7 +2430,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, hdcp->stream_transcoder = INVALID_TRANSCODER; } - if (DISPLAY_VER(i915) >= 12) + if (DISPLAY_VER(display) >= 12) dig_port->hdcp_port_data.hdcp_transcoder = intel_get_hdcp_transcoder(hdcp->cpu_transcoder); @@ -2553,21 +2581,21 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, _intel_hdcp_enable(state, encoder, crtc_state, conn_state); } -void intel_hdcp_component_fini(struct drm_i915_private *i915) +void intel_hdcp_component_fini(struct intel_display *display) { - mutex_lock(&i915->display.hdcp.hdcp_mutex); - if (!i915->display.hdcp.comp_added) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + if (!display->hdcp.comp_added) { + mutex_unlock(&display->hdcp.hdcp_mutex); return; } - i915->display.hdcp.comp_added = false; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + display->hdcp.comp_added = false; + mutex_unlock(&display->hdcp.hdcp_mutex); - if (intel_hdcp_gsc_cs_required(i915)) - intel_hdcp_gsc_fini(i915); + if (intel_hdcp_gsc_cs_required(display)) + intel_hdcp_gsc_fini(display); else - component_del(i915->drm.dev, &i915_hdcp_ops); + component_del(display->drm->dev, &i915_hdcp_ops); } void intel_hdcp_cleanup(struct intel_connector *connector) @@ -2657,7 +2685,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, void intel_hdcp_handle_cp_irq(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); + struct drm_i915_private *i915 = to_i915(display->drm); if (!hdcp->shim) return; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 477f2d2bb120..d99830cfb798 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -12,13 +12,13 @@ struct drm_connector; struct drm_connector_state; -struct drm_i915_private; struct intel_atomic_state; struct intel_connector; struct intel_crtc_state; +struct intel_digital_port; +struct intel_display; struct intel_encoder; struct intel_hdcp_shim; -struct intel_digital_port; enum port; enum transcoder; @@ -37,14 +37,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool is_hdcp_supported(struct drm_i915_private *i915, enum port port); +bool is_hdcp_supported(struct intel_display *display, enum port port); bool intel_hdcp_get_capability(struct intel_connector *connector); bool intel_hdcp2_get_capability(struct intel_connector *connector); void intel_hdcp_get_remote_capability(struct intel_connector *connector, bool *hdcp_capable, bool *hdcp2_capable); -void intel_hdcp_component_init(struct drm_i915_private *i915); -void intel_hdcp_component_fini(struct drm_i915_private *i915); +void intel_hdcp_component_init(struct intel_display *display); +void intel_hdcp_component_fini(struct intel_display *display); void 
intel_hdcp_cleanup(struct intel_connector *connector); void intel_hdcp_handle_cp_irq(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 16afeb8a3a8d..55965844d829 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -19,18 +19,19 @@ struct intel_hdcp_gsc_message { void *hdcp_cmd_out; }; -bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) +bool intel_hdcp_gsc_cs_required(struct intel_display *display) { - return DISPLAY_VER(i915) >= 14; + return DISPLAY_VER(display) >= 14; } -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915) +bool intel_hdcp_gsc_check_status(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_gt *gt = i915->media_gt; struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL; if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "GSC components required for HDCP2.2 are not ready\n"); return false; } @@ -106,8 +107,9 @@ static const struct i915_hdcp_ops gsc_hdcp_ops = { .close_hdcp_session = intel_hdcp_gsc_close_session, }; -static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) +static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_hdcp_gsc_message *hdcp_message; int ret; @@ -120,19 +122,19 @@ static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) * NOTE: No need to lock the comp mutex here as it is already * going to be taken before this function called */ - i915->display.hdcp.hdcp_message = hdcp_message; + display->hdcp.hdcp_message = hdcp_message; ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message); if (ret) - drm_err(&i915->drm, "Could not initialize hdcp_message\n"); + drm_err(display->drm, "Could not initialize hdcp_message\n"); return ret; } -static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) +static void intel_hdcp_gsc_free_message(struct intel_display *display) { struct intel_hdcp_gsc_message *hdcp_message = - i915->display.hdcp.hdcp_message; + display->hdcp.hdcp_message; hdcp_message->hdcp_cmd_in = NULL; hdcp_message->hdcp_cmd_out = NULL; @@ -140,7 +142,7 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) kfree(hdcp_message); } -int intel_hdcp_gsc_init(struct drm_i915_private *i915) +int intel_hdcp_gsc_init(struct intel_display *display) { struct i915_hdcp_arbiter *data; int ret; @@ -149,20 +151,20 @@ int intel_hdcp_gsc_init(struct drm_i915_private *i915) if (!data) return -ENOMEM; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.arbiter = data; - i915->display.hdcp.arbiter->hdcp_dev = i915->drm.dev; - i915->display.hdcp.arbiter->ops = &gsc_hdcp_ops; - ret = intel_hdcp_gsc_hdcp2_init(i915); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = data; + display->hdcp.arbiter->hdcp_dev = display->drm->dev; + display->hdcp.arbiter->ops = &gsc_hdcp_ops; + ret = intel_hdcp_gsc_hdcp2_init(display); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } -void intel_hdcp_gsc_fini(struct drm_i915_private *i915) +void intel_hdcp_gsc_fini(struct intel_display *display) { - intel_hdcp_gsc_free_message(i915); - kfree(i915->display.hdcp.arbiter); + intel_hdcp_gsc_free_message(display); + kfree(display->hdcp.arbiter); } static int intel_gsc_send_sync(struct
drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h index 5f610df61cc9..5695a5e4f609 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h @@ -10,14 +10,15 @@ #include <linux/types.h> struct drm_i915_private; +struct intel_display; struct intel_hdcp_gsc_message; -bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915); +bool intel_hdcp_gsc_cs_required(struct intel_display *display); ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_in_len, u8 *msg_out, size_t msg_out_len); -int intel_hdcp_gsc_init(struct drm_i915_private *i915); -void intel_hdcp_gsc_fini(struct drm_i915_private *i915); -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915); +int intel_hdcp_gsc_init(struct intel_display *display); +void intel_hdcp_gsc_fini(struct intel_display *display); +bool intel_hdcp_gsc_check_status(struct intel_display *display); #endif /* __INTEL_HDCP_GCS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c index 35bdb532bbb3..129104fa9b16 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c @@ -46,12 +46,12 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, (u8 *)&session_init_out, sizeof(session_init_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_INITIATE_HDCP2_SESSION, session_init_out.header.status); return -EIO; @@ -108,12 +108,12 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev, (u8 *)&verify_rxcert_out, sizeof(verify_rxcert_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); return byte; } if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_VERIFY_RECEIVER_CERT, verify_rxcert_out.header.status); return -EIO; @@ -171,12 +171,12 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data, (u8 *)&send_hprime_out, sizeof(send_hprime_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); return -EIO; } @@ -222,12 +222,12 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat (u8 *)&pairing_info_out, sizeof(pairing_info_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); return byte; } if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. Status: 0x%X\n", WIRED_AKE_SEND_PAIRING_INFO, pairing_info_out.header.status); return -EIO; @@ -269,12 +269,12 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev, byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), (u8 *)&lc_init_out, sizeof(lc_init_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); return -EIO; } @@ -323,12 +323,12 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data, (u8 *)&verify_lprime_out, sizeof(verify_lprime_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_VALIDATE_LOCALITY, verify_lprime_out.header.status); return -EIO; @@ -369,12 +369,12 @@ int intel_hdcp_gsc_get_session_key(struct device *dev, byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), (u8 *)&get_skey_out, sizeof(get_skey_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_GET_SESSION_KEY, get_skey_out.header.status); return -EIO; } @@ -435,12 +435,12 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev, (u8 *)&verify_repeater_out, sizeof(verify_repeater_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_VERIFY_REPEATER, verify_repeater_out.header.status); return -EIO; @@ -504,12 +504,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev, sizeof(verify_mprime_out)); kfree(verify_mprime_in); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", WIRED_REPEATER_AUTH_STREAM_REQ, verify_mprime_out.header.status); return -EIO; @@ -552,12 +552,12 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev, (u8 *)&enable_auth_out, sizeof(enable_auth_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_ENABLE_AUTH, enable_auth_out.header.status); return -EIO; } @@ -599,12 +599,12 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data) (u8 *)&session_close_out, sizeof(session_close_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "Session Close Failed. status: 0x%X\n", session_close_out.header.status); return -EIO; } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h index ce199d6f6232..2d597f27e931 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h @@ -22,11 +22,12 @@ struct hdcp2_ske_send_eks; struct hdcp2_rep_send_receiverid_list; struct hdcp2_rep_send_ack; struct hdcp2_rep_stream_ready; +struct intel_display; ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_in_len, u8 *msg_out, size_t msg_out_len); -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915); +bool intel_hdcp_gsc_check_status(struct intel_display *display); int intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ake_init *ake_data); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_shim.h b/drivers/gpu/drm/i915/display/intel_hdcp_shim.h new file mode 100644 index 000000000000..abf9ae2f4ada --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_shim.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_HDCP_SHIM_H__ +#define __INTEL_HDCP_SHIM_H__ + +#include <linux/types.h> + +#include <drm/intel/i915_hdcp_interface.h> + +enum transcoder; +struct intel_connector; +struct intel_digital_port; + +enum check_link_response { + HDCP_LINK_PROTECTED = 0, + HDCP_TOPOLOGY_CHANGE, + HDCP_LINK_INTEGRITY_FAILURE, + HDCP_REAUTH_REQUEST +}; + +/* + * This structure serves as a translation layer between the generic HDCP code + * and the bus-specific code. What that means is that HDCP over HDMI differs + * from HDCP over DP, so to account for these differences, we need to + * communicate with the receiver through this shim. + * + * For completeness, the 2 buses differ in the following ways: + * - DP AUX vs. DDC + * HDCP registers on the receiver are set via DP AUX for DP, and + * they are set via DDC for HDMI. + * - Receiver register offsets + * The offsets of the registers are different for DP vs. HDMI + * - Receiver register masks/offsets + * For instance, the ready bit for the KSV fifo is in a different + * place on DP vs HDMI + * - Receiver register names + * Seriously. 
In the DP spec, the 16-bit register containing + * downstream information is called BINFO, on HDMI it's called + * BSTATUS. To confuse matters further, DP has a BSTATUS register + * with a completely different definition. + * - KSV FIFO + * On HDMI, the ksv fifo is read all at once, whereas on DP it must + * be read 3 keys at a time + * - Aksv output + * Since Aksv is hidden in hardware, there's different procedures + * to send it over DP AUX vs DDC + */ +struct intel_hdcp_shim { + /* Outputs the transmitter's An and Aksv values to the receiver. */ + int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an); + + /* Reads the receiver's key selection vector */ + int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv); + + /* + * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The + * definitions are the same in the respective specs, but the names are + * different. Call it BSTATUS since that's the name the HDMI spec + * uses and it was there first. + */ + int (*read_bstatus)(struct intel_digital_port *dig_port, + u8 *bstatus); + + /* Determines whether a repeater is present downstream */ + int (*repeater_present)(struct intel_digital_port *dig_port, + bool *repeater_present); + + /* Reads the receiver's Ri' value */ + int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri); + + /* Determines if the receiver's KSV FIFO is ready for consumption */ + int (*read_ksv_ready)(struct intel_digital_port *dig_port, + bool *ksv_ready); + + /* Reads the ksv fifo for num_downstream devices */ + int (*read_ksv_fifo)(struct intel_digital_port *dig_port, + int num_downstream, u8 *ksv_fifo); + + /* Reads a 32-bit part of V' from the receiver */ + int (*read_v_prime_part)(struct intel_digital_port *dig_port, + int i, u32 *part); + + /* Enables HDCP signalling on the port */ + int (*toggle_signalling)(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, + bool enable); + + /* Enable/Disable stream encryption on DP MST Transport Link */ + int (*stream_encryption)(struct intel_connector *connector, + bool enable); + + /* Ensures the link is still protected */ + bool (*check_link)(struct intel_digital_port *dig_port, + struct intel_connector *connector); + + /* Detects panel's hdcp capability. This is optional for HDMI. */ + int (*hdcp_get_capability)(struct intel_digital_port *dig_port, + bool *hdcp_capable); + + /* HDCP adaptation(DP/HDMI) required on the port */ + enum hdcp_wired_protocol protocol; + + /* Detects whether sink is HDCP2.2 capable */ + int (*hdcp_2_2_get_capability)(struct intel_connector *connector, + bool *capable); + + /* Write HDCP2.2 messages */ + int (*write_2_2_msg)(struct intel_connector *connector, + void *buf, size_t size); + + /* Read HDCP2.2 messages */ + int (*read_2_2_msg)(struct intel_connector *connector, + u8 msg_id, void *buf, size_t size); + + /* + * Implementation of DP HDCP2.2 Errata for the communication of stream + * type to Receivers. In DP HDCP2.2 Stream type is one of the input to + * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. 
+ */ + int (*config_stream_type)(struct intel_connector *connector, + bool is_repeater, u8 type); + + /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */ + int (*stream_2_2_encryption)(struct intel_connector *connector, + bool enable); + + /* HDCP2.2 Link Integrity Check */ + int (*check_2_2_link)(struct intel_digital_port *dig_port, + struct intel_connector *connector); + + /* HDCP remote sink cap */ + int (*get_remote_hdcp_capability)(struct intel_connector *connector, + bool *hdcp_capable, bool *hdcp2_capable); +}; + +#endif /* __INTEL_HDCP_SHIM_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index cd9ee171e0df..c6ce6bb88d7c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -38,8 +38,11 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include <drm/intel/intel_lpe_audio.h> +#include <media/cec-notifier.h> + #include "g4x_hdmi.h" #include "i915_drv.h" #include "i915_reg.h" @@ -55,9 +58,11 @@ #include "intel_gmbus.h" #include "intel_hdcp.h" #include "intel_hdcp_regs.h" +#include "intel_hdcp_shim.h" #include "intel_hdmi.h" #include "intel_lspcon.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "intel_snps_phy.h" static void @@ -1207,6 +1212,30 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, &crtc_state->infoframes.hdmi); } +void intel_hdmi_fastset_infoframes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(encoder); + i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, + crtc_state->cpu_transcoder); + u32 val = intel_de_read(display, reg); + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) == 0 && + (val & VIDEO_DIP_ENABLE_DRM_GLK) == 0) + return; + + val &= ~(VIDEO_DIP_ENABLE_DRM_GLK); + + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); + + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_DRM, + &crtc_state->infoframes.drm); +} + static void hsw_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, @@ -1310,8 +1339,8 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port, memcpy(&write_buf[1], buffer, size); msg.addr = DRM_HDCP_DDC_ADDR; - msg.flags = 0, - msg.len = size + 1, + msg.flags = 0; + msg.len = size + 1; msg.buf = write_buf; ret = i2c_transfer(ddc, &msg, 1); @@ -2053,7 +2082,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return status; } - return intel_mode_valid_max_plane_size(dev_priv, mode, false); + return intel_mode_valid_max_plane_size(dev_priv, mode, 1); } bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, @@ -2913,7 +2942,6 @@ static struct intel_encoder * get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; for_each_intel_encoder(display->drm, other) { @@ -2927,7 +2955,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) connector = enc_to_dig_port(other)->hdmi.attached_connector; - if (connector && connector->base.ddc == intel_gmbus_get_adapter(i915, ddc_pin)) + if (connector && connector->base.ddc == intel_gmbus_get_adapter(display, ddc_pin)) return 
other; } @@ -2937,7 +2965,6 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; const char *source; u8 ddc_pin; @@ -2950,7 +2977,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) source = "platform default"; } - if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) { + if (!intel_gmbus_is_valid_pin(display, ddc_pin)) { drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n", encoder->base.base.id, encoder->base.name, ddc_pin); @@ -3023,7 +3050,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct intel_encoder *intel_encoder = &dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; struct cec_connector_info conn_info; u8 ddc_pin; @@ -3048,7 +3074,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, drm_connector_init_with_ddc(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA, - intel_gmbus_get_adapter(dev_priv, ddc_pin)); + intel_gmbus_get_adapter(display, ddc_pin)); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); @@ -3073,7 +3099,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, intel_connector_attach_encoder(intel_connector, intel_encoder); intel_hdmi->attached_connector = intel_connector; - if (is_hdcp_supported(dev_priv, port)) { + if (is_hdcp_supported(display, port)) { int ret = intel_hdcp_init(intel_connector, dig_port, &intel_hdmi_hdcp_shim); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 9b97623665c5..466f48df8a74 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -42,6 +42,9 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, u32 intel_hdmi_infoframe_enable(unsigned int type); void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state); +void intel_hdmi_fastset_infoframes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); void intel_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index d9ec349f3c8c..a013b0e0ef54 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -21,8 +21,11 @@ * IN THE SOFTWARE. 
*/ +#include <linux/debugfs.h> #include <linux/kernel.h> +#include <drm/drm_probe_helper.h> + #include "i915_drv.h" #include "i915_irq.h" #include "intel_display_power.h" diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 2c4e946d5575..cb64c6f0ad1b 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -556,6 +556,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { + struct intel_display *display = &dev_priv->display; u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; u32 pin_mask = 0, long_mask = 0; @@ -589,11 +590,12 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_ICP) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { + struct intel_display *display = &dev_priv->display; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -625,7 +627,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) @@ -849,10 +851,11 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - else - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); + /* + * We reduce the value to 250us to be able to detect SHPD when an external display + * is connected. This is also expected of us as stated in DP1.4a Table 3-4. + */ + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -1060,6 +1063,10 @@ static void mtp_hpd_irq_setup(struct drm_i915_private *i915) enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + /* + * Use 250us here to align with the DP1.4a(Table 3-4) spec as to what the + * SHPD_FILTER_CNT value should be. 
+ */ intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); mtp_hpd_invert(i915); diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index e7a9b860fac6..c87cd1d16d0a 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -26,7 +26,6 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, struct intel_link_bw_limits *limits) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe; limits->force_fec_pipes = 0; @@ -34,7 +33,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, for_each_pipe(display, pipe) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, - intel_crtc_for_pipe(i915, pipe)); + intel_crtc_for_pipe(display, pipe)); if (state->base.duplicated && crtc_state) { limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index fb4ed9f7855b..6d7637ad980a 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -37,6 +37,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -51,6 +52,7 @@ #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "intel_pps_regs.h" /* Private structure for the integrated LVDS support */ @@ -263,7 +265,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, temp |= LVDS_PIPE_SEL(pipe); } - /* set the corresponsding LVDS_BORDER bit */ + /* set the corresponding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; temp |= crtc_state->gmch_pfit.lvds_border_bits; @@ -899,7 +901,7 @@ void intel_lvds_init(struct drm_i915_private *i915) drm_connector_init_with_ddc(&i915->drm, &connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS, - intel_gmbus_get_adapter(i915, ddc_pin)); + intel_gmbus_get_adapter(display, ddc_pin)); drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS, "LVDS"); diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 72694dde3c22..2c8668b1ebae 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_vblank.h> #include "i915_drv.h" #include "i915_reg.h" @@ -221,6 +222,7 @@ static u8 get_transcoder_pipes(struct drm_i915_private *i915, static void get_portsync_pipes(struct intel_crtc *crtc, u8 *master_pipe_mask, u8 *slave_pipes_mask) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -243,7 +245,7 @@ static void get_portsync_pipes(struct intel_crtc *crtc, *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder)); drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask)); - master_crtc = intel_crtc_for_pipe(i915, ffs(*master_pipe_mask) - 1); + master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1); master_crtc_state = to_intel_crtc_state(master_crtc->base.state); *slave_pipes_mask = get_transcoder_pipes(i915, 
master_crtc_state->sync_mode_slaves_mask); } @@ -375,6 +377,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state static void intel_sanitize_plane_mapping(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_crtc *crtc; if (DISPLAY_VER(i915) >= 4) @@ -396,7 +399,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915) "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", plane->base.base.id, plane->base.name); - plane_crtc = intel_crtc_for_pipe(i915, pipe); + plane_crtc = intel_crtc_for_pipe(display, pipe); intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -490,8 +493,8 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc, } /* Disable any background color/etc. set by the BIOS */ - intel_color_commit_noarm(crtc_state); - intel_color_commit_arm(crtc_state); + intel_color_commit_noarm(NULL, crtc_state); + intel_color_commit_arm(NULL, crtc_state); } if (!crtc_state->hw.active || @@ -662,6 +665,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_plane *plane; struct intel_crtc *crtc; @@ -674,7 +678,7 @@ static void readout_plane_state(struct drm_i915_private *i915) visible = plane->get_hw_state(plane, &pipe); - crtc = intel_crtc_for_pipe(i915, pipe); + crtc = intel_crtc_for_pipe(display, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); @@ -695,6 +699,7 @@ static void readout_plane_state(struct drm_i915_private *i915) static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = @@ -743,7 +748,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { - crtc = intel_crtc_for_pipe(i915, pipe); + crtc = intel_crtc_for_pipe(display, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; @@ -955,6 +960,7 @@ static void intel_early_display_was(struct drm_i915_private *i915) void intel_modeset_setup_hw_state(struct drm_i915_private *i915, struct drm_modeset_acquire_ctx *ctx) { + struct intel_display *display = &i915->display; struct intel_encoder *encoder; struct intel_crtc *crtc; intel_wakeref_t wakeref; @@ -982,7 +988,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, drm_crtc_vblank_reset(&crtc->base); if (crtc_state->hw.active) { - intel_dmc_enable_pipe(i915, crtc->pipe); + intel_dmc_enable_pipe(display, crtc->pipe); intel_crtc_vblank_on(crtc_state); } } diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index 3491db5cad31..bc70e72ccc2e 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -27,6 +27,7 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_display *display = to_intel_display(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); drm_dbg_kms(&i915->drm, 
"[CONNECTOR:%d:%s]\n", @@ -35,29 +36,29 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta if (connector->get_hw_state(connector)) { struct intel_encoder *encoder = intel_attached_encoder(connector); - I915_STATE_WARN(i915, !crtc_state, - "connector enabled without attached crtc\n"); + INTEL_DISPLAY_STATE_WARN(display, !crtc_state, + "connector enabled without attached crtc\n"); if (!crtc_state) return; - I915_STATE_WARN(i915, !crtc_state->hw.active, - "connector is active, but attached crtc isn't\n"); + INTEL_DISPLAY_STATE_WARN(display, !crtc_state->hw.active, + "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) return; - I915_STATE_WARN(i915, - conn_state->best_encoder != &encoder->base, - "atomic encoder doesn't match attached encoder\n"); + INTEL_DISPLAY_STATE_WARN(display, + conn_state->best_encoder != &encoder->base, + "atomic encoder doesn't match attached encoder\n"); - I915_STATE_WARN(i915, conn_state->crtc != encoder->base.crtc, - "attached encoder crtc differs from connector crtc\n"); + INTEL_DISPLAY_STATE_WARN(display, conn_state->crtc != encoder->base.crtc, + "attached encoder crtc differs from connector crtc\n"); } else { - I915_STATE_WARN(i915, crtc_state && crtc_state->hw.active, - "attached crtc is active, but connector isn't\n"); - I915_STATE_WARN(i915, !crtc_state && conn_state->best_encoder, - "best encoder set without crtc!\n"); + INTEL_DISPLAY_STATE_WARN(display, crtc_state && crtc_state->hw.active, + "attached crtc is active, but connector isn't\n"); + INTEL_DISPLAY_STATE_WARN(display, !crtc_state && conn_state->best_encoder, + "best encoder set without crtc!\n"); } } @@ -65,6 +66,7 @@ static void verify_connector_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_connector *connector; const struct drm_connector_state *new_conn_state; int i; @@ -81,8 +83,8 @@ verify_connector_state(struct intel_atomic_state *state, intel_connector_verify_state(crtc_state, new_conn_state); - I915_STATE_WARN(to_i915(connector->dev), new_conn_state->best_encoder != encoder, - "connector's atomic encoder doesn't match legacy encoder\n"); + INTEL_DISPLAY_STATE_WARN(display, new_conn_state->best_encoder != encoder, + "connector's atomic encoder doesn't match legacy encoder\n"); } } @@ -109,6 +111,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s static void verify_encoder_state(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_encoder *encoder; struct drm_connector *connector; @@ -134,25 +137,25 @@ verify_encoder_state(struct intel_atomic_state *state) found = true; enabled = true; - I915_STATE_WARN(i915, - new_conn_state->crtc != encoder->base.crtc, - "connector's crtc doesn't match encoder crtc\n"); + INTEL_DISPLAY_STATE_WARN(display, + new_conn_state->crtc != encoder->base.crtc, + "connector's crtc doesn't match encoder crtc\n"); } if (!found) continue; - I915_STATE_WARN(i915, !!encoder->base.crtc != enabled, - "encoder's enabled state mismatch (expected %i, found %i)\n", - !!encoder->base.crtc, enabled); + INTEL_DISPLAY_STATE_WARN(display, !!encoder->base.crtc != enabled, + "encoder's enabled state mismatch (expected %i, found %i)\n", + !!encoder->base.crtc, enabled); if (!encoder->base.crtc) { bool active; active = encoder->get_hw_state(encoder, &pipe); - 
I915_STATE_WARN(i915, active, - "encoder detached but still enabled on pipe %c.\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, active, + "encoder detached but still enabled on pipe %c.\n", + pipe_name(pipe)); } } } @@ -161,8 +164,8 @@ static void verify_crtc_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *i915 = to_i915(dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_crtc_state *sw_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_crtc_state *hw_crtc_state; @@ -173,7 +176,7 @@ verify_crtc_state(struct intel_atomic_state *state, if (!hw_crtc_state) return; - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, + drm_dbg_kms(display->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); hw_crtc_state->hw.enable = sw_crtc_state->hw.enable; @@ -184,30 +187,30 @@ verify_crtc_state(struct intel_atomic_state *state, if (IS_I830(i915) && hw_crtc_state->hw.active) hw_crtc_state->hw.active = sw_crtc_state->hw.active; - I915_STATE_WARN(i915, - sw_crtc_state->hw.active != hw_crtc_state->hw.active, - "crtc active state doesn't match with hw state (expected %i, found %i)\n", - sw_crtc_state->hw.active, hw_crtc_state->hw.active); + INTEL_DISPLAY_STATE_WARN(display, + sw_crtc_state->hw.active != hw_crtc_state->hw.active, + "crtc active state doesn't match with hw state (expected %i, found %i)\n", + sw_crtc_state->hw.active, hw_crtc_state->hw.active); - I915_STATE_WARN(i915, crtc->active != sw_crtc_state->hw.active, - "transitional active state does not match atomic hw state (expected %i, found %i)\n", - sw_crtc_state->hw.active, crtc->active); + INTEL_DISPLAY_STATE_WARN(display, crtc->active != sw_crtc_state->hw.active, + "transitional active state does not match atomic hw state (expected %i, found %i)\n", + sw_crtc_state->hw.active, crtc->active); primary_crtc = intel_primary_crtc(sw_crtc_state); - for_each_encoder_on_crtc(dev, &primary_crtc->base, encoder) { + for_each_encoder_on_crtc(display->drm, &primary_crtc->base, encoder) { enum pipe pipe; bool active; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(i915, active != sw_crtc_state->hw.active, - "[ENCODER:%i] active %i with crtc active %i\n", - encoder->base.base.id, active, - sw_crtc_state->hw.active); + INTEL_DISPLAY_STATE_WARN(display, active != sw_crtc_state->hw.active, + "[ENCODER:%i] active %i with crtc active %i\n", + encoder->base.base.id, active, + sw_crtc_state->hw.active); - I915_STATE_WARN(i915, active && primary_crtc->pipe != pipe, - "Encoder connected to wrong pipe %c\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, active && primary_crtc->pipe != pipe, + "Encoder connected to wrong pipe %c\n", + pipe_name(pipe)); if (active) intel_encoder_get_config(encoder, hw_crtc_state); @@ -220,7 +223,7 @@ verify_crtc_state(struct intel_atomic_state *state, if (!intel_pipe_config_compare(sw_crtc_state, hw_crtc_state, false)) { - I915_STATE_WARN(i915, 1, "pipe state doesn't match!\n"); + INTEL_DISPLAY_STATE_WARN(display, 1, "pipe state doesn't match!\n"); intel_crtc_state_dump(hw_crtc_state, NULL, "hw state"); intel_crtc_state_dump(sw_crtc_state, NULL, "sw state"); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index ff11836459de..0eaa6cd6fe80 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ 
b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -26,6 +26,7 @@ */ #include <linux/acpi.h> +#include <linux/debugfs.h> #include <linux/dmi.h> #include <acpi/video.h> diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 06b1122ec13e..2ec14096ba9c 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -294,7 +294,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, drm_WARN_ON(&overlay->i915->drm, overlay->old_vma); if (vma) - frontbuffer = intel_frontbuffer_get(vma->obj); + frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj)); intel_frontbuffer_track(overlay->frontbuffer, frontbuffer, INTEL_FRONTBUFFER_OVERLAY(pipe)); @@ -1457,18 +1457,19 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv) #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -struct intel_overlay_error_state { +struct intel_overlay_snapshot { struct overlay_registers regs; unsigned long base; u32 dovsta; u32 isr; }; -struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) +struct intel_overlay_snapshot * +intel_overlay_snapshot_capture(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_overlay *overlay = dev_priv->display.overlay; - struct intel_overlay_error_state *error; + struct intel_overlay_snapshot *error; if (!overlay || !overlay->active) return NULL; @@ -1487,9 +1488,12 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) } void -intel_overlay_print_error_state(struct drm_printer *p, - struct intel_overlay_error_state *error) +intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error, + struct drm_printer *p) { + if (!error) + return; + drm_printf(p, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", error->dovsta, error->isr); drm_printf(p, " Register file at 0x%08lx:\n", error->base); diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h index f28a09c062d0..eafac24d1de8 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.h +++ b/drivers/gpu/drm/i915/display/intel_overlay.h @@ -6,12 +6,15 @@ #ifndef __INTEL_OVERLAY_H__ #define __INTEL_OVERLAY_H__ +#include <linux/types.h> + struct drm_device; struct drm_file; struct drm_i915_private; struct drm_printer; +struct intel_display; struct intel_overlay; -struct intel_overlay_error_state; +struct intel_overlay_snapshot; #ifdef I915 void intel_overlay_setup(struct drm_i915_private *dev_priv); @@ -22,10 +25,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv); -struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); -void intel_overlay_print_error_state(struct drm_printer *p, - struct intel_overlay_error_state *error); #else static inline void intel_overlay_setup(struct drm_i915_private *dev_priv) { @@ -50,13 +49,21 @@ static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, static inline void intel_overlay_reset(struct drm_i915_private *dev_priv) { } -static inline struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) +#endif + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) && defined(I915) +struct intel_overlay_snapshot * 
+intel_overlay_snapshot_capture(struct intel_display *display); +void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error, + struct drm_printer *p); +#else +static inline struct intel_overlay_snapshot * +intel_overlay_snapshot_capture(struct intel_display *display) { return NULL; } -static inline void intel_overlay_print_error_state(struct drm_printer *p, - struct intel_overlay_error_state *error) +static inline void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error, + struct drm_printer *p) { } #endif diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 71454ddef20f..313bd3f35ace 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -33,22 +33,19 @@ #include <drm/drm_edid.h> -#include "i915_reg.h" +#include "i915_drv.h" #include "intel_backlight.h" #include "intel_connector.h" -#include "intel_de.h" +#include "intel_display_core.h" #include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_drrs.h" -#include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_quirks.h" #include "intel_vrr.h" -bool intel_panel_use_ssc(struct drm_i915_private *i915) +bool intel_panel_use_ssc(struct intel_display *display) { - struct intel_display *display = &i915->display; - if (display->params.panel_use_ssc >= 0) return display->params.panel_use_ssc != 0; return display->vbt.lvds_use_ssc && @@ -252,7 +249,7 @@ int intel_panel_compute_config(struct intel_connector *connector, static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); const struct drm_display_mode *preferred_mode = intel_panel_preferred_fixed_mode(connector); struct drm_display_mode *mode, *next; @@ -261,7 +258,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect if (!is_alt_fixed_mode(mode, preferred_mode)) continue; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using alternate EDID fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); @@ -272,7 +269,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect static void intel_panel_add_edid_preferred_mode(struct intel_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_display_mode *scan, *fixed_mode = NULL; if (list_empty(&connector->base.probed_modes)) @@ -290,7 +287,7 @@ static void intel_panel_add_edid_preferred_mode(struct intel_connector *connecto fixed_mode = list_first_entry(&connector->base.probed_modes, typeof(*fixed_mode), head); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using %s EDID fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, fixed_mode->type & DRM_MODE_TYPE_PREFERRED ? 
"preferred" : "first", @@ -303,16 +300,16 @@ static void intel_panel_add_edid_preferred_mode(struct intel_connector *connecto static void intel_panel_destroy_probed_modes(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_display_mode *mode, *next; list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); list_del(&mode->head); - drm_mode_destroy(&i915->drm, mode); + drm_mode_destroy(display->drm, mode); } } @@ -329,7 +326,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector, struct drm_display_mode *fixed_mode, const char *type) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_display_info *info = &connector->base.display_info; if (!fixed_mode) @@ -340,7 +337,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector, info->width_mm = fixed_mode->width_mm; info->height_mm = fixed_mode->height_mm; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, type, DRM_MODE_ARG(fixed_mode)); @@ -349,7 +346,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector, void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); const struct drm_display_mode *mode; mode = connector->panel.vbt.lfp_vbt_mode; @@ -357,13 +354,13 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector) return; intel_panel_add_fixed_mode(connector, - drm_mode_duplicate(&i915->drm, mode), + drm_mode_duplicate(display->drm, mode), "VBT LFP"); } void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); const struct drm_display_mode *mode; mode = connector->panel.vbt.sdvo_lvds_vbt_mode; @@ -371,7 +368,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector) return; intel_panel_add_fixed_mode(connector, - drm_mode_duplicate(&i915->drm, mode), + drm_mode_duplicate(display->drm, mode), "VBT SDVO"); } @@ -383,301 +380,6 @@ void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector, "current (BIOS)"); } -/* adjusted_mode has been preset to be the panel's fixed mode */ -static int pch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - int x, y, width, height; - - /* Native modes don't need fitting */ - if (adjusted_mode->crtc_hdisplay == pipe_src_w && - adjusted_mode->crtc_vdisplay == pipe_src_h && - crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) - return 0; - - switch (conn_state->scaling_mode) { - case DRM_MODE_SCALE_CENTER: - width = pipe_src_w; - height = pipe_src_h; - x = 
(adjusted_mode->crtc_hdisplay - width + 1)/2; - y = (adjusted_mode->crtc_vdisplay - height + 1)/2; - break; - - case DRM_MODE_SCALE_ASPECT: - /* Scale but preserve the aspect ratio */ - { - u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; - u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; - if (scaled_width > scaled_height) { /* pillar */ - width = scaled_height / pipe_src_h; - if (width & 1) - width++; - x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; - y = 0; - height = adjusted_mode->crtc_vdisplay; - } else if (scaled_width < scaled_height) { /* letter */ - height = scaled_width / pipe_src_w; - if (height & 1) - height++; - y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; - x = 0; - width = adjusted_mode->crtc_hdisplay; - } else { - x = y = 0; - width = adjusted_mode->crtc_hdisplay; - height = adjusted_mode->crtc_vdisplay; - } - } - break; - - case DRM_MODE_SCALE_NONE: - WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w); - WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h); - fallthrough; - case DRM_MODE_SCALE_FULLSCREEN: - x = y = 0; - width = adjusted_mode->crtc_hdisplay; - height = adjusted_mode->crtc_vdisplay; - break; - - default: - MISSING_CASE(conn_state->scaling_mode); - return -EINVAL; - } - - drm_rect_init(&crtc_state->pch_pfit.dst, - x, y, width, height); - crtc_state->pch_pfit.enabled = true; - - return 0; -} - -static void -centre_horizontally(struct drm_display_mode *adjusted_mode, - int width) -{ - u32 border, sync_pos, blank_width, sync_width; - - /* keep the hsync and hblank widths constant */ - sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; - blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; - sync_pos = (blank_width - sync_width + 1) / 2; - - border = (adjusted_mode->crtc_hdisplay - width + 1) / 2; - border += border & 1; /* make the border even */ - - adjusted_mode->crtc_hdisplay = width; - adjusted_mode->crtc_hblank_start = width + border; - adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width; - - adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos; - adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width; -} - -static void -centre_vertically(struct drm_display_mode *adjusted_mode, - int height) -{ - u32 border, sync_pos, blank_width, sync_width; - - /* keep the vsync and vblank widths constant */ - sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; - blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start; - sync_pos = (blank_width - sync_width + 1) / 2; - - border = (adjusted_mode->crtc_vdisplay - height + 1) / 2; - - adjusted_mode->crtc_vdisplay = height; - adjusted_mode->crtc_vblank_start = height + border; - adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width; - - adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos; - adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; -} - -static u32 panel_fitter_scaling(u32 source, u32 target) -{ - /* - * Floating point operation is not supported. So the FACTOR - * is defined, which can avoid the floating point computation - * when calculating the panel ratio. 
- */ -#define ACCURACY 12 -#define FACTOR (1 << ACCURACY) - u32 ratio = source * FACTOR / target; - return (FACTOR * ratio + FACTOR/2) / FACTOR; -} - -static void i965_scale_aspect(struct intel_crtc_state *crtc_state, - u32 *pfit_control) -{ - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; - u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; - - /* 965+ is easy, it does everything in hw */ - if (scaled_width > scaled_height) - *pfit_control |= PFIT_ENABLE | - PFIT_SCALING_PILLAR; - else if (scaled_width < scaled_height) - *pfit_control |= PFIT_ENABLE | - PFIT_SCALING_LETTER; - else if (adjusted_mode->crtc_hdisplay != pipe_src_w) - *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; -} - -static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, - u32 *pfit_control, u32 *pfit_pgm_ratios, - u32 *border) -{ - struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; - u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; - u32 bits; - - /* - * For earlier chips we have to calculate the scaling - * ratio by hand and program it into the - * PFIT_PGM_RATIO register - */ - if (scaled_width > scaled_height) { /* pillar */ - centre_horizontally(adjusted_mode, - scaled_height / pipe_src_h); - - *border = LVDS_BORDER_ENABLE; - if (pipe_src_h != adjusted_mode->crtc_vdisplay) { - bits = panel_fitter_scaling(pipe_src_h, - adjusted_mode->crtc_vdisplay); - - *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | - PFIT_VERT_SCALE(bits)); - *pfit_control |= (PFIT_ENABLE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_INTERP_BILINEAR); - } - } else if (scaled_width < scaled_height) { /* letter */ - centre_vertically(adjusted_mode, - scaled_width / pipe_src_w); - - *border = LVDS_BORDER_ENABLE; - if (pipe_src_w != adjusted_mode->crtc_hdisplay) { - bits = panel_fitter_scaling(pipe_src_w, - adjusted_mode->crtc_hdisplay); - - *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | - PFIT_VERT_SCALE(bits)); - *pfit_control |= (PFIT_ENABLE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_INTERP_BILINEAR); - } - } else { - /* Aspects match, Let hw scale both directions */ - *pfit_control |= (PFIT_ENABLE | - PFIT_VERT_AUTO_SCALE | - PFIT_HORIZ_AUTO_SCALE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_INTERP_BILINEAR); - } -} - -static int gmch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; - struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - - /* Native modes don't need fitting */ - if (adjusted_mode->crtc_hdisplay == pipe_src_w && - adjusted_mode->crtc_vdisplay == pipe_src_h) - goto out; - - switch (conn_state->scaling_mode) { - case DRM_MODE_SCALE_CENTER: - /* - * For centered modes, we have to calculate border widths & - * heights and modify the values programmed into the CRTC. 
- */ - centre_horizontally(adjusted_mode, pipe_src_w); - centre_vertically(adjusted_mode, pipe_src_h); - border = LVDS_BORDER_ENABLE; - break; - case DRM_MODE_SCALE_ASPECT: - /* Scale but preserve the aspect ratio */ - if (DISPLAY_VER(dev_priv) >= 4) - i965_scale_aspect(crtc_state, &pfit_control); - else - i9xx_scale_aspect(crtc_state, &pfit_control, - &pfit_pgm_ratios, &border); - break; - case DRM_MODE_SCALE_FULLSCREEN: - /* - * Full scaling, even if it changes the aspect ratio. - * Fortunately this is all done for us in hw. - */ - if (pipe_src_h != adjusted_mode->crtc_vdisplay || - pipe_src_w != adjusted_mode->crtc_hdisplay) { - pfit_control |= PFIT_ENABLE; - if (DISPLAY_VER(dev_priv) >= 4) - pfit_control |= PFIT_SCALING_AUTO; - else - pfit_control |= (PFIT_VERT_AUTO_SCALE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_AUTO_SCALE | - PFIT_HORIZ_INTERP_BILINEAR); - } - break; - default: - MISSING_CASE(conn_state->scaling_mode); - return -EINVAL; - } - - /* 965+ wants fuzzy fitting */ - /* FIXME: handle multiple panels by failing gracefully */ - if (DISPLAY_VER(dev_priv) >= 4) - pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY; - -out: - if ((pfit_control & PFIT_ENABLE) == 0) { - pfit_control = 0; - pfit_pgm_ratios = 0; - } - - /* Make sure pre-965 set dither correctly for 18bpp panels. */ - if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18) - pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE; - - crtc_state->gmch_pfit.control = pfit_control; - crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; - crtc_state->gmch_pfit.lvds_border_bits = border; - - return 0; -} - -int intel_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - - if (HAS_GMCH(i915)) - return gmch_panel_fitting(crtc_state, conn_state); - else - return pch_panel_fitting(crtc_state, conn_state); -} - enum drm_connector_status intel_panel_detect(struct drm_connector *connector, bool force) { diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 15a8c897b33f..b60d12322e5d 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -14,9 +14,9 @@ struct drm_connector; struct drm_connector_state; struct drm_display_mode; struct drm_edid; -struct drm_i915_private; struct intel_connector; struct intel_crtc_state; +struct intel_display; struct intel_encoder; void intel_panel_init_alloc(struct intel_connector *connector); @@ -25,7 +25,7 @@ int intel_panel_init(struct intel_connector *connector, void intel_panel_fini(struct intel_connector *connector); enum drm_connector_status intel_panel_detect(struct drm_connector *connector, bool force); -bool intel_panel_use_ssc(struct drm_i915_private *i915); +bool intel_panel_use_ssc(struct intel_display *display); const struct drm_display_mode * intel_panel_preferred_fixed_mode(struct intel_connector *connector); const struct drm_display_mode * @@ -42,8 +42,6 @@ enum drrs_type intel_panel_drrs_type(struct intel_connector *connector); enum drm_mode_status intel_panel_mode_valid(struct intel_connector *connector, const struct drm_display_mode *mode); -int intel_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); int intel_panel_compute_config(struct intel_connector *connector, struct drm_display_mode *adjusted_mode); void intel_panel_add_edid_fixed_modes(struct 
intel_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index f13ab680c2cf..4210de87a0a2 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -39,58 +39,61 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, enum port port, i915_reg_t dp_reg) { + struct intel_display *display = &dev_priv->display; enum pipe port_pipe; bool state; state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - I915_STATE_WARN(dev_priv, state && port_pipe == pipe, - "PCH DP %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe, + "PCH DP %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); - I915_STATE_WARN(dev_priv, - HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH DP %c still using transcoder B\n", - port_name(port)); + INTEL_DISPLAY_STATE_WARN(display, + HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH DP %c still using transcoder B\n", + port_name(port)); } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, enum port port, i915_reg_t hdmi_reg) { + struct intel_display *display = &dev_priv->display; enum pipe port_pipe; bool state; state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); - I915_STATE_WARN(dev_priv, state && port_pipe == pipe, - "PCH HDMI %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe, + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); - I915_STATE_WARN(dev_priv, - HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH HDMI %c still using transcoder B\n", - port_name(port)); + INTEL_DISPLAY_STATE_WARN(display, + HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH HDMI %c still using transcoder B\n", + port_name(port)); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct intel_display *display = &dev_priv->display; enum pipe port_pipe; assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - I915_STATE_WARN(dev_priv, - intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && port_pipe == pipe, - "PCH VGA enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, + intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe, + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); - I915_STATE_WARN(dev_priv, - intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, - "PCH LVDS enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, + intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); /* PCH SDVOB multiplex with HDMIB */ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); @@ -101,14 +104,15 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct 
intel_display *display = &dev_priv->display; u32 val; bool enabled; - val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); + val = intel_de_read(display, PCH_TRANSCONF(pipe)); enabled = !!(val & TRANS_ENABLE); - I915_STATE_WARN(dev_priv, enabled, - "transcoder assertion failed, should be off on pipe %c but is still active\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); } static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 713cfba71475..84c55971e91a 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -491,6 +491,7 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_encoder *encoder; struct intel_shared_dpll *pll; int i; @@ -572,11 +573,11 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) if (has_panel) { final |= DREF_SSC_SOURCE_ENABLE; - if (intel_panel_use_ssc(dev_priv) && can_ssc) + if (intel_panel_use_ssc(display) && can_ssc) final |= DREF_SSC1_ENABLE; if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) + if (intel_panel_use_ssc(display) && can_ssc) final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; else final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; @@ -604,7 +605,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) val |= DREF_SSC_SOURCE_ENABLE; /* SSC must be turned on before enabling the CPU output */ - if (intel_panel_use_ssc(dev_priv) && can_ssc) { + if (intel_panel_use_ssc(display) && can_ssc) { drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); val |= DREF_SSC1_ENABLE; } else { @@ -620,7 +621,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* Enable CPU source on CPU attached eDP */ if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) { + if (intel_panel_use_ssc(display) && can_ssc) { drm_dbg_kms(&dev_priv->drm, "Using SSC on eDP\n"); val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c new file mode 100644 index 000000000000..50861aa78a89 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pfit.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_display_core.h" +#include "intel_display_driver.h" +#include "intel_display_types.h" +#include "intel_lvds_regs.h" +#include "intel_pfit.h" + +static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; + int width = drm_rect_width(dst); + int height = drm_rect_height(dst); + int x = dst->x1; + int y = dst->y1; + + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE && + (y & 1 || height & 1)) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") misaligned for interlaced output\n", + crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst)); + return -EINVAL; + } + + /* 
+ * "Restriction : When pipe scaling is enabled, the scaled + * output must equal the pipe active area, so Pipe active + * size = (2 * PF window position) + PF window size." + * + * The vertical direction seems more forgiving than the + * horizontal direction, but still has some issues so + * let's follow the same hard rule for both. + */ + if (adjusted_mode->crtc_hdisplay != 2 * x + width || + adjusted_mode->crtc_vdisplay != 2 * y + height) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") not centered\n", + crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst)); + return -EINVAL; + } + + /* + * "Restriction : The X position must not be programmed + * to be 1 (28:16=0 0000 0000 0001b)." + */ + if (x == 1) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") badly positioned\n", + crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst)); + return -EINVAL; + } + + return 0; +} + +static int intel_pch_pfit_check_src_size(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + int max_src_w, max_src_h; + + if (DISPLAY_VER(display) >= 8) { + max_src_w = 4096; + max_src_h = 4096; + } else if (DISPLAY_VER(display) >= 7) { + /* + * PF0 7x5 capable + * PF1 3x3 capable (could be switched to 7x5 + * mode on HSW when PF2 unused) + * PF2 3x3 capable + * + * This assumes we use a 1:1 mapping between pipe and PF. + */ + max_src_w = crtc->pipe == PIPE_A ? 4096 : 2048; + max_src_h = 4096; + } else { + max_src_w = 4096; + max_src_h = 4096; + } + + if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] source size (%dx%d) exceeds pfit max (%dx%d)\n", + crtc->base.base.id, crtc->base.name, + pipe_src_w, pipe_src_h, max_src_w, max_src_h); + return -EINVAL; + } + + return 0; +} + +static int intel_pch_pfit_check_scaling(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + int hscale, vscale, max_scale = 0x12000; /* 1.125 */ + struct drm_rect src; + + drm_rect_init(&src, 0, 0, pipe_src_w << 16, pipe_src_h << 16); + + hscale = drm_rect_calc_hscale(&src, dst, 0, max_scale); + if (hscale < 0) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit horizontal downscaling (%d->%d) exceeds max (0x%x)\n", + crtc->base.base.id, crtc->base.name, + pipe_src_w, drm_rect_width(dst), + max_scale); + return hscale; + } + + vscale = drm_rect_calc_vscale(&src, dst, 0, max_scale); + if (vscale < 0) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit vertical downscaling (%d->%d) exceeds max (0x%x)\n", + crtc->base.base.id, crtc->base.name, + pipe_src_h, drm_rect_height(dst), + max_scale); + return vscale; + } + + return 0; +} + +static int intel_pch_pfit_check_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (adjusted_mode->crtc_vdisplay < 7) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] 
vertical active (%d) below minimum (%d) for pfit\n", + crtc->base.base.id, crtc->base.name, + adjusted_mode->crtc_vdisplay, 7); + return -EINVAL; + } + + return 0; +} + +static int intel_pch_pfit_check_cloning(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + /* + * The panel fitter is in the pipe and thus would affect every + * cloned output. The relevant properties (scaling mode, TV + * margins) are per-connector so we'd have to make sure each + * output sets them up identically. Seems like a very niche use + * case so let's just reject cloning entirely when pfit is used. + */ + if (crtc_state->uapi.encoder_mask && + !is_power_of_2(crtc_state->uapi.encoder_mask)) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] no pfit when cloning\n", + crtc->base.base.id, crtc->base.name); + return -EINVAL; + } + + return 0; +} + +/* adjusted_mode has been preset to be the panel's fixed mode */ +static int pch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + int ret, x, y, width, height; + + /* Native modes don't need fitting */ + if (adjusted_mode->crtc_hdisplay == pipe_src_w && + adjusted_mode->crtc_vdisplay == pipe_src_h && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) + return 0; + + switch (conn_state->scaling_mode) { + case DRM_MODE_SCALE_CENTER: + width = pipe_src_w; + height = pipe_src_h; + x = (adjusted_mode->crtc_hdisplay - width + 1)/2; + y = (adjusted_mode->crtc_vdisplay - height + 1)/2; + break; + + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + { + u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; + u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; + + if (scaled_width > scaled_height) { /* pillar */ + width = scaled_height / pipe_src_h; + if (width & 1) + width++; + x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; + y = 0; + height = adjusted_mode->crtc_vdisplay; + } else if (scaled_width < scaled_height) { /* letter */ + height = scaled_width / pipe_src_w; + if (height & 1) + height++; + y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; + x = 0; + width = adjusted_mode->crtc_hdisplay; + } else { + x = y = 0; + width = adjusted_mode->crtc_hdisplay; + height = adjusted_mode->crtc_vdisplay; + } + } + break; + + case DRM_MODE_SCALE_NONE: + WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w); + WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h); + fallthrough; + case DRM_MODE_SCALE_FULLSCREEN: + x = y = 0; + width = adjusted_mode->crtc_hdisplay; + height = adjusted_mode->crtc_vdisplay; + break; + + default: + MISSING_CASE(conn_state->scaling_mode); + return -EINVAL; + } + + drm_rect_init(&crtc_state->pch_pfit.dst, + x, y, width, height); + crtc_state->pch_pfit.enabled = true; + + /* + * SKL+ have unified scalers for pipes/planes so the + * checks are done in a single place for all scalers. 
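The window checks above reduce to simple integer identities, and the companion scaling check caps downscaling at 0x12000 in 16.16 fixed point, i.e. 1.125. A standalone sketch of the centering rule that intel_pch_pfit_check_dst_window() enforces (a plain-C rect stands in for struct drm_rect; the numbers in main() are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, w, h; };

/* "Pipe active size = (2 * PF window position) + PF window size" */
static bool pfit_window_ok(int hdisplay, int vdisplay, const struct rect *dst)
{
        if (hdisplay != 2 * dst->x1 + dst->w ||
            vdisplay != 2 * dst->y1 + dst->h)
                return false;   /* window not centered in the pipe */

        if (dst->x1 == 1)
                return false;   /* an X position of 1 is explicitly forbidden */

        return true;
}

int main(void)
{
        /* 1440x1080 window pillarboxed on a 1920x1080 pipe: x = (1920 - 1440) / 2 */
        struct rect dst = { .x1 = 240, .y1 = 0, .w = 1440, .h = 1080 };

        printf("%s\n", pfit_window_ok(1920, 1080, &dst) ? "ok" : "rejected");
        return 0;
}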
+ */ + if (DISPLAY_VER(display) >= 9) + return 0; + + ret = intel_pch_pfit_check_dst_window(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_src_size(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_scaling(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_timings(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_cloning(crtc_state); + if (ret) + return ret; + + return 0; +} + +static void +centre_horizontally(struct drm_display_mode *adjusted_mode, + int width) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the hsync and hblank widths constant */ + sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; + blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (adjusted_mode->crtc_hdisplay - width + 1) / 2; + border += border & 1; /* make the border even */ + + adjusted_mode->crtc_hdisplay = width; + adjusted_mode->crtc_hblank_start = width + border; + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width; + + adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos; + adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width; +} + +static void +centre_vertically(struct drm_display_mode *adjusted_mode, + int height) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the vsync and vblank widths constant */ + sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; + blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (adjusted_mode->crtc_vdisplay - height + 1) / 2; + + adjusted_mode->crtc_vdisplay = height; + adjusted_mode->crtc_vblank_start = height + border; + adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width; + + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos; + adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; +} + +static u32 panel_fitter_scaling(u32 source, u32 target) +{ + /* + * Floating point operation is not supported. So the FACTOR + * is defined, which can avoid the floating point computation + * when calculating the panel ratio. 
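panel_fitter_scaling() below avoids floating point by carrying 12 fractional bits (FACTOR == 4096 represents 1.0). A worked standalone sketch of the same arithmetic, using an illustrative 768-line source shown on a 1024-line panel:

#include <stdint.h>
#include <stdio.h>

#define ACCURACY 12
#define FACTOR   (1u << ACCURACY)       /* 4096 == 1.0 in this fixed-point format */

static uint32_t fitter_ratio(uint32_t source, uint32_t target)
{
        return source * FACTOR / target;        /* scale by FACTOR instead of using floats */
}

int main(void)
{
        uint32_t r = fitter_ratio(768, 1024);   /* shrink ratio, below 1.0 */

        printf("ratio = 0x%03x (%u/4096 = %.3f)\n", r, r, r / 4096.0);
        return 0;
}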
+ */ +#define ACCURACY 12 +#define FACTOR (1 << ACCURACY) + u32 ratio = source * FACTOR / target; + return (FACTOR * ratio + FACTOR/2) / FACTOR; +} + +static void i965_scale_aspect(struct intel_crtc_state *crtc_state, + u32 *pfit_control) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; + u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; + + /* 965+ is easy, it does everything in hw */ + if (scaled_width > scaled_height) + *pfit_control |= PFIT_ENABLE | + PFIT_SCALING_PILLAR; + else if (scaled_width < scaled_height) + *pfit_control |= PFIT_ENABLE | + PFIT_SCALING_LETTER; + else if (adjusted_mode->crtc_hdisplay != pipe_src_w) + *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; +} + +static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, + u32 *pfit_control, u32 *pfit_pgm_ratios, + u32 *border) +{ + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; + u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; + u32 bits; + + /* + * For earlier chips we have to calculate the scaling + * ratio by hand and program it into the + * PFIT_PGM_RATIO register + */ + if (scaled_width > scaled_height) { /* pillar */ + centre_horizontally(adjusted_mode, + scaled_height / pipe_src_h); + + *border = LVDS_BORDER_ENABLE; + if (pipe_src_h != adjusted_mode->crtc_vdisplay) { + bits = panel_fitter_scaling(pipe_src_h, + adjusted_mode->crtc_vdisplay); + + *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | + PFIT_VERT_SCALE(bits)); + *pfit_control |= (PFIT_ENABLE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); + } + } else if (scaled_width < scaled_height) { /* letter */ + centre_vertically(adjusted_mode, + scaled_width / pipe_src_w); + + *border = LVDS_BORDER_ENABLE; + if (pipe_src_w != adjusted_mode->crtc_hdisplay) { + bits = panel_fitter_scaling(pipe_src_w, + adjusted_mode->crtc_hdisplay); + + *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | + PFIT_VERT_SCALE(bits)); + *pfit_control |= (PFIT_ENABLE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); + } + } else { + /* Aspects match, Let hw scale both directions */ + *pfit_control |= (PFIT_ENABLE | + PFIT_VERT_AUTO_SCALE | + PFIT_HORIZ_AUTO_SCALE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); + } +} + +static int intel_gmch_pfit_check_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int min; + + if (DISPLAY_VER(display) >= 4) + min = 3; + else + min = 2; + + if (adjusted_mode->crtc_hdisplay < min) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] horizontal active (%d) below minimum (%d) for pfit\n", + crtc->base.base.id, crtc->base.name, + adjusted_mode->crtc_hdisplay, min); + return -EINVAL; + } + + if (adjusted_mode->crtc_vdisplay < min) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] vertical active (%d) below minimum (%d) for pfit\n", + crtc->base.base.id, crtc->base.name, + adjusted_mode->crtc_vdisplay, min); + return -EINVAL; + } + + return 0; +} + +static int 
gmch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + + /* Native modes don't need fitting */ + if (adjusted_mode->crtc_hdisplay == pipe_src_w && + adjusted_mode->crtc_vdisplay == pipe_src_h) + goto out; + + /* + * TODO: implement downscaling for i965+. Need to account + * for downscaling in intel_crtc_compute_pixel_rate(). + */ + if (adjusted_mode->crtc_hdisplay < pipe_src_w) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit horizontal downscaling (%d->%d) not supported\n", + crtc->base.base.id, crtc->base.name, + pipe_src_w, adjusted_mode->crtc_hdisplay); + return -EINVAL; + } + if (adjusted_mode->crtc_vdisplay < pipe_src_h) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit vertical downscaling (%d->%d) not supported\n", + crtc->base.base.id, crtc->base.name, + pipe_src_h, adjusted_mode->crtc_vdisplay); + return -EINVAL; + } + + switch (conn_state->scaling_mode) { + case DRM_MODE_SCALE_CENTER: + /* + * For centered modes, we have to calculate border widths & + * heights and modify the values programmed into the CRTC. + */ + centre_horizontally(adjusted_mode, pipe_src_w); + centre_vertically(adjusted_mode, pipe_src_h); + border = LVDS_BORDER_ENABLE; + break; + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + if (DISPLAY_VER(display) >= 4) + i965_scale_aspect(crtc_state, &pfit_control); + else + i9xx_scale_aspect(crtc_state, &pfit_control, + &pfit_pgm_ratios, &border); + break; + case DRM_MODE_SCALE_FULLSCREEN: + /* + * Full scaling, even if it changes the aspect ratio. + * Fortunately this is all done for us in hw. + */ + if (pipe_src_h != adjusted_mode->crtc_vdisplay || + pipe_src_w != adjusted_mode->crtc_hdisplay) { + pfit_control |= PFIT_ENABLE; + if (DISPLAY_VER(display) >= 4) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (PFIT_VERT_AUTO_SCALE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_AUTO_SCALE | + PFIT_HORIZ_INTERP_BILINEAR); + } + break; + default: + MISSING_CASE(conn_state->scaling_mode); + return -EINVAL; + } + + /* 965+ wants fuzzy fitting */ + /* FIXME: handle multiple panels by failing gracefully */ + if (DISPLAY_VER(display) >= 4) + pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY; + +out: + if ((pfit_control & PFIT_ENABLE) == 0) { + pfit_control = 0; + pfit_pgm_ratios = 0; + } + + /* Make sure pre-965 set dither correctly for 18bpp panels. 
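The i965/i9xx aspect helpers above compare the panel and source aspect ratios by cross-multiplying, so no division is needed until the final border size is computed. A standalone sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned int panel_w = 1920, panel_h = 1080;    /* adjusted mode */
        unsigned int src_w = 1280, src_h = 1024;        /* pipe source */

        /* panel_w/panel_h vs src_w/src_h, compared without dividing */
        unsigned int scaled_width = panel_w * src_h;    /* 1966080 */
        unsigned int scaled_height = src_w * panel_h;   /* 1382400 */

        if (scaled_width > scaled_height)               /* panel wider: pillarbox */
                printf("pillar, scaled width %u\n", scaled_height / src_h);     /* 1350 */
        else if (scaled_width < scaled_height)          /* panel taller: letterbox */
                printf("letter, scaled height %u\n", scaled_width / src_w);
        else
                printf("aspects match, scale both axes\n");
        return 0;
}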
*/ + if (DISPLAY_VER(display) < 4 && crtc_state->pipe_bpp == 18) + pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE; + + crtc_state->gmch_pfit.control = pfit_control; + crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; + crtc_state->gmch_pfit.lvds_border_bits = border; + + if ((pfit_control & PFIT_ENABLE) == 0) + return 0; + + return intel_gmch_pfit_check_timings(crtc_state); +} + +int intel_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (HAS_GMCH(display)) + return gmch_panel_fitting(crtc_state, conn_state); + else + return pch_panel_fitting(crtc_state, conn_state); +} diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h new file mode 100644 index 000000000000..add8d78de2c9 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pfit.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef __INTEL_PFIT_H__ +#define __INTEL_PFIT_H__ + +struct drm_connector_state; +struct intel_crtc_state; + +int intel_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + +#endif /* __INTEL_PFIT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 82ceede0b2b1..304da826dee1 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -32,6 +32,7 @@ #include "i915_reg.h" #include "intel_atomic.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_pipe_crc.h" #include "intel_pipe_crc_regs.h" @@ -285,6 +286,9 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) struct drm_modeset_acquire_ctx ctx; int ret; + if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) + i915gm_irq_cstate_wa(dev_priv, enable); + drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(&dev_priv->drm); diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index ada1792df5b3..62401f6a04e4 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -302,7 +302,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.flags = DRM_MODE_FB_MODIFIERS; if (intel_framebuffer_init(to_intel_framebuffer(fb), - vma->obj, &mode_cmd)) { + intel_bo_to_drm_bo(vma->obj), &mode_cmd)) { drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); goto err_vma; } diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c index ceaf9e3147da..cdd314956a31 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -92,7 +92,7 @@ int intel_pmdemand_init(struct drm_i915_private *i915) &pmdemand_state->base, &intel_pmdemand_funcs); - if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) + if (IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) /* Wa_14016740474 */ intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE); @@ -258,6 +258,7 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state) static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); const struct intel_bw_state *new_bw_state, *old_bw_state; const struct intel_cdclk_state 
*new_cdclk_state, *old_cdclk_state; const struct intel_crtc_state *new_crtc_state, *old_crtc_state; @@ -274,12 +275,16 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) new_dbuf_state = intel_atomic_get_new_dbuf_state(state); old_dbuf_state = intel_atomic_get_old_dbuf_state(state); if (new_dbuf_state && - (new_dbuf_state->active_pipes != - old_dbuf_state->active_pipes || - new_dbuf_state->enabled_slices != - old_dbuf_state->enabled_slices)) + new_dbuf_state->active_pipes != old_dbuf_state->active_pipes) return true; + if (DISPLAY_VER(display) < 30) { + if (new_dbuf_state && + new_dbuf_state->enabled_slices != + old_dbuf_state->enabled_slices) + return true; + } + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); old_cdclk_state = intel_atomic_get_old_cdclk_state(state); if (new_cdclk_state && @@ -327,10 +332,15 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state) if (IS_ERR(new_dbuf_state)) return PTR_ERR(new_dbuf_state); - new_pmdemand_state->params.active_pipes = - min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); - new_pmdemand_state->params.active_dbufs = - min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); + if (DISPLAY_VER(i915) < 30) { + new_pmdemand_state->params.active_dbufs = + min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); + new_pmdemand_state->params.active_pipes = + min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); + } else { + new_pmdemand_state->params.active_pipes = + min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(i915)); + } new_cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(new_cdclk_state)) @@ -395,27 +405,32 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915, reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); - /* Set 1*/ pmdemand_state->params.qclk_gv_bw = REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1); pmdemand_state->params.voltage_index = REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1); pmdemand_state->params.qclk_gv_index = REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1); - pmdemand_state->params.active_pipes = - REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1); - pmdemand_state->params.active_dbufs = - REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1); pmdemand_state->params.active_phys = REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1); - /* Set 2*/ pmdemand_state->params.cdclk_freq_mhz = REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2); pmdemand_state->params.ddiclk_max = REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2); - pmdemand_state->params.scalers = - REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2); + + if (DISPLAY_VER(i915) >= 30) { + pmdemand_state->params.active_pipes = + REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1); + } else { + pmdemand_state->params.active_pipes = + REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1); + pmdemand_state->params.active_dbufs = + REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1); + + pmdemand_state->params.scalers = + REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2); + } unlock: mutex_unlock(&i915->display.pmdemand.lock); @@ -442,6 +457,10 @@ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915, { u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3); + /* PM Demand only tracks active dbufs on pre-Xe3 platforms */ + if (DISPLAY_VER(i915) >= 30) + return; + mutex_lock(&i915->display.pmdemand.lock); if (drm_WARN_ON(&i915->drm, !intel_pmdemand_check_prev_transaction(i915))) @@ -460,7 +479,8 @@ unlock: } static void -intel_pmdemand_update_params(const struct 
intel_pmdemand_state *new, +intel_pmdemand_update_params(struct intel_display *display, + const struct intel_pmdemand_state *new, const struct intel_pmdemand_state *old, u32 *reg1, u32 *reg2, bool serialized) { @@ -495,16 +515,22 @@ intel_pmdemand_update_params(const struct intel_pmdemand_state *new, update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK); update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK); update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK); - update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK); - update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK); update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK); /* Set 2*/ update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK); update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK); - update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK); update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK); + if (DISPLAY_VER(display) >= 30) { + update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK); + } else { + update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK); + update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK); + + update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK); + } + #undef update_reg } @@ -514,6 +540,7 @@ intel_pmdemand_program_params(struct drm_i915_private *i915, const struct intel_pmdemand_state *old, bool serialized) { + struct intel_display *display = &i915->display; bool changed = false; u32 reg1, mod_reg1; u32 reg2, mod_reg2; @@ -529,7 +556,7 @@ intel_pmdemand_program_params(struct drm_i915_private *i915, reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); mod_reg2 = reg2; - intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2, + intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2, serialized); if (reg1 != mod_reg1) { diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h index 128fd61f8f14..a1c49efdc493 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.h +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h @@ -20,14 +20,14 @@ struct pmdemand_params { u8 voltage_index; u8 qclk_gv_index; u8 active_pipes; - u8 active_dbufs; + u8 active_dbufs; /* pre-Xe3 only */ /* Total number of non type C active phys from active_phys_mask */ u8 active_phys; u8 plls; u16 cdclk_freq_mhz; /* max from ddi_clocks[] */ u16 ddiclk_max; - u8 scalers; + u8 scalers; /* pre-Xe3 only */ }; struct intel_pmdemand_state { diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index feddc30e3375..093fe37a3983 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -3,6 +3,8 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/debugfs.h> + #include "g4x_dp.h" #include "i915_drv.h" #include "i915_reg.h" @@ -27,11 +29,10 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd static const char *pps_name(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_pps *pps = &intel_dp->pps; - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - switch (pps->pps_pipe) { + if (display->platform.valleyview || display->platform.cherryview) { + switch (pps->vlv_pps_pipe) { case INVALID_PIPE: /* * FIXME would be nice if we can guarantee @@ -43,7 +44,7 @@ static const char *pps_name(struct intel_dp *intel_dp) case PIPE_B: return "PPS B"; default: - 
MISSING_CASE(pps->pps_pipe); + MISSING_CASE(pps->vlv_pps_pipe); break; } } else { @@ -68,7 +69,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) intel_wakeref_t wakeref; /* - * See intel_pps_reset_all() why we need a power domain reference here. + * See vlv_pps_reset_all() why we need a power domain reference here. */ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); mutex_lock(&display->pps.mutex); @@ -85,7 +86,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, mutex_unlock(&display->pps.mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); - return 0; + return NULL; } static void @@ -94,7 +95,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) struct intel_display *display = to_intel_display(intel_dp); struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum pipe pipe = intel_dp->pps.pps_pipe; + enum pipe pipe = intel_dp->pps.vlv_pps_pipe; bool pll_enabled, release_cl_override = false; enum dpio_phy phy = vlv_pipe_to_phy(pipe); enum dpio_channel ch = vlv_pipe_to_channel(pipe); @@ -120,7 +121,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) DP |= DP_PORT_WIDTH(1); DP |= DP_LINK_TRAIN_PAT_1; - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) DP |= DP_PIPE_SEL_CHV(pipe); else DP |= DP_PIPE_SEL(pipe); @@ -132,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) * So enable temporarily it if it's not already enabled. */ if (!pll_enabled) { - release_cl_override = IS_CHERRYVIEW(dev_priv) && + release_cl_override = display->platform.cherryview && !chv_phy_powergate_ch(dev_priv, phy, ch, true); if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) { @@ -180,18 +181,18 @@ static enum pipe vlv_find_free_pps(struct intel_display *display) if (encoder->type == INTEL_OUTPUT_EDP) { drm_WARN_ON(display->drm, - intel_dp->pps.active_pipe != INVALID_PIPE && - intel_dp->pps.active_pipe != - intel_dp->pps.pps_pipe); + intel_dp->pps.vlv_active_pipe != INVALID_PIPE && + intel_dp->pps.vlv_active_pipe != + intel_dp->pps.vlv_pps_pipe); - if (intel_dp->pps.pps_pipe != INVALID_PIPE) - pipes &= ~(1 << intel_dp->pps.pps_pipe); + if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE) + pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe); } else { drm_WARN_ON(display->drm, - intel_dp->pps.pps_pipe != INVALID_PIPE); + intel_dp->pps.vlv_pps_pipe != INVALID_PIPE); - if (intel_dp->pps.active_pipe != INVALID_PIPE) - pipes &= ~(1 << intel_dp->pps.active_pipe); + if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE) + pipes &= ~(1 << intel_dp->pps.vlv_active_pipe); } } @@ -213,11 +214,11 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) /* We should never land here with regular DP ports */ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp)); - drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE && - intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE && + intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe); - if (intel_dp->pps.pps_pipe != INVALID_PIPE) - return intel_dp->pps.pps_pipe; + if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE) + return intel_dp->pps.vlv_pps_pipe; pipe = vlv_find_free_pps(display); @@ -229,7 +230,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) pipe = PIPE_A; vlv_steal_power_sequencer(display, pipe); - intel_dp->pps.pps_pipe = pipe; + intel_dp->pps.vlv_pps_pipe = pipe; drm_dbg_kms(display->drm, "picked %s for 
[ENCODER:%d:%s]\n", @@ -246,7 +247,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) */ vlv_power_sequencer_kick(intel_dp); - return intel_dp->pps.pps_pipe; + return intel_dp->pps.vlv_pps_pipe; } static int @@ -260,10 +261,10 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) /* We should never land here with regular DP ports */ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp)); - if (!intel_dp->pps.pps_reset) + if (!intel_dp->pps.bxt_pps_reset) return pps_idx; - intel_dp->pps.pps_reset = false; + intel_dp->pps.bxt_pps_reset = false; /* * Only the HW needs to be reprogrammed, the SW state is fixed and @@ -325,19 +326,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) /* try to find a pipe with this port selected */ /* first pick one where the panel is on */ - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, - pps_has_pp_on); + intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port, + pps_has_pp_on); /* didn't find one? pick one where vdd is on */ - if (intel_dp->pps.pps_pipe == INVALID_PIPE) - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, - pps_has_vdd_on); + if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) + intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port, + pps_has_vdd_on); /* didn't find one? pick one with just the correct port */ - if (intel_dp->pps.pps_pipe == INVALID_PIPE) - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, - pps_any); + if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) + intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port, + pps_any); /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ - if (intel_dp->pps.pps_pipe == INVALID_PIPE) { + if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) { drm_dbg_kms(display->drm, "[ENCODER:%d:%s] no initial power sequencer\n", dig_port->base.base.base.id, dig_port->base.base.name); @@ -354,10 +355,10 @@ static int intel_num_pps(struct intel_display *display) { struct drm_i915_private *i915 = to_i915(display->drm); - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) return 2; - if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + if (display->platform.geminilake || display->platform.broxton) return 2; if (INTEL_PCH_TYPE(i915) >= PCH_MTL) @@ -404,11 +405,10 @@ pps_initial_setup(struct intel_dp *intel_dp) struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); lockdep_assert_held(&display->pps.mutex); - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + if (display->platform.valleyview || display->platform.cherryview) { vlv_initial_power_sequencer_setup(intel_dp); return true; } @@ -446,21 +446,17 @@ pps_initial_setup(struct intel_dp *intel_dp) return intel_pps_is_valid(intel_dp); } -void intel_pps_reset_all(struct intel_display *display) +void vlv_pps_reset_all(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - if (drm_WARN_ON(display->drm, !IS_LP(dev_priv))) - return; - if (!HAS_DISPLAY(display)) return; /* * We can't grab pps_mutex here due to deadlock with power_domain * mutex when power_domain functions are called while holding pps_mutex. 
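The locking comment here prescribes a strict order: take the power domain reference first, then pps_mutex, and release in reverse, which is what pps_lock()/pps_unlock() encapsulate. A minimal userspace sketch of the same pattern (a pthread mutex and an atomic counter stand in for pps_mutex and the power domain reference):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int power_refs;   /* stand-in for a display power domain reference count */

static void pps_lock(void)
{
        atomic_fetch_add(&power_refs, 1);       /* 1: power reference, never under the mutex */
        pthread_mutex_lock(&pps_mutex);         /* 2: then the PPS state lock */
}

static void pps_unlock(void)
{
        pthread_mutex_unlock(&pps_mutex);       /* release in reverse order */
        atomic_fetch_sub(&power_refs, 1);
}

int main(void)
{
        pps_lock();
        printf("PPS state touched with %d power ref(s) held\n", atomic_load(&power_refs));
        pps_unlock();
        return 0;
}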
- * That also means that in order to use pps_pipe the code needs to + * That also means that in order to use vlv_pps_pipe the code needs to * hold both a power domain reference and pps_mutex, and the power domain * reference get/put must be done while _not_ holding pps_mutex. * pps_{lock,unlock}() do these steps in the correct order, so one @@ -470,16 +466,27 @@ void intel_pps_reset_all(struct intel_display *display) for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_WARN_ON(display->drm, - intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); - if (encoder->type != INTEL_OUTPUT_EDP) - continue; + if (encoder->type == INTEL_OUTPUT_EDP) + intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; + } +} + +void bxt_pps_reset_all(struct intel_display *display) +{ + struct intel_encoder *encoder; + + if (!HAS_DISPLAY(display)) + return; + + /* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */ + + for_each_intel_dp(display->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (DISPLAY_VER(display) >= 9) - intel_dp->pps.pps_reset = true; - else - intel_dp->pps.pps_pipe = INVALID_PIPE; + if (encoder->type == INTEL_OUTPUT_EDP) + intel_dp->pps.bxt_pps_reset = true; } } @@ -500,9 +507,9 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, memset(regs, 0, sizeof(*regs)); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (display->platform.valleyview || display->platform.cherryview) pps_idx = vlv_power_sequencer_pipe(intel_dp); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (display->platform.geminilake || display->platform.broxton) pps_idx = bxt_power_sequencer_idx(intel_dp); else pps_idx = intel_dp->pps.pps_idx; @@ -513,7 +520,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, regs->pp_off = PP_OFF_DELAYS(display, pps_idx); /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || + if (display->platform.geminilake || display->platform.broxton || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) regs->pp_div = INVALID_MMIO_REG; else @@ -543,12 +550,11 @@ _pp_stat_reg(struct intel_dp *intel_dp) static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&display->pps.mutex); - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_dp->pps.pps_pipe == INVALID_PIPE) + if ((display->platform.valleyview || display->platform.cherryview) && + intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) return false; return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0; @@ -557,12 +563,11 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&display->pps.mutex); - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_dp->pps.pps_pipe == INVALID_PIPE) + if ((display->platform.valleyview || display->platform.cherryview) && + intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) return false; return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; @@ -792,7 +797,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) } /* - * Must be paired with intel_pps_off(). 
+ * Must be paired with intel_pps_vdd_off() or - to disable + * both VDD and panel power - intel_pps_off(). * Nested calls to these functions are not allowed since * we drop the lock. Caller must use some higher level * locking to prevent nested calls from other threads. @@ -800,7 +806,6 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) void intel_pps_vdd_on(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; bool vdd; @@ -810,10 +815,10 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp) vdd = false; with_intel_pps_lock(intel_dp, wakeref) vdd = intel_pps_vdd_on_unlocked(intel_dp); - I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", - dp_to_dig_port(intel_dp)->base.base.base.id, - dp_to_dig_port(intel_dp)->base.base.name, - pps_name(intel_dp)); + INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name, + pps_name(intel_dp)); } static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) @@ -852,8 +857,10 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) intel_de_read(display, pp_stat_reg), intel_de_read(display, pp_ctrl_reg)); - if ((pp & PANEL_POWER_ON) == 0) + if ((pp & PANEL_POWER_ON) == 0) { intel_dp->pps.panel_power_off_time = ktime_get_boottime(); + intel_dp_invalidate_source_oui(intel_dp); + } intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port), @@ -920,18 +927,17 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&display->pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; - I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd, - "[ENCODER:%d:%s] %s VDD not forced on", - dp_to_dig_port(intel_dp)->base.base.base.id, - dp_to_dig_port(intel_dp)->base.base.name, - pps_name(intel_dp)); + INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd, + "[ENCODER:%d:%s] %s VDD not forced on", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name, + pps_name(intel_dp)); intel_dp->pps.want_panel_vdd = false; @@ -941,10 +947,20 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) edp_panel_vdd_schedule_off(intel_dp); } +void intel_pps_vdd_off(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + + if (!intel_dp_is_edp(intel_dp)) + return; + + with_intel_pps_lock(intel_dp, wakeref) + intel_pps_vdd_off_unlocked(intel_dp, false); +} + void intel_pps_on_unlocked(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pp; i915_reg_t pp_ctrl_reg; @@ -969,7 +985,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ilk_get_pp_control(intel_dp); - if (IS_IRONLAKE(dev_priv)) { + if (display->platform.ironlake) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; intel_de_write(display, pp_ctrl_reg, pp); @@ -985,7 +1001,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); pp |= PANEL_POWER_ON; - if (!IS_IRONLAKE(dev_priv)) + if (!display->platform.ironlake) pp |= 
PANEL_POWER_RESET; intel_de_write(display, pp_ctrl_reg, pp); @@ -998,7 +1014,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0); - if (IS_IRONLAKE(dev_priv)) { + if (display->platform.ironlake) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ intel_de_write(display, pp_ctrl_reg, pp); intel_de_posting_read(display, pp_ctrl_reg); @@ -1054,6 +1070,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp) wait_panel_off(intel_dp); intel_dp->pps.panel_power_off_time = ktime_get_boottime(); + intel_dp_invalidate_source_oui(intel_dp); + /* We got a reference when we enabled the VDD. */ intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port), @@ -1139,7 +1157,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable) return; drm_dbg_kms(display->drm, "panel power control backlight %s\n", - enable ? "enable" : "disable"); + str_enable_disable(enable)); if (enable) intel_pps_backlight_on(intel_dp); @@ -1151,10 +1169,10 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum pipe pipe = intel_dp->pps.pps_pipe; + enum pipe pipe = intel_dp->pps.vlv_pps_pipe; i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe); - drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B)) return; @@ -1177,7 +1195,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) intel_de_write(display, pp_on_reg, 0); intel_de_posting_read(display, pp_on_reg); - intel_dp->pps.pps_pipe = INVALID_PIPE; + intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; } static void vlv_steal_power_sequencer(struct intel_display *display, @@ -1190,12 +1208,12 @@ static void vlv_steal_power_sequencer(struct intel_display *display, for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe, + drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe, "stealing PPS %c from active [ENCODER:%d:%s]\n", pipe_name(pipe), encoder->base.base.id, encoder->base.name); - if (intel_dp->pps.pps_pipe != pipe) + if (intel_dp->pps.vlv_pps_pipe != pipe) continue; drm_dbg_kms(display->drm, @@ -1208,8 +1226,59 @@ static void vlv_steal_power_sequencer(struct intel_display *display, } } -void vlv_pps_init(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum pipe pipe; + + if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg, + encoder->port, &pipe)) + return pipe; + + return INVALID_PIPE; +} + +/* Call on all DP, not just eDP */ +void vlv_pps_pipe_init(struct intel_dp *intel_dp) +{ + intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; + intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp); +} + +/* Call on all DP, not just eDP */ +void vlv_pps_pipe_reset(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + + with_intel_pps_lock(intel_dp, wakeref) + intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp); +} + +enum pipe vlv_pps_backlight_initial_pipe(struct 
intel_dp *intel_dp) +{ + enum pipe pipe; + + /* + * Figure out the current pipe for the initial backlight setup. If the + * current pipe isn't valid, try the PPS pipe, and if that fails just + * assume pipe A. + */ + pipe = vlv_active_pipe(intel_dp); + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = intel_dp->pps.vlv_pps_pipe; + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = PIPE_A; + + return pipe; +} + +/* Call on all DP, not just eDP */ +void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1217,10 +1286,10 @@ void vlv_pps_init(struct intel_encoder *encoder, lockdep_assert_held(&display->pps.mutex); - drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); - if (intel_dp->pps.pps_pipe != INVALID_PIPE && - intel_dp->pps.pps_pipe != crtc->pipe) { + if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE && + intel_dp->pps.vlv_pps_pipe != crtc->pipe) { /* * If another power sequencer was being used on this * port previously make sure to turn off vdd there while @@ -1235,13 +1304,13 @@ void vlv_pps_init(struct intel_encoder *encoder, */ vlv_steal_power_sequencer(display, crtc->pipe); - intel_dp->pps.active_pipe = crtc->pipe; + intel_dp->pps.vlv_active_pipe = crtc->pipe; if (!intel_dp_is_edp(intel_dp)) return; /* now it's all ours */ - intel_dp->pps.pps_pipe = crtc->pipe; + intel_dp->pps.vlv_pps_pipe = crtc->pipe; drm_dbg_kms(display->drm, "initializing %s for [ENCODER:%d:%s]\n", @@ -1253,6 +1322,18 @@ void vlv_pps_init(struct intel_encoder *encoder, pps_init_registers(intel_dp, true); } +/* Call on all DP, not just eDP */ +void vlv_pps_port_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + intel_wakeref_t wakeref; + + with_intel_pps_lock(intel_dp, wakeref) + intel_dp->pps.vlv_active_pipe = INVALID_PIPE; +} + static void pps_vdd_init(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); @@ -1555,7 +1636,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. */ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (display->platform.valleyview || display->platform.cherryview) { port_sel = PANEL_PORT_SELECT_VLV(port); } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { switch (port) { @@ -1602,7 +1683,6 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd void intel_pps_encoder_reset(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) @@ -1613,7 +1693,7 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp) * Reinit the power sequencer also on the resume path, in case * BIOS did something nasty with it. 
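vlv_pps_backlight_initial_pipe() above is a three-step fallback. A standalone restatement (the enum values are illustrative; the real code reads the active pipe from hardware via g4x_dp_port_enabled()):

#include <stdio.h>

enum pipe { INVALID_PIPE = -1, PIPE_A, PIPE_B };

static enum pipe initial_backlight_pipe(enum pipe active, enum pipe pps)
{
        enum pipe pipe = active;        /* 1: the pipe currently driving the port */

        if (pipe != PIPE_A && pipe != PIPE_B)
                pipe = pps;             /* 2: otherwise the power sequencer's pipe */

        if (pipe != PIPE_A && pipe != PIPE_B)
                pipe = PIPE_A;          /* 3: otherwise just assume pipe A */

        return pipe;
}

int main(void)
{
        printf("%d\n", initial_backlight_pipe(INVALID_PIPE, PIPE_B));   /* prints 1 (PIPE_B) */
        return 0;
}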
*/ - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) vlv_initial_power_sequencer_setup(intel_dp); pps_init_delays(intel_dp); @@ -1649,11 +1729,10 @@ bool intel_pps_init(struct intel_dp *intel_dp) static void pps_init_late(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) return; if (intel_num_pps(display) < 2) @@ -1711,9 +1790,9 @@ void intel_pps_setup(struct intel_display *display) { struct drm_i915_private *i915 = to_i915(display->drm); - if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton) display->pps.mmio_base = PCH_PPS_BASE; - else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + else if (display->platform.valleyview || display->platform.cherryview) display->pps.mmio_base = VLV_PPS_BASE; else display->pps.mmio_base = PPS_BASE; @@ -1785,7 +1864,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) MISSING_CASE(port_sel); break; } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + } else if (display->platform.valleyview || display->platform.cherryview) { /* presumably write lock depends on pipe, not port select */ pp_reg = PP_CONTROL(display, pipe); panel_pipe = pipe; @@ -1806,7 +1885,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) locked = false; - I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked, - "panel assertion failure, pipe %c regs locked\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked, + "panel assertion failure, pipe %c regs locked\n", + pipe_name(pipe)); } diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index 0c5da83a559e..c83007152f07 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -34,6 +34,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp); void intel_pps_check_power_unlocked(struct intel_dp *intel_dp); void intel_pps_vdd_on(struct intel_dp *intel_dp); +void intel_pps_vdd_off(struct intel_dp *intel_dp); void intel_pps_on(struct intel_dp *intel_dp); void intel_pps_off(struct intel_dp *intel_dp); void intel_pps_vdd_off_sync(struct intel_dp *intel_dp); @@ -43,10 +44,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp); bool intel_pps_init(struct intel_dp *intel_dp); void intel_pps_init_late(struct intel_dp *intel_dp); void intel_pps_encoder_reset(struct intel_dp *intel_dp); -void intel_pps_reset_all(struct intel_display *display); -void vlv_pps_init(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void vlv_pps_pipe_init(struct intel_dp *intel_dp); +void vlv_pps_pipe_reset(struct intel_dp *intel_dp); +enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp); +void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void vlv_pps_port_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void vlv_pps_reset_all(struct intel_display *display); +void 
bxt_pps_reset_all(struct intel_display *display); void intel_pps_unlock_regs_wa(struct intel_display *display); void intel_pps_setup(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 136a0d6ca970..a784c0b81556 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -21,6 +21,8 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/debugfs.h> + #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> @@ -33,6 +35,7 @@ #include "intel_cursor_regs.h" #include "intel_ddi.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -230,7 +233,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp) switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: if (display->params.enable_psr == -1) - return connector->panel.vbt.psr.enable; + return intel_dp_is_edp(intel_dp) ? + connector->panel.vbt.psr.enable : + true; return display->params.enable_psr; case I915_PSR_DEBUG_DISABLE: return false; @@ -762,7 +767,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); - u8 val = DP_PSR_ENABLE; + u8 val = 0; if (crtc_state->has_sel_update) { val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; @@ -782,7 +787,9 @@ static void _psr_enable_sink(struct intel_dp *intel_dp, if (intel_dp->psr.entry_setup_frames > 0) val |= DP_PSR_FRAME_CAPTURE; + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); + val |= DP_PSR_ENABLE; drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); } @@ -1446,11 +1453,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - if (DISPLAY_VER(display) >= 12) { + if (DISPLAY_VER(display) >= 20) { + psr_max_h = crtc_hdisplay; + psr_max_v = crtc_vdisplay; + max_bpp = crtc_state->pipe_bpp; + } else if (IS_DISPLAY_VER(display, 12, 14)) { psr_max_h = 5120; psr_max_v = 3200; max_bpp = 30; - } else if (DISPLAY_VER(display) >= 10) { + } else if (IS_DISPLAY_VER(display, 10, 11)) { psr_max_h = 4096; psr_max_v = 2304; max_bpp = 24; @@ -1599,6 +1610,10 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, /* Remaining checks are for eDP only */ + if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A && + to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B) + return false; + /* 128b/132b Panel Replay is not supported on eDP */ if (intel_dp_is_uhbr(crtc_state)) { drm_dbg_kms(display->drm, @@ -1903,14 +1918,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * cause issues if non-supported panels are used. 
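One behavioural change worth calling out in the PSR hunk above: _psr_enable_sink() now writes DP_PSR_EN_CFG twice, programming the configuration bits first and only then setting DP_PSR_ENABLE in a second write. A minimal sketch of that sequence (the write helper is a stand-in for drm_dp_dpcd_writeb(); treat the bit positions as illustrative):

#include <stdint.h>
#include <stdio.h>

#define DP_PSR_EN_CFG           0x170           /* DPCD: PSR enable & configuration */
#define DP_PSR_ENABLE           (1 << 0)
#define DP_PSR_FRAME_CAPTURE    (1 << 3)

static void dpcd_writeb(uint32_t reg, uint8_t val)      /* stand-in for drm_dp_dpcd_writeb() */
{
        printf("DPCD[0x%03x] <= 0x%02x\n", reg, val);
}

static void psr_enable_sink(uint8_t cfg)
{
        dpcd_writeb(DP_PSR_EN_CFG, cfg);                        /* 1: configuration only */
        dpcd_writeb(DP_PSR_EN_CFG, cfg | DP_PSR_ENABLE);        /* 2: then the enable bit */
}

int main(void)
{
        psr_enable_sink(DP_PSR_FRAME_CAPTURE);
        return 0;
}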
*/ if (!intel_dp->psr.panel_replay_enabled && - (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) || + (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || IS_ALDERLAKE_P(dev_priv))) intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); /* Wa_16012604467:adlp,mtl[a0,b0] */ if (!intel_dp->psr.panel_replay_enabled && - IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0)) + IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0)) intel_de_rmw(display, MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), 0, @@ -1998,6 +2013,15 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_dp->psr.enabled = true; intel_dp->psr.paused = false; + /* + * Link_ok is sticky and set here on PSR enable. We can assume link + * training is complete as we never continue to PSR enable with + * untrained link. Link_ok is kept as set until first short pulse + * interrupt. This is targeted to workaround panels stating bad link + * after PSR is enabled. + */ + intel_dp->psr.link_ok = true; + intel_psr_activate(intel_dp); } @@ -2095,7 +2119,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) if (intel_dp->psr.sel_update_enabled) { /* Wa_16012604467:adlp,mtl[a0,b0] */ if (!intel_dp->psr.panel_replay_enabled && - IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0)) + IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0)) intel_de_rmw(display, MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); @@ -2114,7 +2138,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); intel_de_rmw(display, - PORT_ALPM_CTL(display, cpu_transcoder), + PORT_ALPM_CTL(cpu_transcoder), PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); } @@ -2157,6 +2181,8 @@ void intel_psr_disable(struct intel_dp *intel_dp, intel_psr_disable_locked(intel_dp); + intel_dp->psr.link_ok = false; + mutex_unlock(&intel_dp->psr.lock); cancel_work_sync(&intel_dp->psr.work); cancel_delayed_work_sync(&intel_dp->psr.dc3co_work); @@ -2221,6 +2247,36 @@ unlock: mutex_unlock(&psr->lock); } +/** + * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed + * @crtc_state: CRTC status + * + * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't + * prevent it in case of Panel Replay. Panel Replay switches main link off on + * DC entry. This means vblank interrupts are not fired and is a problem if + * user-space is polling for vblank events. 
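The new link_ok flag is deliberately sticky: set once at PSR enable (link training is known to be complete there) and cleared on the first short-pulse interrupt or on PSR disable. A standalone sketch of that lifecycle, plus one plausible way a link-status check could consume it (the should_retrain() combination is my reading of the intel_psr_link_ok() kernel-doc, not code from the patch):

#include <stdbool.h>
#include <stdio.h>

static bool link_ok;    /* stand-in for intel_dp->psr.link_ok */

static void psr_enable(void)      { link_ok = true;  }  /* link trained, trust it */
static void psr_short_pulse(void) { link_ok = false; }  /* first short pulse clears it */
static void psr_disable(void)     { link_ok = false; }

/* while link_ok is set, a bad link status reported by the panel is ignored */
static bool should_retrain(bool panel_reports_bad_link)
{
        return panel_reports_bad_link && !link_ok;
}

int main(void)
{
        psr_enable();
        printf("retrain? %d\n", should_retrain(true));  /* 0: panel's claim overridden */
        psr_short_pulse();
        printf("retrain? %d\n", should_retrain(true));  /* 1: now honoured */
        psr_disable();
        return 0;
}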
+ */ +bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) { + struct intel_dp *intel_dp; + + if (!intel_encoder_is_dp(encoder)) + continue; + + intel_dp = enc_to_intel_dp(encoder); + + if (intel_dp_is_edp(intel_dp) && + CAN_PANEL_REPLAY(intel_dp)) + return true; + } + + return false; +} + static u32 man_trk_ctl_enable_bit_get(struct intel_display *display) { struct drm_i915_private *dev_priv = to_i915(display->drm); @@ -2480,11 +2536,60 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c return true; } +/* Wa 14019834836 */ +static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_encoder *encoder; + int hactive_limit; + + if (crtc_state->psr2_su_area.y1 != 0 || + crtc_state->psr2_su_area.y2 != 0) + return; + + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546; + else + hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273; + + if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit) + return; + + for_each_intel_encoder_mask_with_psr(display->drm, encoder, + crtc_state->uapi.encoder_mask) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!intel_dp_is_edp(intel_dp) && + intel_dp->psr.panel_replay_enabled && + intel_dp->psr.sel_update_enabled) { + crtc_state->psr2_su_area.y2++; + return; + } + } +} + +static void +intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + /* Wa_14014971492 */ + if (!crtc_state->has_panel_replay && + ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || + IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) && + crtc_state->splitter.enable) + crtc_state->psr2_su_area.y1 = 0; + + /* Wa 14019834836 */ + if (DISPLAY_VER(display) == 30) + intel_psr_apply_pr_link_on_su_wa(crtc_state); +} + int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state *new_plane_state, *old_plane_state; struct intel_plane *plane; @@ -2589,12 +2694,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (full_update) goto skip_sel_fetch_set_loop; - /* Wa_14014971492 */ - if (!crtc_state->has_panel_replay && - ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) || - IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) && - crtc_state->splitter.enable) - crtc_state->psr2_su_area.y1 = 0; + intel_psr_apply_su_area_workarounds(crtc_state); ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) @@ -3373,6 +3473,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) mutex_lock(&psr->lock); + psr->link_ok = false; + if (!psr->enabled) goto exit; @@ -3433,6 +3535,33 @@ bool intel_psr_enabled(struct intel_dp *intel_dp) } /** + * intel_psr_link_ok - return psr->link_ok + * @intel_dp: struct intel_dp + * + * We are seeing unexpected link re-trainings with some panels. 
This is caused + * by the panel reporting a bad link status after PSR is enabled. Code checking + * the link status can call this to decide whether a bad link status reported + * by the panel can be ignored, i.e. if the panel reports a bad link while + * intel_psr_link_ok() reports the link as ok, the caller should trust the + * latter. + * + * Returns: the current value of link_ok + */ +bool intel_psr_link_ok(struct intel_dp *intel_dp) +{ + bool ret; + + if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) || + !intel_dp_is_edp(intel_dp)) + return false; + + mutex_lock(&intel_dp->psr.lock); + ret = intel_dp->psr.link_ok; + mutex_unlock(&intel_dp->psr.lock); + + return ret; +} + +/** * intel_psr_lock - grab PSR lock * @crtc_state: the crtc state * @@ -3848,10 +3977,8 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector) struct drm_i915_private *i915 = to_i915(connector->base.dev); struct dentry *root = connector->base.debugfs_entry; - /* TODO: Add support for MST connectors as well. */ - if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && - connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) || - connector->mst_port) + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && + connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) return; debugfs_create_file("i915_psr_sink_status", 0444, root, diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 6eb5f15f674f..956be263c09e 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -58,6 +58,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void intel_psr_resume(struct intel_dp *intel_dp); +bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state); +bool intel_psr_link_ok(struct intel_dp *intel_dp); void intel_psr_lock(const struct intel_crtc_state *crtc_state); void intel_psr_unlock(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 642bb15fb547..9ad7611506e8 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -9,6 +9,7 @@ #include "intel_display_reg_defs.h" #include "intel_dp_aux_regs.h" +#define _TRANS_EXITLINE_A 0x60018 #define TRANS_EXITLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A) #define EXITLINE_ENABLE REG_BIT(31) #define EXITLINE_MASK REG_GENMASK(12, 0) @@ -295,9 +296,9 @@ #define _PORT_ALPM_CTL_A 0x16fa2c #define _PORT_ALPM_CTL_B 0x16fc2c -#define PORT_ALPM_CTL(dev_priv, port) _MMIO_PORT(port, _PORT_ALPM_CTL_A, _PORT_ALPM_CTL_B) +#define PORT_ALPM_CTL(port) _MMIO_PORT(port, _PORT_ALPM_CTL_A, _PORT_ALPM_CTL_B) #define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31) -#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20) +#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(25, 20) #define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val) #define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16) #define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val) @@ -306,7 +307,7 @@ #define _PORT_ALPM_LFPS_CTL_A 0x16fa30 #define _PORT_ALPM_LFPS_CTL_B 0x16fc30 -#define PORT_ALPM_LFPS_CTL(dev_priv, port) _MMIO_PORT(port, _PORT_ALPM_LFPS_CTL_A,
_PORT_ALPM_LFPS_CTL_B) +#define PORT_ALPM_LFPS_CTL(port) _MMIO_PORT(port, _PORT_ALPM_LFPS_CTL_A, _PORT_ALPM_LFPS_CTL_B) #define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31) #define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24) #define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7 diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 29b56d53a340..28f497ae785b 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -231,7 +231,7 @@ static struct intel_quirk intel_quirks[] = { { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, }; -static struct intel_dpcd_quirk intel_dpcd_quirks[] = { +static const struct intel_dpcd_quirk intel_dpcd_quirks[] = { /* Dell Precision 5490 */ { .device = 0x7d55, @@ -272,7 +272,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp, int i; for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) { - struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i]; + const struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i]; if (d->device == q->device && (d->subsystem_vendor == q->subsystem_vendor || diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 7cc519b402e9..7a28104f68ad 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -36,6 +36,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_eld.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -2081,10 +2082,10 @@ intel_sdvo_get_edid(struct drm_connector *connector) static const struct drm_edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); struct i2c_adapter *ddc; - ddc = intel_gmbus_get_adapter(i915, i915->display.vbt.crt_ddc_pin); + ddc = intel_gmbus_get_adapter(display, display->vbt.crt_ddc_pin); if (!ddc) return NULL; @@ -2637,6 +2638,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo *sdvo, static void intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) { + struct intel_display *display = to_intel_display(&sdvo->base); struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev); const struct sdvo_device_mapping *mapping; u8 pin; @@ -2647,7 +2649,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized && - intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) + intel_gmbus_is_valid_pin(display, mapping->i2c_pin)) pin = mapping->i2c_pin; else pin = GMBUS_PIN_DPB; @@ -2656,7 +2658,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) sdvo->base.base.base.id, sdvo->base.base.name, pin, sdvo->target_addr); - sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); + sdvo->i2c = intel_gmbus_get_adapter(display, pin); /* * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index e6df1f92def5..4b3a32736fd6 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -1997,6 +1997,7 @@ int intel_snps_phy_check_hdmi_link_rate(int clock) void intel_mpllb_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct 
intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -2019,11 +2020,11 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state, intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); #define MPLLB_CHECK(__name) \ - I915_STATE_WARN(i915, mpllb_sw_state->__name != mpllb_hw_state.__name, \ - "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \ - crtc->base.base.id, crtc->base.name, \ - __stringify(__name), \ - mpllb_sw_state->__name, mpllb_hw_state.__name) + INTEL_DISPLAY_STATE_WARN(display, mpllb_sw_state->__name != mpllb_hw_state.__name, \ + "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \ + crtc->base.base.id, crtc->base.name, \ + __stringify(__name), \ + mpllb_sw_state->__name, mpllb_hw_state.__name) MPLLB_CHECK(mpllb_cp); MPLLB_CHECK(mpllb_div); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e657b09ede99..e6fadcef58e0 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -378,7 +378,8 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -vlv_sprite_update_noarm(struct intel_plane *plane, +vlv_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -399,7 +400,8 @@ vlv_sprite_update_noarm(struct intel_plane *plane, } static void -vlv_sprite_update_arm(struct intel_plane *plane, +vlv_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -449,7 +451,8 @@ vlv_sprite_update_arm(struct intel_plane *plane, } static void -vlv_sprite_disable_arm(struct intel_plane *plane, +vlv_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); @@ -795,7 +798,8 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -ivb_sprite_update_noarm(struct intel_plane *plane, +ivb_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -826,7 +830,8 @@ ivb_sprite_update_noarm(struct intel_plane *plane, } static void -ivb_sprite_update_arm(struct intel_plane *plane, +ivb_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -874,7 +879,8 @@ ivb_sprite_update_arm(struct intel_plane *plane, } static void -ivb_sprite_disable_arm(struct intel_plane *plane, +ivb_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); @@ -1133,7 +1139,8 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -g4x_sprite_update_noarm(struct intel_plane *plane, +g4x_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1162,7 +1169,8 @@ g4x_sprite_update_noarm(struct intel_plane *plane, } static void -g4x_sprite_update_arm(struct intel_plane *plane, +g4x_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const 
struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1206,7 +1214,8 @@ g4x_sprite_update_arm(struct intel_plane *plane, } static void -g4x_sprite_disable_arm(struct intel_plane *plane, +g4x_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 044a032e41b9..531079979c05 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -8,9 +8,6 @@ #include <linux/types.h> -struct drm_device; -struct drm_display_mode; -struct drm_file; struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; @@ -19,8 +16,6 @@ enum pipe; #ifdef I915 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); -int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c index 4853c4806004..1d0b84b464c1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c +++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c @@ -42,6 +42,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state, int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_intel_sprite_colorkey *set = data; struct drm_plane *plane; @@ -100,7 +101,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, */ if (!ret && has_dst_key_in_primary_plane(dev_priv)) { struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, + intel_crtc_for_pipe(display, to_intel_plane(plane)->pipe); plane_state = drm_atomic_get_plane_state(state, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 6f2ee7dbc43b..b16c4d2d4077 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -1005,7 +1005,7 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { drm_dbg_kms(&i915->drm, "Port %s: timeout waiting for TCSS power to get %s\n", - enabled ? 
"enabled" : "disabled", + str_enabled_disabled(enabled), tc->port_name); return false; } diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 5fee4be64592..27c530218ee6 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -33,6 +33,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -1092,7 +1093,6 @@ intel_tv_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_display_mode mode = {}; @@ -1166,7 +1166,7 @@ intel_tv_get_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock /= 2; /* pixel counter doesn't work on i965gm TV output */ - if (IS_I965GM(dev_priv)) + if (display->platform.i965gm) pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; } @@ -1196,7 +1196,6 @@ intel_tv_compute_config(struct intel_encoder *encoder, struct intel_atomic_state *state = to_intel_atomic_state(pipe_config->uapi.state); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); @@ -1348,7 +1347,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, adjusted_mode->name[0] = '\0'; /* pixel counter doesn't work on i965gm TV output */ - if (IS_I965GM(dev_priv)) + if (display->platform.i965gm) pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; @@ -1524,7 +1523,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; /* Enable two fixes for the chips that need them. */ - if (IS_I915GM(dev_priv)) + if (display->platform.i915gm) tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; set_tv_mode_timings(display, tv_mode, burst_ena); @@ -1626,7 +1625,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, * The TV sense state should be cleared to zero on cantiga platform. Otherwise * the TV is misdetected. This is hardware requirement. */ - if (IS_GM45(dev_priv)) + if (display->platform.gm45) tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 0b7f2134e441..a95fb3349eba 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -3,6 +3,8 @@ * Copyright © 2022-2023 Intel Corporation */ +#include <drm/drm_vblank.h> + #include "i915_drv.h" #include "i915_reg.h" #include "intel_color.h" @@ -193,7 +195,6 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* * The scanline counter increments at the leading edge of hsync. 
@@ -223,7 +224,7 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) */ if (DISPLAY_VER(display) == 2) return -1; - else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return 2; else return 1; @@ -325,14 +326,13 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, const struct drm_display_mode *mode) { struct intel_display *display = to_intel_display(_crtc->dev); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc *crtc = to_intel_crtc(_crtc); enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; bool use_scanline_counter = DISPLAY_VER(display) >= 5 || - IS_G4X(dev_priv) || DISPLAY_VER(display) == 2 || + display->platform.g4x || DISPLAY_VER(display) == 2 || crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; if (drm_WARN_ON(display->drm, !mode->crtc_clock)) { @@ -601,14 +601,15 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state, struct intel_vblank_evade_ctx *evade) { + struct intel_display *display = to_intel_display(new_crtc_state); struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state; const struct drm_display_mode *adjusted_mode; evade->crtc = crtc; - evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) && + evade->need_vlv_dsi_wa = (display->platform.valleyview || + display->platform.cherryview) && intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 2e849b015e74..40525f5c4c42 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -306,6 +306,12 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; + if (vdsc_cfg->bits_per_component < 8) { + drm_dbg_kms(&dev_priv->drm, "DSC bpc requirements not met bpc: %d\n", + vdsc_cfg->bits_per_component); + return -EINVAL; + } + drm_dsc_set_rc_buf_thresh(vdsc_cfg); /* @@ -379,9 +385,9 @@ static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state) { int num_vdsc_instances = intel_dsc_get_vdsc_per_pipe(crtc_state); + int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state); - if (crtc_state->joiner_pipes) - num_vdsc_instances *= 2; + num_vdsc_instances *= num_joined_pipes; return num_vdsc_instances; } @@ -742,7 +748,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) u32 dss_ctl1_val = 0; if (crtc_state->joiner_pipes && !crtc_state->dsc.compression_enable) { - if (intel_crtc_is_joiner_secondary(crtc_state)) + if (intel_crtc_is_bigjoiner_secondary(crtc_state)) dss_ctl1_val |= UNCOMPRESSED_JOINER_SECONDARY; else dss_ctl1_val |= UNCOMPRESSED_JOINER_PRIMARY; @@ -770,8 +776,15 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state) dss_ctl1_val |= JOINER_ENABLE; } if (crtc_state->joiner_pipes) { + if (intel_crtc_ultrajoiner_enable_needed(crtc_state)) + dss_ctl1_val |= ULTRA_JOINER_ENABLE; + + if (intel_crtc_is_ultrajoiner_primary(crtc_state)) + dss_ctl1_val |= PRIMARY_ULTRA_JOINER_ENABLE; + dss_ctl1_val |= BIG_JOINER_ENABLE; - if 
(!intel_crtc_is_joiner_secondary(crtc_state)) + + if (intel_crtc_is_bigjoiner_primary(crtc_state)) dss_ctl1_val |= PRIMARY_BIG_JOINER_ENABLE; } intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h index f921ad67b587..bf32a3b46fb1 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -37,6 +37,8 @@ #define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) #define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) #define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) +#define ULTRA_JOINER_ENABLE REG_BIT(23) +#define PRIMARY_ULTRA_JOINER_ENABLE REG_BIT(22) #define UNCOMPRESSED_JOINER_PRIMARY (1 << 21) #define UNCOMPRESSED_JOINER_SECONDARY (1 << 20) diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index 0b5916c15307..fd18dd07ae49 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -14,24 +14,24 @@ #include "intel_de.h" #include "intel_vga.h" -static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915) +static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display) { - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) return VLV_VGACNTRL; - else if (DISPLAY_VER(i915) >= 5) + else if (DISPLAY_VER(display) >= 5) return CPU_VGACNTRL; else return VGACNTRL; } /* Disable the VGA plane that we never use */ -void intel_vga_disable(struct drm_i915_private *dev_priv) +void intel_vga_disable(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); + i915_reg_t vga_reg = intel_vga_cntrl_reg(display); u8 sr1; - if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE) + if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE) return; /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ @@ -42,23 +42,24 @@ void intel_vga_disable(struct drm_i915_private *dev_priv) vga_put(pdev, VGA_RSRC_LEGACY_IO); udelay(300); - intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE); - intel_de_posting_read(dev_priv, vga_reg); + intel_de_write(display, vga_reg, VGA_DISP_DISABLE); + intel_de_posting_read(display, vga_reg); } -void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv) +void intel_vga_redisable_power_on(struct intel_display *display) { - i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); + i915_reg_t vga_reg = intel_vga_cntrl_reg(display); - if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) { - drm_dbg_kms(&dev_priv->drm, + if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) { + drm_dbg_kms(display->drm, "Something enabled VGA plane, disabling it\n"); - intel_vga_disable(dev_priv); + intel_vga_disable(display); } } -void intel_vga_redisable(struct drm_i915_private *i915) +void intel_vga_redisable(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; /* @@ -74,14 +75,14 @@ void intel_vga_redisable(struct drm_i915_private *i915) if (!wakeref) return; - intel_vga_redisable_power_on(i915); + intel_vga_redisable_power_on(display); intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref); } -void intel_vga_reset_io_mem(struct drm_i915_private 
*i915) +void intel_vga_reset_io_mem(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -98,10 +99,10 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915) vga_put(pdev, VGA_RSRC_LEGACY_IO); } -int intel_vga_register(struct drm_i915_private *i915) +int intel_vga_register(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); int ret; /* @@ -119,9 +120,9 @@ int intel_vga_register(struct drm_i915_private *i915) return 0; } -void intel_vga_unregister(struct drm_i915_private *i915) +void intel_vga_unregister(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); vga_client_unregister(pdev); } diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h index ba5b55b917f0..824dfc32a199 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.h +++ b/drivers/gpu/drm/i915/display/intel_vga.h @@ -6,13 +6,13 @@ #ifndef __INTEL_VGA_H__ #define __INTEL_VGA_H__ -struct drm_i915_private; +struct intel_display; -void intel_vga_reset_io_mem(struct drm_i915_private *i915); -void intel_vga_disable(struct drm_i915_private *i915); -void intel_vga_redisable(struct drm_i915_private *i915); -void intel_vga_redisable_power_on(struct drm_i915_private *i915); -int intel_vga_register(struct drm_i915_private *i915); -void intel_vga_unregister(struct drm_i915_private *i915); +void intel_vga_reset_io_mem(struct intel_display *display); +void intel_vga_disable(struct intel_display *display); +void intel_vga_redisable(struct intel_display *display); +void intel_vga_redisable_power_on(struct intel_display *display); +int intel_vga_register(struct intel_display *display); +void intel_vga_unregister(struct intel_display *display); #endif /* __INTEL_VGA_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 9a51f5bac307..19a5d0076bb8 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -56,6 +56,11 @@ bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh) vrefresh <= info->monitor_range.max_vfreq; } +bool intel_vrr_possible(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->vrr.flipline; +} + void intel_vrr_check_modeset(struct intel_atomic_state *state) { @@ -239,11 +244,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, (crtc_state->hw.adjusted_mode.crtc_vtotal - crtc_state->hw.adjusted_mode.vsync_end); } +} + +void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + if (!intel_vrr_possible(crtc_state)) + return; - /* - * For XE_LPD+, we use guardband and pipeline override - * is deprecated. 
- */ if (DISPLAY_VER(display) >= 13) { crtc_state->vrr.guardband = crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; @@ -281,7 +291,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0, PIPE_VBLANK_WITH_DELAY); - if (!crtc_state->vrr.flipline) { + if (!intel_vrr_possible(crtc_state)) { intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0); return; diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 89937858200d..b3b45c675020 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -15,9 +15,11 @@ struct intel_crtc_state; bool intel_vrr_is_capable(struct intel_connector *connector); bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh); +bool intel_vrr_possible(const struct intel_crtc_state *crtc_state); void intel_vrr_check_modeset(struct intel_atomic_state *state); void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state); +void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state); void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state); void intel_vrr_enable(const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c index 82c4933ad507..d7dc49aecd27 100644 --- a/drivers/gpu/drm/i915/display/intel_wm.c +++ b/drivers/gpu/drm/i915/display/intel_wm.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/debugfs.h> + #include "i915_drv.h" #include "i9xx_wm.h" #include "intel_display_types.h" @@ -48,29 +50,15 @@ void intel_update_watermarks(struct drm_i915_private *i915) i915->display.funcs.wm->update_wm(i915); } -int intel_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) +int intel_wm_compute(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); - - if (i915->display.funcs.wm->compute_pipe_wm) - return i915->display.funcs.wm->compute_pipe_wm(state, crtc); - - return 0; -} - -int intel_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - - if (!i915->display.funcs.wm->compute_intermediate_wm) - return 0; + struct intel_display *display = to_intel_display(state); - if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm)) + if (!display->funcs.wm->compute_watermarks) return 0; - return i915->display.funcs.wm->compute_intermediate_wm(state, crtc); + return display->funcs.wm->compute_watermarks(state, crtc); } bool intel_initial_watermarks(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h index 48429ac140d2..e97cdca89a5c 100644 --- a/drivers/gpu/drm/i915/display/intel_wm.h +++ b/drivers/gpu/drm/i915/display/intel_wm.h @@ -15,10 +15,8 @@ struct intel_crtc_state; struct intel_plane_state; void intel_update_watermarks(struct drm_i915_private *i915); -int intel_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc); -int intel_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc); +int intel_wm_compute(struct intel_atomic_state *state, + struct intel_crtc *crtc); bool 
intel_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_atomic_update_watermarks(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index baa601d27815..7dbc99b02eaa 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -272,7 +272,6 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); struct drm_framebuffer *fb = plane_state->hw.fb; - int ret; bool force_detach = !fb || !plane_state->uapi.visible; bool need_scaler = false; @@ -281,72 +280,16 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) need_scaler = true; - ret = skl_update_scaler(crtc_state, force_detach, - drm_plane_index(&intel_plane->base), - &plane_state->scaler_id, - drm_rect_width(&plane_state->uapi.src) >> 16, - drm_rect_height(&plane_state->uapi.src) >> 16, - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst), - fb ? fb->format : NULL, - fb ? fb->modifier : 0, - need_scaler); - - if (ret || plane_state->scaler_id < 0) - return ret; - - /* check colorkey */ - if (plane_state->ckey.flags) { - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] scaling with color key not allowed", - intel_plane->base.base.id, - intel_plane->base.name); - return -EINVAL; - } - - /* Check src format */ - switch (fb->format->format) { - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_XYUV8888: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - break; - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - if (DISPLAY_VER(dev_priv) >= 11) - break; - fallthrough; - default: - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, intel_plane->base.name, - fb->base.id, fb->format->format); - return -EINVAL; - } - - return 0; + return skl_update_scaler(crtc_state, force_detach, + drm_plane_index(&intel_plane->base), + &plane_state->scaler_id, + drm_rect_width(&plane_state->uapi.src) >> 16, + drm_rect_height(&plane_state->uapi.src) >> 16, + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst), + fb ? fb->format : NULL, + fb ? 
fb->modifier : 0, + need_scaler); } static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index c8720d31d101..038ca2ec5d7a 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic_plane.h" +#include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" @@ -349,7 +350,6 @@ static int skl_plane_max_width(const struct drm_framebuffer *fb, return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: /* FIXME AUX plane? */ case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: @@ -431,6 +431,16 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb, } } +static int xe3_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 4096; + else + return 6144; +} + static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) @@ -593,11 +603,11 @@ static u32 skl_plane_min_alignment(struct intel_plane *plane, * in full-range YCbCr. */ static void -icl_program_input_csc(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, +icl_program_input_csc(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; @@ -641,31 +651,31 @@ icl_program_input_csc(struct intel_plane *plane, }; const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), - ROFF(csc[0]) | GOFF(csc[1])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), - BOFF(csc[2])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), - ROFF(csc[3]) | GOFF(csc[4])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), - BOFF(csc[5])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), - ROFF(csc[6]) | GOFF(csc[7])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), - BOFF(csc[8])); - - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), - PREOFF_YUV_TO_RGB_HI); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), - PREOFF_YUV_TO_RGB_LO); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), + ROFF(csc[0]) | GOFF(csc[1])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), + BOFF(csc[2])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), + ROFF(csc[3]) | GOFF(csc[4])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), + BOFF(csc[5])); + intel_de_write_dsb(display, dsb, 
PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), + ROFF(csc[6]) | GOFF(csc[7])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), + BOFF(csc[8])); + + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); } static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, @@ -719,9 +729,11 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level) return val; } -static void skl_write_plane_wm(struct intel_plane *plane, +static void skl_write_plane_wm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -733,71 +745,75 @@ static void skl_write_plane_wm(struct intel_plane *plane, int level; for (level = 0; level < i915->display.wm.num_levels; level++) - intel_de_write_fw(i915, PLANE_WM(pipe, plane_id, level), - skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); + intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level), + skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); - intel_de_write_fw(i915, PLANE_WM_TRANS(pipe, plane_id), - skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); + intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id), + skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - intel_de_write_fw(i915, PLANE_WM_SAGV(pipe, plane_id), - skl_plane_wm_reg_val(&wm->sagv.wm0)); - intel_de_write_fw(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id), - skl_plane_wm_reg_val(&wm->sagv.trans_wm)); + intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id), + skl_plane_wm_reg_val(&wm->sagv.wm0)); + intel_de_write_dsb(display, dsb, PLANE_WM_SAGV_TRANS(pipe, plane_id), + skl_plane_wm_reg_val(&wm->sagv.trans_wm)); } - intel_de_write_fw(i915, PLANE_BUF_CFG(pipe, plane_id), - skl_plane_ddb_reg_val(ddb)); + intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id), + skl_plane_ddb_reg_val(ddb)); if (DISPLAY_VER(i915) < 11) - intel_de_write_fw(i915, PLANE_NV12_BUF_CFG(pipe, plane_id), - skl_plane_ddb_reg_val(ddb_y)); + intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id), + skl_plane_ddb_reg_val(ddb_y)); } static void -skl_plane_disable_arm(struct intel_plane *plane, +skl_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, 
plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } -static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane, +static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; - intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); } static void -icl_plane_disable_arm(struct intel_plane *plane, +icl_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), 0); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); - icl_plane_disable_sel_fetch_arm(plane, crtc_state); - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } static bool @@ -1234,28 +1250,30 @@ static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state) return keymsk; } -static void icl_plane_csc_load_black(struct intel_plane *plane) +static void icl_plane_csc_load_black(struct intel_dsb *dsb, + struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); - 
intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } static int icl_plane_color_plane(const struct intel_plane_state *plane_state) @@ -1268,11 +1286,12 @@ static int icl_plane_color_plane(const struct intel_plane_state *plane_state) } static void -skl_plane_update_noarm(struct intel_plane *plane, +skl_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 stride = skl_plane_stride(plane_state, 0); @@ -1287,21 +1306,23 @@ skl_plane_update_noarm(struct intel_plane *plane, crtc_y = 0; } - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), - PLANE_STRIDE_(stride)); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); + intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); + intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id), + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); + intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id), + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); } static void -skl_plane_update_arm(struct intel_plane *plane, +skl_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -1321,22 +1342,26 @@ skl_plane_update_arm(struct intel_plane *plane, plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id), + skl_plane_keyval(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id), + skl_plane_keymsk(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id), + skl_plane_keymax(plane_state)); - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); + intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), - skl_plane_aux_dist(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, 0)); - intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | - PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); 
+ intel_de_write_dsb(display, dsb, PLANE_AUX_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | + PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); if (DISPLAY_VER(dev_priv) >= 10) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); /* * Enable the scaler before the plane so that we don't @@ -1353,17 +1378,19 @@ skl_plane_update_arm(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, 0)); } -static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, +static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_rect *clip; u32 val; @@ -1380,7 +1407,7 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, y = (clip->y1 + plane_state->uapi.dst.y1); val = y << 16; val |= plane_state->uapi.dst.x1; - intel_de_write_fw(i915, SEL_FETCH_PLANE_POS(pipe, plane->id), val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_POS(pipe, plane->id), val); x = plane_state->view.color_plane[color_plane].x; @@ -1395,20 +1422,21 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, val = y << 16 | x; - intel_de_write_fw(i915, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), - val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), val); /* Sizes are 0 based */ val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; - intel_de_write_fw(i915, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val); } static void -icl_plane_update_noarm(struct intel_plane *plane, +icl_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -1432,76 +1460,82 @@ icl_plane_update_noarm(struct intel_plane *plane, crtc_y = 0; } - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), - PLANE_STRIDE_(stride)); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); + intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); + intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id), + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); + intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id), + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, 
plane_id), skl_plane_keyval(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id), + skl_plane_keyval(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id), + skl_plane_keymsk(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id), + skl_plane_keymax(plane_state)); - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); + intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { - intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), - lower_32_bits(plane_state->ccval)); - intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), - upper_32_bits(plane_state->ccval)); + intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 0), + lower_32_bits(plane_state->ccval)); + intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 1), + upper_32_bits(plane_state->ccval)); } /* FLAT CCS doesn't need to program AUX_DIST */ if (!HAS_FLAT_CCS(dev_priv) && DISPLAY_VER(dev_priv) < 20) - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), - skl_plane_aux_dist(plane_state, color_plane)); + intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, color_plane)); if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), - plane_state->cus_ctl); + intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), + plane_state->cus_ctl); - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) - icl_program_input_csc(plane, crtc_state, plane_state); + icl_program_input_csc(dsb, plane, plane_state); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); /* * FIXME: pxp session invalidation can hit any time even at time of commit * or after the commit, display content will be garbage. 
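The noarm/arm structure visible throughout these plane hooks follows the hardware's latching rules: registers written in the update_noarm() hooks do not take effect until PLANE_SURF is written, so the commit path can issue all non-arming writes first and keep the arming writes (PLANE_CTL immediately followed by PLANE_SURF) tightly grouped. A condensed sketch of that two-phase commit, assuming the real update_noarm/update_arm plane vfuncs, with the plane-state loop details and NULL-hook checks elided:

	/* phase 1: latched-but-inert programming for every plane */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		plane->update_noarm(dsb, plane, crtc_state, plane_state);

	/* phase 2: arming writes; PLANE_SURF makes the whole update live */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		plane->update_arm(dsb, plane, crtc_state, plane_state);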
*/ if (plane_state->force_black) - icl_plane_csc_load_black(plane); + icl_plane_csc_load_black(dsb, plane, crtc_state); - icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); + icl_plane_update_sel_fetch_noarm(dsb, plane, crtc_state, plane_state, color_plane); } -static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane, +static void icl_plane_update_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) - intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), - SEL_FETCH_PLANE_CTL_ENABLE); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), + SEL_FETCH_PLANE_CTL_ENABLE); else - icl_plane_disable_sel_fetch_arm(plane, crtc_state); + icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); } static void -icl_plane_update_arm(struct intel_plane *plane, +icl_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; int color_plane = icl_plane_color_plane(plane_state); @@ -1520,37 +1554,45 @@ icl_plane_update_arm(struct intel_plane *plane, if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); - icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state); + icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
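The pattern throughout these skl/icl plane hunks is mechanical: every intel_de_write_fw(dev_priv, ...) becomes intel_de_write_dsb(display, dsb, ...), threading the new dsb argument down from the commit path. A minimal sketch of the dispatch this implies — the call sites are from the patch, the helper body here is an assumption:

static inline void
intel_de_write_dsb(struct intel_display *display, struct intel_dsb *dsb,
		   i915_reg_t reg, u32 val)
{
	/*
	 * With a DSB context, queue the register write into the display
	 * state buffer for the hardware to replay; without one, fall
	 * back to an immediate uncached MMIO write as before.
	 */
	if (dsb)
		intel_dsb_reg_write(dsb, reg, val);
	else
		intel_de_write_fw(display, reg, val);
}

Passing dsb == NULL thus preserves the legacy behavior, which is why the conversion can be applied wholesale without changing non-DSB commits.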
*/ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, color_plane)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, color_plane)); } static void -skl_plane_async_flip(struct intel_plane *plane, +skl_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 plane_ctl = plane_state->ctl; + u32 plane_ctl = plane_state->ctl, plane_surf; plane_ctl |= skl_plane_ctl_crtc(crtc_state); + plane_surf = skl_plane_surf(plane_state, 0); - if (async_flip) - plane_ctl |= PLANE_CTL_ASYNC_FLIP; + if (async_flip) { + if (DISPLAY_VER(display) >= 30) + plane_surf |= PLANE_SURF_ASYNC_UPDATE; + else + plane_ctl |= PLANE_CTL_ASYNC_FLIP; + } - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + plane_surf); } static bool intel_format_is_p01x(u32 format) @@ -2095,13 +2137,13 @@ static void check_protection(struct intel_plane_state *plane_state) struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *obj = intel_fb_bo(fb); if (DISPLAY_VER(i915) < 11) return; plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0; - plane_state->force_black = i915_gem_object_is_protected(obj) && + plane_state->force_black = intel_bo_is_protected(obj) && !plane_state->decrypt; } @@ -2313,8 +2355,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) +static bool icl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); @@ -2326,9 +2368,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; + case DRM_FORMAT_RGB565: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2338,20 +2385,69 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: - if (intel_fb_is_mc_ccs_modifier(modifier)) + case DRM_FORMAT_XVYU2101010: + if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; fallthrough; - case DRM_FORMAT_RGB565: + case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case 
DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED || + modifier == I915_FORMAT_MOD_Y_TILED) + return true; + fallthrough; + default: + return false; + } +} + +static bool tgl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + struct intel_plane *plane = to_intel_plane(_plane); + + if (!intel_fb_plane_supports_modifier(plane, modifier)) + return false; + + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: + if (intel_fb_is_ccs_modifier(modifier)) + return true; + fallthrough; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + if (intel_fb_is_mc_ccs_modifier(modifier)) + return true; + fallthrough; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_C8: case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: @@ -2374,13 +2470,22 @@ static const struct drm_plane_funcs skl_plane_funcs = { .format_mod_supported = skl_plane_format_mod_supported, }; -static const struct drm_plane_funcs gen12_plane_funcs = { +static const struct drm_plane_funcs icl_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = gen12_plane_format_mod_supported, + .format_mod_supported = icl_plane_format_mod_supported, +}; + +static const struct drm_plane_funcs tgl_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = tgl_plane_format_mod_supported, }; static void @@ -2422,8 +2527,8 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, (plane_id == PLANE_1 || plane_id == PLANE_2); } -static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, - enum plane_id plane_id) +static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915, + enum plane_id plane_id) { if (DISPLAY_VER(i915) < 12) return false; @@ -2461,7 +2566,7 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915, caps |= INTEL_PLANE_CAP_CCS_RC_CC; } - if (gen12_plane_has_mc_ccs(i915, plane_id)) + if (tgl_plane_has_mc_ccs(i915, plane_id)) caps |= INTEL_PLANE_CAP_CCS_MC; if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915)) @@ -2494,7 +2599,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane); - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(dev_priv) >= 30) { + plane->max_width = xe3_plane_max_width; + plane->max_height = icl_plane_max_height; + plane->min_cdclk = icl_plane_min_cdclk; + } else if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; if (icl_is_hdr_plane(dev_priv, plane_id)) 
plane->max_width = icl_hdr_plane_max_width; @@ -2552,7 +2661,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane_id, &num_formats); if (DISPLAY_VER(dev_priv) >= 12) - plane_funcs = &gen12_plane_funcs; + plane_funcs = &tgl_plane_funcs; + else if (DISPLAY_VER(dev_priv) == 11) + plane_funcs = &icl_plane_funcs; else plane_funcs = &skl_plane_funcs; diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h index 4ddcd7d46bbd..ff31a00d511e 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h +++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h @@ -159,6 +159,7 @@ _PLANE_SURF_2_A, _PLANE_SURF_2_B) #define PLANE_SURF_ADDR_MASK REG_GENMASK(31, 12) #define PLANE_SURF_DECRYPT REG_BIT(2) +#define PLANE_SURF_ASYNC_UPDATE REG_BIT(0) #define _PLANE_KEYMAX_1_A 0x701a0 #define _PLANE_KEYMAX_2_A 0x702a0 diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 045c7cac166b..3b0e87edbacf 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -3,6 +3,8 @@ * Copyright © 2022 Intel Corporation */ +#include <linux/debugfs.h> + #include <drm/drm_blend.h> #include "i915_drv.h" @@ -716,7 +718,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane); + int color_plane, unsigned int pan_x); static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, struct intel_plane *plane, @@ -763,7 +765,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, drm_format_info(DRM_FORMAT_ARGB8888), DRM_FORMAT_MOD_LINEAR, DRM_MODE_ROTATE_0, - crtc_state->pixel_rate, &wp, 0); + crtc_state->pixel_rate, &wp, 0, 0); drm_WARN_ON(&i915->drm, ret); for (level = 0; level < i915->display.wm.num_levels; level++) { @@ -1740,7 +1742,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane) + int color_plane, unsigned int pan_x) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -1801,7 +1803,9 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->y_min_scanlines, wp->dbuf_block_size); - if (DISPLAY_VER(i915) >= 10) + if (DISPLAY_VER(i915) >= 30) + interm_pbpl += (pan_x != 0); + else if (DISPLAY_VER(i915) >= 10) interm_pbpl++; wp->plane_blocks_per_line = div_fixed16(interm_pbpl, @@ -1843,7 +1847,8 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, fb->format, fb->modifier, plane_state->hw.rotation, intel_plane_pixel_rate(crtc_state, plane_state), - wp, color_plane); + wp, color_plane, + plane_state->uapi.src.x1); } static bool skl_wm_has_lines(struct drm_i915_private *i915, int level) @@ -1907,7 +1912,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } } - blocks = fixed16_to_u32_round_up(selected_result) + 1; + blocks = fixed16_to_u32_round_up(selected_result); + if (DISPLAY_VER(i915) < 30) + blocks++; + /* * Lets have blocks at minimum equivalent to plane_blocks_per_line * as there will be at minimum one line for lines configuration. 
This @@ -2971,6 +2979,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, static void skl_wm_get_hw_state(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc *crtc; @@ -2978,7 +2987,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915) if (HAS_MBUS_JOINING(i915)) dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN; - dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw); + dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw); for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = @@ -3524,7 +3533,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state) for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) { u32 pipe_val = val; - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { + if (DISPLAY_VERx100(i915) == 1400) { if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, new_dbuf_state->active_pipes)) pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL; @@ -3598,6 +3607,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, const struct intel_dbuf_state *dbuf_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe = ffs(dbuf_state->active_pipes) - 1; const struct intel_crtc_state *new_crtc_state; @@ -3606,7 +3616,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus); drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes)); - crtc = intel_crtc_for_pipe(i915, pipe); + crtc = intel_crtc_for_pipe(display, pipe); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state)) @@ -3668,7 +3678,7 @@ void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state) void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_dbuf_state *new_dbuf_state = intel_atomic_get_new_dbuf_state(state); const struct intel_dbuf_state *old_dbuf_state = @@ -3687,7 +3697,7 @@ void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state) intel_dbuf_mbus_join_update(state, pipe); if (pipe != INVALID_PIPE) { - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); intel_crtc_wait_for_next_vblank(crtc); } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index 78b121941237..e73baec94873 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -73,9 +73,9 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state); container_of_const((global_state), struct intel_dbuf_state, base) #define intel_atomic_get_old_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) + to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) #define intel_atomic_get_new_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, 
&to_i915(state->base.dev)->display.dbuf.obj)) + to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) int intel_dbuf_init(struct drm_i915_private *i915); int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index d21f3fb39706..9383eedee2d4 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -30,6 +30,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -43,6 +44,7 @@ #include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "skl_scaler.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" @@ -1071,7 +1073,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port)); hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port)); - /* harizontal values are in terms of high speed byte clock */ + /* horizontal values are in terms of high speed byte clock */ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio); hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 70c5a13a3c75..59a50647f2c3 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -592,15 +592,16 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) static void assert_dsi_pll(struct drm_i915_private *i915, bool state) { + struct intel_display *display = &i915->display; bool cur_state; vlv_cck_get(i915); cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN; vlv_cck_put(i915); - I915_STATE_WARN(i915, cur_state != state, - "DSI PLL state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "DSI PLL state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_dsi_pll_enabled(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index a3b83cfe1726..f151640c1d13 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -915,7 +915,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) */ if (i915_gem_context_uses_protected_content(eb->gem_context) && i915_gem_object_is_protected(obj)) { - err = intel_pxp_key_check(eb->i915->pxp, obj, true); + err = intel_pxp_key_check(eb->i915->pxp, intel_bo_to_drm_bo(obj), true); if (err) { i915_gem_object_put(obj); return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 3b27218aabe2..900c08337942 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -13,7 +13,7 @@ #include "i915_driver.h" #include "i915_drv.h" -#if defined(CONFIG_X86) +#if IS_ENABLED(CONFIG_X86) #include <asm/smp.h> #else #define wbinvd_on_all_cpus() \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index d166052eb2ce..9117e9422844 100644 --- 
a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -117,7 +117,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, }, { NULL, 0 }, }, *phase; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; unsigned long count = 0; unsigned long scanned = 0; int err = 0, i = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index d29005980806..9d958a6f377e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -457,7 +457,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915) icl_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } else if (GRAPHICS_VER(i915) >= 8) { - if (IS_LP(i915)) + if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915)) chv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index b22e2019768f..10d8673641f7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -808,7 +808,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, } if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { - ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); + ret = ttm_bo_populate(bo, &ctx); if (ret) return ret; @@ -1038,7 +1038,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) struct ttm_buffer_object *bo = area->vm_private_data; struct drm_device *dev = bo->base.dev; struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; vm_fault_t ret; int idx; @@ -1195,7 +1195,7 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; assert_object_held_shared(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 03b00a03a634..041dab543b78 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -624,7 +624,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, /* Populate ttm with pages if needed. Typically system memory. 
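The ttm_tt_populate(bo->bdev, bo->ttm, &ctx) to ttm_bo_populate(bo, &ctx) conversions in the surrounding gem/ttm hunks move page population behind a buffer-object-level wrapper that is not shown in this diff. A rough sketch of its likely shape, under the assumption that it also handles buffers whose pages were swapped out:

int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *tt = bo->ttm;

	dma_resv_assert_held(bo->base.resv);

	if (!tt)
		return 0;

	/*
	 * The real helper presumably also restores pages the shrinker
	 * backed up, which is why callers now hand over the whole
	 * buffer object instead of the raw ttm_tt.
	 */
	return ttm_tt_populate(bo->bdev, tt, ctx);
}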
*/ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { - ret = ttm_tt_populate(bo->bdev, ttm, ctx); + ret = ttm_bo_populate(bo, ctx); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index ad649523d5e0..61596cecce4d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -90,7 +90,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, goto out_no_lock; backup_bo = i915_gem_to_ttm(backup); - err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + err = ttm_bo_populate(backup_bo, &ctx); if (err) goto out_no_populate; @@ -189,7 +189,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, if (!backup_bo->resource) err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx); if (!err) - err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + err = ttm_bo_populate(backup_bo, &ctx); if (!err) { err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, false); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 8fe0499308ff..4904d0f4162c 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -169,7 +169,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, return cs; } -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs) +u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs) { return __gen2_emit_breadcrumb(rq, cs, 16, 8); } @@ -248,7 +248,7 @@ int i830_emit_bb_start(struct i915_request *rq, return 0; } -int gen3_emit_bb_start(struct i915_request *rq, +int gen2_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { @@ -292,29 +292,12 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; - - i915->irq_mask &= ~engine->irq_enable_mask; - intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); - ENGINE_POSTING_READ16(engine, RING_IMR); -} - -void gen2_irq_disable(struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = engine->i915; - - i915->irq_mask |= engine->irq_enable_mask; - intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); -} - -void gen3_irq_enable(struct intel_engine_cs *engine) -{ engine->i915->irq_mask &= ~engine->irq_enable_mask; intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR); } -void gen3_irq_disable(struct intel_engine_cs *engine) +void gen2_irq_disable(struct intel_engine_cs *engine) { engine->i915->irq_mask |= engine->irq_enable_mask; intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h index a5cd64a65c9e..7b37560fc356 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h @@ -15,13 +15,13 @@ int gen2_emit_flush(struct i915_request *rq, u32 mode); int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode); int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode); -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs); +u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs); u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs); int i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags); -int 
gen3_emit_bb_start(struct i915_request *rq, +int gen2_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags); int gen4_emit_bb_start(struct i915_request *rq, @@ -30,8 +30,6 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine); void gen2_irq_disable(struct intel_engine_cs *engine); -void gen3_irq_enable(struct intel_engine_cs *engine); -void gen3_irq_disable(struct intel_engine_cs *engine); void gen5_irq_enable(struct intel_engine_cs *engine); void gen5_irq_disable(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index d38b914d1206..6e89112f68ae 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -399,7 +399,8 @@ static void emit_batch(struct i915_vma * const vma, batch_add(&cmds, MI_LOAD_REGISTER_IMM(2)); batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7)); batch_add(&cmds, 0xffff0000 | - ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ? + (((IS_IVYBRIDGE(i915) && INTEL_INFO(i915)->gt == 1) || + IS_VALLEYVIEW(i915)) ? HIZ_RAW_STALL_OPT_DISABLE : 0)); batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1)); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 20b9b04ec1e0..cc866773ba6f 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -70,7 +70,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) if (!--b->irq_enabled) b->irq_disable(b); - WRITE_ONCE(b->irq_armed, 0); + WRITE_ONCE(b->irq_armed, NULL); intel_gt_pm_put_async(b->irq_engine->gt, wakeref); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index a8eac59e3779..1c4784cb296c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -15,6 +15,7 @@ #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC +#define HEAD_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ #define RING_START(base) _MMIO((base) + 0x38) #define RING_CTL(base) _MMIO((base) + 0x3c) #define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ @@ -26,7 +27,6 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 -#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ #define RING_SYNC_0(base) _MMIO((base) + 0x40) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index bb29f361110e..c4a351ebf395 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -185,7 +185,7 @@ int intel_gt_init_hw(struct intel_gt *gt) if (IS_HASWELL(i915)) intel_uncore_write(uncore, HSW_MI_PREDICATE_RESULT_2, - IS_HASWELL_GT3(i915) ? + INTEL_INFO(i915)->gt == 3 ? LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); /* Apply the GT workarounds... 
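This HSW_MI_PREDICATE_RESULT_2 hunk, like the IS_IVB_GT1() change in gen7_renderclear.c above, open-codes what used to be a convenience macro. Reconstructed from how the call sites were rewritten (the exact original definitions are assumed, not quoted):

/* Former helpers, roughly: */
#define IS_HASWELL_GT3(i915)	(IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3)
#define IS_IVB_GT1(i915)	(IS_IVYBRIDGE(i915) && INTEL_INFO(i915)->gt == 1)
#define IS_BROADWELL_GT3(i915)	(IS_BROADWELL(i915) && INTEL_INFO(i915)->gt == 3)

Where the caller already sits inside an IS_HASWELL()/IS_IVYBRIDGE() guard, only the INTEL_INFO(i915)->gt comparison survives, which is exactly what these hunks spell out.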
*/ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index ad4c51f18d3a..1240d44eeb85 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -452,10 +452,10 @@ void gen8_gt_irq_reset(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - GEN8_IRQ_RESET_NDX(uncore, GT, 0); - GEN8_IRQ_RESET_NDX(uncore, GT, 1); - GEN8_IRQ_RESET_NDX(uncore, GT, 2); - GEN8_IRQ_RESET_NDX(uncore, GT, 3); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(0)); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(1)); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(2)); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(3)); } void gen8_gt_irq_postinstall(struct intel_gt *gt) @@ -476,14 +476,14 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt) gt->pm_ier = 0x0; gt->pm_imr = ~gt->pm_ier; - GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); - GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(0), ~gt_interrupts[0], gt_interrupts[0]); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]); /* * RPS interrupts will get enabled/disabled on demand when RPS itself * is enabled/disabled. Same wil be the case for GuC interrupts. */ - GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); - GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]); } static void gen5_gt_update_irq(struct intel_gt *gt, @@ -514,9 +514,9 @@ void gen5_gt_irq_reset(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - GEN3_IRQ_RESET(uncore, GT); + gen2_irq_reset(uncore, GT_IRQ_REGS); if (GRAPHICS_VER(gt->i915) >= 6) - GEN3_IRQ_RESET(uncore, GEN6_PM); + gen2_irq_reset(uncore, GEN6_PM_IRQ_REGS); } void gen5_gt_irq_postinstall(struct intel_gt *gt) @@ -538,7 +538,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt) else gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; - GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs); + gen2_irq_init(uncore, GT_IRQ_REGS, gt->gt_imr, gt_irqs); if (GRAPHICS_VER(gt->i915) >= 6) { /* @@ -551,6 +551,6 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt) } gt->pm_imr = 0xffffffff; - GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs); + gen2_irq_init(uncore, GEN6_PM_IRQ_REGS, gt->pm_imr, pm_irqs); } } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 911fd0160221..6f25c747bc29 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -35,7 +35,7 @@ static inline void __intel_gt_pm_get(struct intel_gt *gt) static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt) { if (!intel_wakeref_get_if_active(>->wakeref)) - return 0; + return NULL; return intel_wakeref_track(>->wakeref); } @@ -73,7 +73,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha } #define with_intel_gt_pm(gt, wf) \ - for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0) + for ((wf) = intel_gt_pm_get(gt); (wf); intel_gt_pm_put((gt), (wf)), (wf) = NULL) /** * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent @@ -84,7 +84,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha * @wf: pointer to a temporary wakeref. 
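The wakeref conversions in this header (0 becoming NULL, and the with_intel_gt_pm() iterators resetting wf to NULL) track intel_wakeref_t changing from an integer cookie into a pointer handle. Something along these lines, with both definitions assumed rather than quoted from the series:

/* intel_wakeref.h, assumed shape after this series: */
struct ref_tracker;
typedef struct ref_tracker *intel_wakeref_t;

/* Non-NULL sentinel meaning "held, but not individually tracked": */
#define INTEL_WAKEREF_DEF	ERR_PTR(-ENOENT)

The BUILD_BUG_ON() added to is_mock_gt() below then simply guarantees that the mock-GT marker ERR_PTR(-ENODEV) can never collide with that sentinel.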
*/ #define with_intel_gt_pm_if_awake(gt, wf) \ - for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0) + for ((wf) = intel_gt_pm_get_if_awake(gt); (wf); intel_gt_pm_put_async((gt), (wf)), (wf) = NULL) static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) { @@ -105,9 +105,13 @@ int intel_gt_runtime_resume(struct intel_gt *gt); ktime_t intel_gt_get_awake_time(const struct intel_gt *gt); +#define INTEL_WAKEREF_MOCK_GT ERR_PTR(-ENODEV) + static inline bool is_mock_gt(const struct intel_gt *gt) { - return I915_SELFTEST_ONLY(gt->awake == -ENODEV); + BUILD_BUG_ON(INTEL_WAKEREF_DEF == INTEL_WAKEREF_MOCK_GT); + + return I915_SELFTEST_ONLY(gt->awake == INTEL_WAKEREF_MOCK_GT); } #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 8d08b38874ef..b635aa2820d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -431,7 +431,7 @@ static int llc_show(struct seq_file *m, void *data) max_gpu_freq /= GEN9_FREQ_SCALER; } - seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + seq_puts(m, "GPU freq (MHz)\tEffective GPU freq (MHz)\tEffective Ring freq (MHz)\n"); wakeref = intel_runtime_pm_get(gt->uncore->rpm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 57a3c83d3655..6dba65e54cdb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -432,6 +432,7 @@ #define XEHPG_INSTDONE_GEOM_SVG MCR_REG(0x666c) #define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ +#define DISABLE_REPACKING_FOR_COMPRESSION REG_BIT(15) /* jsl+ */ #define RC_OP_FLUSH_ENABLE (1 << 0) #define HIZ_RAW_STALL_OPT_DISABLE (1 << 2) #define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ @@ -1472,6 +1473,10 @@ GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) +#define GEN6_PM_IRQ_REGS I915_IRQ_REGS(GEN6_PMIMR, \ + GEN6_PMIER, \ + GEN6_PMIIR) + #define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4) #define GEN7_GT_SCRATCH_REG_NUM 8 diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7bd5d2c29056..51847a846002 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -820,8 +820,10 @@ static bool ctx_needs_runalone(const struct intel_context *ce) bool ctx_is_protected = false; /* - * On MTL and newer platforms, protected contexts require setting - * the LRC run-alone bit or else the encryption will not happen. + * Wa_14019159160 - Case 2. + * On some platforms, protected contexts require setting + * the LRC run-alone bit or else the encryption/decryption will not happen. + * NOTE: Case 2 only applies to PXP use-case of said workaround. 
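The CTX_CONTEXT_CONTROL writes just below use the i915 masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits the hardware actually latches, so individual bits can be flipped without a read-modify-write. Simplified definitions (the real macros add compile-time checks):

#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

Hence _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE) sets the run-alone bit alone, leaving every other bit of the register untouched.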
*/ if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) && (ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) { @@ -850,6 +852,7 @@ static void init_common_regs(u32 * const regs, if (GRAPHICS_VER(engine->i915) < 11) ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); + /* Wa_14019159160 - Case 2. */ if (ctx_needs_runalone(ce)) ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE); regs[CTX_CONTEXT_CONTROL] = ctl; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8f1ea95471ef..f42f21632306 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1233,7 +1233,7 @@ void intel_gt_reset(struct intel_gt *gt, } if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - intel_runtime_pm_disable_interrupts(gt->i915); + intel_irq_suspend(gt->i915); if (do_reset(gt, stalled_mask)) { gt_err(gt, "Failed to reset chip\n"); } if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - intel_runtime_pm_enable_interrupts(gt->i915); + intel_irq_resume(gt->i915); intel_overlay_reset(gt->i915); diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 72277bc8322e..32f3b52a183a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -192,6 +192,7 @@ static bool stop_ring(struct intel_engine_cs *engine) static int xcs_resume(struct intel_engine_cs *engine) { struct intel_ring *ring = engine->legacy.ring; + ktime_t kt; ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", ring->head, ring->tail); @@ -230,9 +231,27 @@ static int xcs_resume(struct intel_engine_cs *engine) set_pp_dir(engine); /* First wake the ring up to an empty/idle ring */ - ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); + for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC); + ktime_before(ktime_get(), (kt)); cpu_relax()) { + /* + * In case a reset fails because the engine resumes from an + * incorrect RING_HEAD, the GPU may then be fed invalid + * instructions, which may lead to an unrecoverable hang. + * So if the first write doesn't succeed, try again.
+ */ + ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); + if (ENGINE_READ_FW(engine, RING_HEAD) == ring->head) + break; + } + ENGINE_WRITE_FW(engine, RING_TAIL, ring->head); - ENGINE_POSTING_READ(engine, RING_TAIL); + if (ENGINE_READ_FW(engine, RING_HEAD) != ENGINE_READ_FW(engine, RING_TAIL)) { + ENGINE_TRACE(engine, "failed to reset empty ring: [%x, %x]: %x\n", + ENGINE_READ_FW(engine, RING_HEAD), + ENGINE_READ_FW(engine, RING_TAIL), + ring->head); + goto err; + } ENGINE_WRITE_FW(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); @@ -241,12 +260,16 @@ static int xcs_resume(struct intel_engine_cs *engine) if (__intel_wait_for_register_fw(engine->uncore, RING_CTL(engine->mmio_base), RING_VALID, RING_VALID, - 5000, 0, NULL)) + 5000, 0, NULL)) { + ENGINE_TRACE(engine, "failed to restart\n"); goto err; + } - if (GRAPHICS_VER(engine->i915) > 2) + if (GRAPHICS_VER(engine->i915) > 2) { ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_POSTING_READ(engine, RING_MI_MODE); + } /* Now awake, let it get started */ if (ring->tail != ring->head) { @@ -1090,9 +1113,6 @@ static void setup_irq(struct intel_engine_cs *engine) } else if (GRAPHICS_VER(i915) >= 5) { engine->irq_enable = gen5_irq_enable; engine->irq_disable = gen5_irq_disable; - } else if (GRAPHICS_VER(i915) >= 3) { - engine->irq_enable = gen3_irq_enable; - engine->irq_disable = gen3_irq_disable; } else { engine->irq_enable = gen2_irq_enable; engine->irq_disable = gen2_irq_disable; @@ -1146,7 +1166,7 @@ static void setup_common(struct intel_engine_cs *engine) * equivalent to our next initial bread so we can elide * engine->emit_init_breadcrumb(). */ - engine->emit_fini_breadcrumb = gen3_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen2_emit_breadcrumb; if (GRAPHICS_VER(i915) == 5) engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; @@ -1159,7 +1179,7 @@ static void setup_common(struct intel_engine_cs *engine) else if (IS_I830(i915) || IS_I845G(i915)) engine->emit_bb_start = i830_emit_bb_start; else - engine->emit_bb_start = gen3_emit_bb_start; + engine->emit_bb_start = gen2_emit_bb_start; } static void setup_rcs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c index 756e9ebbc725..2487768bc230 100644 --- a/drivers/gpu/drm/i915/gt/intel_tlb.c +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c @@ -122,7 +122,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno) { intel_wakeref_t wakeref; - if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) + if (is_mock_gt(gt)) return; if (intel_gt_is_wedged(gt)) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e539a656cfc3..570c91878189 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -418,7 +418,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (INTEL_INFO(i915)->gt == 3 ? 
HDC_FENCE_DEST_SLM_DISABLE : 0)); } static void chv_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -2299,6 +2299,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_RC_SEMA_IDLE_MSG_DISABLE); } + if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { + /* + * "Disable Repacking for Compression (masked R/W access) + * before rendering compressed surfaces for display." + */ + wa_masked_en(wal, CACHE_MODE_0_GEN7, + DISABLE_REPACKING_FOR_COMPRESSION); + } + if (GRAPHICS_VER(i915) == 11) { /* This is not an Wa. Enable for better image quality */ wa_masked_en(wal, @@ -2537,7 +2546,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_FF_DS_SCHED_HW); /* WaDisablePSDDualDispatchEnable:ivb */ - if (IS_IVB_GT1(i915)) + if (INTEL_INFO(i915)->gt == 1) wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1, GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 097fc6bd1285..5949ff0b0161 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -239,8 +239,16 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc) static u32 guc_ctl_feature_flags(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); u32 flags = 0; + /* + * Enable PXP GuC autoteardown flow. + * NB: MTL does things differently. + */ + if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915)) + flags |= GUC_CTL_ENABLE_GUC_PXP_CTL; + if (!intel_guc_submission_is_used(guc)) flags |= GUC_CTL_DISABLE_SCHEDULER; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 23f54c84cbab..fe53e8eccf4b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -145,7 +145,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool * an end user should hit the timeout is in case of extreme thermal throttling. * And a system that is that hot during boot is probably dead anyway! 
*/ -#if defined(CONFIG_DRM_I915_DEBUG_GEM) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) #define GUC_LOAD_RETRY_LIMIT 20 #else #define GUC_LOAD_RETRY_LIMIT 3 diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 263c9c3f6a03..4ce6e2332a63 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -105,6 +105,7 @@ #define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22) #define GUC_CTL_FEATURE 2 +#define GUC_CTL_ENABLE_GUC_PXP_CTL BIT(1) #define GUC_CTL_ENABLE_SLPC BIT(2) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index bf16351c9349..222c95f62156 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -14,7 +14,7 @@ #include "intel_guc_log.h" #include "intel_guc_print.h" -#if defined(CONFIG_DRM_I915_DEBUG_GUC) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index ed979847187f..9ede6f240d79 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1339,7 +1339,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) * start_gt_clk is derived from GuC state. To get a consistent * view of activity, we query the GuC state only if gt is awake. */ - wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt); + wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt); if (wakeref) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 2d9152eb7282..d7ac31c3254c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -455,7 +455,7 @@ static const char *auth_mode_string(struct intel_huc *huc, * an end user should hit the timeout is in case of extreme thermal throttling. * And a system that is that hot during boot is probably dead anyway! 
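The defined(CONFIG_...) to IS_ENABLED(CONFIG_...) conversions here and in the GuC/HuC load code are behavior-preserving for boolean options; IS_ENABLED() is the uniform kconfig helper and additionally evaluates true for tristate =m. An illustrative use, mirroring the retry-limit hunks above (LOAD_RETRY_LIMIT is a made-up name):

#include <linux/kconfig.h>

/*
 * IS_ENABLED(CONFIG_FOO) is 1 for CONFIG_FOO=y or =m, while
 * defined(CONFIG_FOO) only matches =y. For a bool option such as
 * CONFIG_DRM_I915_DEBUG_GEM the two agree.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define LOAD_RETRY_LIMIT	20
#else
#define LOAD_RETRY_LIMIT	3
#endif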
*/ -#if defined(CONFIG_DRM_I915_DEBUG_GEM) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) #define HUC_LOAD_RETRY_LIMIT 20 #else #define HUC_LOAD_RETRY_LIMIT 3 diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2f4c9c66b40b..81d67a46cd9e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -50,7 +50,6 @@ #include "trace.h" #include "display/i9xx_plane_regs.h" -#include "display/intel_display.h" #include "display/intel_sprite_regs.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index c66d6d3177c8..17f74cb244bb 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -32,6 +32,8 @@ * */ +#include <drm/display/drm_dp.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -568,7 +570,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + port->dpcd->data[DP_SINK_COUNT] = 0x1; port->type = type; port->id = resolution; port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC; diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index f5616f99ef2f..8090bc53c7e1 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -59,52 +59,10 @@ struct intel_vgpu; #define INTEL_GVT_MAX_UEVENT_VARS 3 -/* DPCD start */ -#define DPCD_SIZE 0x700 - -/* DPCD */ -#define DP_SET_POWER 0x600 -#define DP_SET_POWER_D0 0x1 -#define AUX_NATIVE_WRITE 0x8 -#define AUX_NATIVE_READ 0x9 - -#define AUX_NATIVE_REPLY_MASK (0x3 << 4) -#define AUX_NATIVE_REPLY_ACK (0x0 << 4) #define AUX_NATIVE_REPLY_NAK (0x1 << 4) -#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) #define AUX_BURST_SIZE 20 -/* DPCD addresses */ -#define DPCD_REV 0x000 -#define DPCD_MAX_LINK_RATE 0x001 -#define DPCD_MAX_LANE_COUNT 0x002 - -#define DPCD_TRAINING_PATTERN_SET 0x102 -#define DPCD_SINK_COUNT 0x200 -#define DPCD_LANE0_1_STATUS 0x202 -#define DPCD_LANE2_3_STATUS 0x203 -#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204 -#define DPCD_SINK_STATUS 0x205 - -/* link training */ -#define DPCD_TRAINING_PATTERN_SET_MASK 0x03 -#define DPCD_LINK_TRAINING_DISABLED 0x00 -#define DPCD_TRAINING_PATTERN_1 0x01 -#define DPCD_TRAINING_PATTERN_2 0x02 - -#define DPCD_CP_READY_MASK (1 << 6) - -/* lane status */ -#define DPCD_LANES_CR_DONE 0x11 -#define DPCD_LANES_EQ_DONE 0x22 -#define DPCD_SYMBOL_LOCKED 0x44 - -#define DPCD_INTERLANE_ALIGN_DONE 0x01 - -#define DPCD_SINK_IN_SYNC 0x03 -/* DPCD end */ - #define SBI_RESPONSE_MASK 0x3 #define SBI_RESPONSE_SHIFT 0x1 #define SBI_STAT_MASK 0x1 diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index c022dc736045..0a357ca42db1 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -32,6 +32,8 @@ * */ +#include <drm/display/drm_dp.h> + #include "display/intel_dp_aux_regs.h" #include "display/intel_gmbus_regs.h" #include "gvt.h" @@ -504,13 +506,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } /* Always set the wanted value for vms. */ - ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1); + ret_msg_size = (((op & 0x1) == DP_AUX_I2C_READ) ? 
2 : 1); vgpu_vreg(vgpu, offset) = DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size); if (msg_length == 3) { - if (!(op & GVT_AUX_I2C_MOT)) { + if (!(op & DP_AUX_I2C_MOT)) { /* stop */ intel_vgpu_init_i2c_edid(vgpu); } else { @@ -530,7 +532,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, i2c_edid->edid_available = true; } } - } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { + } else if ((op & 0x1) == DP_AUX_I2C_WRITE) { /* TODO * We only support EDID reading from I2C_over_AUX. And * we do not expect the index mode to be used. Right now @@ -538,7 +540,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * support the gfx driver to do EDID access. */ } else { - if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ)) + if (drm_WARN_ON(&i915->drm, (op & 0x1) != DP_AUX_I2C_READ)) return; if (drm_WARN_ON(&i915->drm, msg_length != 4)) return; @@ -553,7 +555,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * ACK of I2C_WRITE * returned byte if it is READ */ - aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24; + aux_data_for_write |= DP_AUX_I2C_REPLY_ACK << 24; vgpu_vreg(vgpu, offset + 4) = aux_data_for_write; } diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index c3b5a55aecb3..13fd06590929 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -42,14 +42,6 @@ struct intel_vgpu; #define EDID_SIZE 128 #define EDID_ADDR 0x50 /* Linux hvm EDID addr */ -#define GVT_AUX_NATIVE_WRITE 0x8 -#define GVT_AUX_NATIVE_READ 0x9 -#define GVT_AUX_I2C_WRITE 0x0 -#define GVT_AUX_I2C_READ 0x1 -#define GVT_AUX_I2C_STATUS 0x2 -#define GVT_AUX_I2C_MOT 0x4 -#define GVT_AUX_I2C_REPLY_ACK 0x0 - struct intel_vgpu_edid_data { bool data_valid; unsigned char edid_block[EDID_SIZE]; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 58cca4906f41..1bce1493b86f 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1190,7 +1190,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, ppgtt_set_shadow_entry(spt, se, index); return 0; err: - /* Cancel the existing addess mappings of DMA addr. */ + /* Cancel the existing address mappings of DMA addr. 
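The gvt hunks in this stretch retire the driver-private DPCD and AUX defines in favor of the shared ones from <drm/display/drm_dp.h>. Since the register addresses are fixed by the DisplayPort spec, the substitution is value-for-value; a compile-time sanity sketch, with the expected values taken from the defines removed above:

#include <linux/build_bug.h>
#include <drm/display/drm_dp.h>

/* The shared constants carry exactly the values the private ones had. */
static_assert(DP_TRAINING_PATTERN_SET == 0x102);
static_assert(DP_SINK_COUNT == 0x200);
static_assert(DP_LANE0_1_STATUS == 0x202);
static_assert(DP_LANE_ALIGN_STATUS_UPDATED == 0x204);
static_assert(DP_SINK_STATUS == 0x205);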
*/ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(sub_spt, &sub_se); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0f09344d3c20..9494d812c00a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -36,6 +36,8 @@ */ +#include <drm/display/drm_dp.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -1129,29 +1131,36 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value, static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd, u8 t) { - if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) { + if ((t & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_1) { /* training pattern 1 for CR */ /* set LANE0_CR_DONE, LANE1_CR_DONE */ - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CR_DONE | + DP_LANE_CR_DONE << 4; /* set LANE2_CR_DONE, LANE3_CR_DONE */ - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE; - } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == - DPCD_TRAINING_PATTERN_2) { + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CR_DONE | + DP_LANE_CR_DONE << 4; + } else if ((t & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_2) { /* training pattern 2 for EQ */ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */ - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE; - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CHANNEL_EQ_DONE | + DP_LANE_CHANNEL_EQ_DONE << 4; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_SYMBOL_LOCKED | + DP_LANE_SYMBOL_LOCKED << 4; /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */ - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE; - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED; + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CHANNEL_EQ_DONE | + DP_LANE_CHANNEL_EQ_DONE << 4; + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_SYMBOL_LOCKED | + DP_LANE_SYMBOL_LOCKED << 4; /* set INTERLANE_ALIGN_DONE */ - dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |= - DPCD_INTERLANE_ALIGN_DONE; - } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == - DPCD_LINK_TRAINING_DISABLED) { + dpcd->data[DP_LANE_ALIGN_STATUS_UPDATED] |= + DP_INTERLANE_ALIGN_DONE; + } else if ((t & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_DISABLE) { /* finish link training */ /* set sink status as synchronized */ - dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC; + dpcd->data[DP_SINK_STATUS] = DP_RECEIVE_PORT_0_STATUS | + DP_RECEIVE_PORT_1_STATUS; } } @@ -1206,7 +1215,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, len = msg & 0xff; op = ctrl >> 4; - if (op == GVT_AUX_NATIVE_WRITE) { + if (op == DP_AUX_NATIVE_WRITE) { int t; u8 buf[16]; @@ -1252,7 +1261,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, dpcd->data[p] = buf[t]; /* check for link training */ - if (p == DPCD_TRAINING_PATTERN_SET) + if (p == DP_TRAINING_PATTERN_SET) dp_aux_ch_ctl_link_training(dpcd, buf[t]); } @@ -1265,7 +1274,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, return 0; } - if (op == GVT_AUX_NATIVE_READ) { + if (op == DP_AUX_NATIVE_READ) { int idx, i, ret = 0; if ((addr + len + 1) >= DPCD_SIZE) { diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 908f910420c2..509f9ccae3a9 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -439,7 +439,7 @@ int 
intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) gvt_vgpu_err("requesting SMI service\n"); return 0; } - /* ignore non 0->1 trasitions */ + /* ignore non 0->1 transitions */ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI] & SWSCI_SCI_TRIGGER) || !(swsci & SWSCI_SCI_TRIGGER)) { diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 60a65435556d..20c3cd807488 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -167,7 +167,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, return -ENXIO; if (unlikely(vgpu->failsafe)) { - /* Remove write protection to prevent furture traps. */ + /* Remove write protection to prevent future traps. */ intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a5c8005ec484..23f2cc397ec9 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1052,7 +1052,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; - /* free the unsubmited workloads in the queues. */ + /* free the unsubmitted workloads in the queues. */ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 5ec293011d99..35319228bc51 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -212,7 +212,7 @@ active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) struct i915_active_fence *active = container_of(cb, typeof(*active), cb); - return cmpxchg(__active_fence_slot(active), fence, NULL) == fence; + return try_cmpxchg(__active_fence_slot(active), &fence, NULL); } static void diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f969f585d07b..1c2a97f593c7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -33,8 +33,6 @@ #include <linux/debugfs.h> #include <drm/drm_debugfs.h> -#include "display/intel_display_params.h" - #include "gem/i915_gem_context.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" @@ -66,7 +64,6 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) static int i915_capabilities(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_display *display = &i915->display; struct drm_printer p = drm_seq_file_printer(m); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); @@ -76,10 +73,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); - kernel_param_lock(THIS_MODULE); i915_params_dump(&i915->params, &p); - intel_display_params_dump(display, &p); - kernel_param_unlock(THIS_MODULE); return 0; } diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index a40f05b993da..365329ff8a07 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -27,6 +27,7 @@ * */ +#include <linux/aperture.h> #include <linux/acpi.h> #include <linux/device.h> #include <linux/module.h> @@ -39,7 +40,6 @@ #include <linux/vga_switcheroo.h> #include <linux/vt.h> -#include 
<drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> @@ -48,8 +48,8 @@ #include "display/intel_acpi.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" +#include "display/intel_crtc.h" #include "display/intel_display_driver.h" -#include "display/intel_display.h" #include "display/intel_dmc.h" #include "display/intel_dp.h" #include "display/intel_dpt.h" @@ -59,7 +59,7 @@ #include "display/intel_overlay.h" #include "display/intel_pch_refclk.h" #include "display/intel_pps.h" -#include "display/intel_sprite.h" +#include "display/intel_sprite_uapi.h" #include "display/skl_watermark.h" #include "gem/i915_gem_context.h" @@ -485,7 +485,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_perf; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver); + ret = aperture_remove_conflicting_pci_devices(pdev, dev_priv->drm.driver->name); if (ret) goto err_ggtt; @@ -950,7 +950,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_dp_mst_suspend(i915); - intel_runtime_pm_disable_interrupts(i915); + intel_irq_suspend(i915); intel_hpd_cancel_work(i915); if (HAS_DISPLAY(i915)) @@ -959,7 +959,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_encoder_suspend_all(&i915->display); intel_encoder_shutdown_all(&i915->display); - intel_dmc_suspend(i915); + intel_dmc_suspend(&i915->display); i915_gem_suspend(i915); @@ -1035,7 +1035,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_dp_mst_suspend(dev_priv); - intel_runtime_pm_disable_interrupts(dev_priv); + intel_irq_suspend(dev_priv); intel_hpd_cancel_work(dev_priv); if (HAS_DISPLAY(dev_priv)) @@ -1054,7 +1054,7 @@ static int i915_drm_suspend(struct drm_device *dev) dev_priv->suspend_count++; - intel_dmc_suspend(dev_priv); + intel_dmc_suspend(display); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1164,7 +1164,7 @@ static int i915_drm_resume(struct drm_device *dev) /* Must be called after GGTT is resumed. */ intel_dpt_resume(dev_priv); - intel_dmc_resume(dev_priv); + intel_dmc_resume(display); i915_restore_display(dev_priv); intel_pps_unlock_regs_wa(display); @@ -1181,7 +1181,7 @@ static int i915_drm_resume(struct drm_device *dev) * Modeset enabling in intel_display_driver_init_hw() also needs working * interrupts. 
*/ - intel_runtime_pm_enable_interrupts(dev_priv); + intel_irq_resume(dev_priv); if (HAS_DISPLAY(dev_priv)) drm_mode_config_reset(dev); @@ -1481,7 +1481,7 @@ static int intel_runtime_suspend(struct device *kdev) for_each_gt(gt, dev_priv, i) intel_gt_runtime_suspend(gt); - intel_runtime_pm_disable_interrupts(dev_priv); + intel_irq_suspend(dev_priv); for_each_gt(gt, dev_priv, i) intel_uncore_suspend(gt->uncore); @@ -1494,7 +1494,7 @@ static int intel_runtime_suspend(struct device *kdev) "Runtime suspend failed, disabling it (%d)\n", ret); intel_uncore_runtime_resume(&dev_priv->uncore); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_irq_resume(dev_priv); for_each_gt(gt, dev_priv, i) intel_gt_runtime_resume(gt); @@ -1587,7 +1587,7 @@ static int intel_runtime_resume(struct device *kdev) for_each_gt(gt, dev_priv, i) intel_uncore_runtime_resume(gt->uncore); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_irq_resume(dev_priv); /* * No point of rolling back things in case of an error, as the best @@ -1725,7 +1725,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_crtc_get_pipe_from_crtc_id_ioctl, 0), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aa0b1bfb38e0..7b1a061d92fb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -234,6 +234,7 @@ struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; + bool irqs_enabled; /* Sideband mailbox protection */ struct mutex sb_lock; @@ -343,8 +344,6 @@ struct drm_i915_private { struct intel_pxp *pxp; - bool irq_enabled; - struct i915_pmu pmu; /* The TTM device structure. 
*/ @@ -508,8 +507,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915)) #define IS_SANDYBRIDGE(i915) IS_PLATFORM(i915, INTEL_SANDYBRIDGE) #define IS_IVYBRIDGE(i915) IS_PLATFORM(i915, INTEL_IVYBRIDGE) -#define IS_IVB_GT1(i915) (IS_IVYBRIDGE(i915) && \ - INTEL_INFO(i915)->gt == 1) #define IS_VALLEYVIEW(i915) IS_PLATFORM(i915, INTEL_VALLEYVIEW) #define IS_CHERRYVIEW(i915) IS_PLATFORM(i915, INTEL_CHERRYVIEW) #define IS_HASWELL(i915) IS_PLATFORM(i915, INTEL_HASWELL) @@ -539,6 +536,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, */ #define IS_LUNARLAKE(i915) (0 && i915) #define IS_BATTLEMAGE(i915) (0 && i915) +#define IS_PANTHERLAKE(i915) (0 && i915) #define IS_ARROWLAKE_H(i915) \ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H) @@ -566,14 +564,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT) #define IS_BROADWELL_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX) -#define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \ - INTEL_INFO(i915)->gt == 3) #define IS_HASWELL_ULT(i915) \ IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT) -#define IS_HASWELL_GT3(i915) (IS_HASWELL(i915) && \ - INTEL_INFO(i915)->gt == 3) -#define IS_HASWELL_GT1(i915) (IS_HASWELL(i915) && \ - INTEL_INFO(i915)->gt == 1) /* ULX machines are also considered ULT. */ #define IS_HASWELL_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX) @@ -585,31 +577,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) #define IS_KABYLAKE_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_SKYLAKE_GT2(i915) (IS_SKYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) -#define IS_SKYLAKE_GT3(i915) (IS_SKYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 3) -#define IS_SKYLAKE_GT4(i915) (IS_SKYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 4) -#define IS_KABYLAKE_GT2(i915) (IS_KABYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) -#define IS_KABYLAKE_GT3(i915) (IS_KABYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 3) #define IS_COFFEELAKE_ULT(i915) \ IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) #define IS_COFFEELAKE_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX) -#define IS_COFFEELAKE_GT2(i915) (IS_COFFEELAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) -#define IS_COFFEELAKE_GT3(i915) (IS_COFFEELAKE(i915) && \ - INTEL_INFO(i915)->gt == 3) - #define IS_COMETLAKE_ULT(i915) \ IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT) #define IS_COMETLAKE_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) #define IS_ICL_WITH_PORT_F(i915) \ IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) @@ -617,9 +592,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_TIGERLAKE_UY(i915) \ IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY) -#define IS_LP(i915) (INTEL_INFO(i915)->is_lp) -#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915)) -#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915)) +#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915)) +#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915)) #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) @@ -683,7 +657,7 @@ 
IS_SUBPLATFORM(const struct drm_i915_private *i915, /* WaRsDisableCoarsePowerGating:skl,cnl */ #define NEEDS_WaRsDisableCoarsePowerGating(i915) \ - (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915)) + (IS_SKYLAKE(i915) && (INTEL_INFO(i915)->gt == 3 || INTEL_INFO(i915)->gt == 4)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. @@ -697,6 +671,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps) +#define HAS_PXP(i915) \ + (IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp) + #define HAS_HECI_PXP(i915) \ (INTEL_INFO(i915)->has_heci_pxp) @@ -744,7 +721,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, /* DPF == dynamic parity feature */ #define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf) -#define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \ +#define NUM_L3_SLICES(i915) (IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3 ? \ 2 : HAS_L3_DPF(i915)) #define HAS_GUC_DEPRIVILEGE(i915) \ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 6469b9bcf2ec..4eb58887819a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -40,8 +40,7 @@ #include <drm/drm_cache.h> #include <drm/drm_print.h> -#include "display/intel_dmc.h" -#include "display/intel_overlay.h" +#include "display/intel_display_snapshot.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" @@ -651,8 +650,6 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, struct drm_printer p = i915_error_printer(m); intel_device_info_print(&error->device_info, &error->runtime_info, &p); - intel_display_device_info_print(&error->display_device_info, - &error->display_runtime_info, &p); intel_driver_caps_print(&error->driver_caps, &p); } @@ -660,10 +657,8 @@ static void err_print_params(struct drm_i915_error_state_buf *m, const struct i915_params *params) { struct drm_printer p = i915_error_printer(m); - struct intel_display *display = &m->i915->display; i915_params_dump(params, &p); - intel_display_params_dump(display, &p); } static void err_print_pciid(struct drm_i915_error_state_buf *m, @@ -875,8 +870,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_printf(m, "IOMMU enabled?: %d\n", error->iommu); - intel_dmc_print_error_state(&p, m->i915); - err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock)); err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended)); @@ -905,11 +898,10 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_print_gt_info(m, error->gt); } - if (error->overlay) - intel_overlay_print_error_state(&p, error->overlay); - err_print_capabilities(m, error); err_print_params(m, &error->params); + + intel_display_snapshot_print(error->display_snapshot, &p); } static int err_print_to_sgl(struct i915_gpu_coredump *error) @@ -1032,7 +1024,6 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma) static void cleanup_params(struct i915_gpu_coredump *error) { i915_params_free(&error->params); - intel_display_params_free(&error->display_params); } static void cleanup_uc(struct intel_uc_coredump *uc) @@ -1077,7 +1068,7 @@ void __i915_gpu_coredump_free(struct kref *error_ref) cleanup_gt(gt); } - kfree(error->overlay); + intel_display_snapshot_free(error->display_snapshot); cleanup_params(error); @@ -1113,7 +1104,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, } 
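
/*
 * Sketch of the snapshot lifecycle behind the i915_gpu_error.c hunks
 * above (only the three calls visible in this diff are assumed):
 * display state is now captured, printed and freed as one opaque
 * intel_display_snapshot object instead of the error-capture code
 * copying overlay, DMC, device-info and params state piecemeal.
 */
static void display_snapshot_example(struct intel_display *display,
				     struct drm_printer *p)
{
	struct intel_display_snapshot *snapshot;

	snapshot = intel_display_snapshot_capture(display); /* at hang time */
	intel_display_snapshot_print(snapshot, p);          /* when dumped */
	intel_display_snapshot_free(snapshot);              /* with the error */
}
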
INIT_LIST_HEAD(&dst->page_list); - strcpy(dst->name, name); + strscpy(dst->name, name); dst->next = NULL; dst->gtt_offset = vma_res->start; @@ -1413,7 +1404,7 @@ static bool record_context(struct i915_gem_context_coredump *e, rcu_read_lock(); task = pid_task(ctx->pid, PIDTYPE_PID); if (task) { - strcpy(e->comm, task->comm); + strscpy(e->comm, task->comm); e->pid = task->pid; } rcu_read_unlock(); @@ -1459,7 +1450,7 @@ capture_vma_snapshot(struct intel_engine_capture_vma *next, return next; } - strcpy(c->name, name); + strscpy(c->name, name); c->vma_res = i915_vma_resource_get(vma_res); c->next = next; @@ -1993,17 +1984,12 @@ static void capture_gen(struct i915_gpu_coredump *error) error->suspend_count = i915->suspend_count; i915_params_copy(&error->params, &i915->params); - intel_display_params_copy(&error->display_params); memcpy(&error->device_info, INTEL_INFO(i915), sizeof(error->device_info)); memcpy(&error->runtime_info, RUNTIME_INFO(i915), sizeof(error->runtime_info)); - memcpy(&error->display_device_info, DISPLAY_INFO(i915), - sizeof(error->display_device_info)); - memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915), - sizeof(error->display_runtime_info)); error->driver_caps = i915->caps; } @@ -2097,6 +2083,7 @@ static struct i915_gpu_coredump * __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) { struct drm_i915_private *i915 = gt->i915; + struct intel_display *display = &i915->display; struct i915_gpu_coredump *error; /* Check if GPU capture has been disabled */ @@ -2138,7 +2125,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du error->simulated |= error->gt->simulated; } - error->overlay = intel_overlay_capture_error_state(i915); + error->display_snapshot = intel_display_snapshot_capture(display); return error; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 7c255bb1c319..78a8928562a9 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -14,8 +14,6 @@ #include <drm/drm_mm.h> -#include "display/intel_display_device.h" -#include "display/intel_display_params.h" #include "gt/intel_engine.h" #include "gt/intel_engine_types.h" #include "gt/intel_gt_types.h" @@ -31,7 +29,7 @@ struct drm_i915_private; struct i915_vma_compress; struct intel_engine_capture_vma; -struct intel_overlay_error_state; +struct intel_display_snapshot; struct i915_vma_coredump { struct i915_vma_coredump *next; @@ -212,15 +210,12 @@ struct i915_gpu_coredump { struct intel_device_info device_info; struct intel_runtime_info runtime_info; - struct intel_display_device_info display_device_info; - struct intel_display_runtime_info display_runtime_info; struct intel_driver_caps driver_caps; struct i915_params params; - struct intel_display_params display_params; - - struct intel_overlay_error_state *overlay; struct scatterlist *sgl, *fit; + + struct intel_display_snapshot *display_snapshot; }; struct i915_gpu_error { diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index 17d30f6b84b0..7dfe1784153f 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -7,6 +7,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/types.h> +#include <linux/units.h> #include "i915_drv.h" #include "i915_hwmon.h" @@ -32,6 +33,7 @@ struct hwm_reg { i915_reg_t gt_perf_status; + i915_reg_t pkg_temp; i915_reg_t pkg_power_sku_unit; i915_reg_t pkg_power_sku; i915_reg_t 
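
/*
 * Note on the strcpy() -> strscpy() conversions above (illustrative):
 * the two-argument strscpy() form sizes the copy from the destination
 * array type at compile time, always NUL-terminates, and returns
 * -E2BIG instead of overflowing when the source does not fit.
 */
static void copy_comm_example(struct task_struct *task)
{
	char comm[TASK_COMM_LEN];

	/* two-argument form infers sizeof(comm) from the array type */
	if (strscpy(comm, task->comm) < 0)
		pr_debug("comm truncated\n");
}
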
pkg_rapl_limit; @@ -280,6 +282,7 @@ static const struct attribute_group *hwm_groups[] = { }; static const struct hwmon_channel_info * const hwm_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT), HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT), @@ -311,6 +314,37 @@ static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval) } static umode_t +hwm_temp_is_visible(const struct hwm_drvdata *ddat, u32 attr) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + + if (attr == hwmon_temp_input && i915_mmio_reg_valid(hwmon->rg.pkg_temp)) + return 0444; + + return 0; +} + +static int +hwm_temp_read(struct hwm_drvdata *ddat, u32 attr, long *val) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + intel_wakeref_t wakeref; + u32 reg_val; + + switch (attr) { + case hwmon_temp_input: + with_intel_runtime_pm(ddat->uncore->rpm, wakeref) + reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_temp); + + /* HW register value is in degrees Celsius, convert to millidegrees. */ + *val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static umode_t hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr) { struct drm_i915_private *i915 = ddat->uncore->i915; @@ -692,6 +726,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type, struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata; switch (type) { + case hwmon_temp: + return hwm_temp_is_visible(ddat, attr); case hwmon_in: return hwm_in_is_visible(ddat, attr); case hwmon_power: @@ -714,6 +750,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, struct hwm_drvdata *ddat = dev_get_drvdata(dev); switch (type) { + case hwmon_temp: + return hwm_temp_read(ddat, attr, val); case hwmon_in: return hwm_in_read(ddat, attr, val); case hwmon_power: @@ -810,6 +848,7 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) hwmon->rg.gt_perf_status = GEN12_RPSTAT1; if (IS_DG1(i915) || IS_DG2(i915)) { + hwmon->rg.pkg_temp = PCU_PACKAGE_TEMPERATURE; hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT; hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU; hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT; @@ -817,6 +856,7 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) hwmon->rg.energy_status_tile = INVALID_MMIO_REG; hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED; } else { + hwmon->rg.pkg_temp = INVALID_MMIO_REG; hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG; hwmon->rg.pkg_power_sku = INVALID_MMIO_REG; hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2321de48d169..f75cbf5b8a1c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -77,39 +77,24 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915, WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1); } -void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, - i915_reg_t iir, i915_reg_t ier) +void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs) { - intel_uncore_write(uncore, imr, 0xffffffff); - intel_uncore_posting_read(uncore, imr); + intel_uncore_write(uncore, regs.imr, 0xffffffff); + intel_uncore_posting_read(uncore, regs.imr); - intel_uncore_write(uncore, ier, 0); + intel_uncore_write(uncore, regs.ier, 0); /* IIR can theoretically queue up two events. Be paranoid. 
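
/*
 * Worked example for hwm_temp_read() above: TEMP_MASK (from the
 * PCU_PACKAGE_TEMPERATURE register definition) selects a field in
 * degrees Celsius, while the hwmon core expects millidegrees, so a
 * raw field value of 58 is reported as 58000.
 */
static long pkg_temp_to_millicelsius(u32 reg_val)
{
	return REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
}
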
*/ - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); -} - -static void gen2_irq_reset(struct intel_uncore *uncore) -{ - intel_uncore_write16(uncore, GEN2_IMR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IMR); - - intel_uncore_write16(uncore, GEN2_IER, 0); - - /* IIR can theoretically queue up two events. Be paranoid. */ - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); } /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. */ -void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) +void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) { u32 val = intel_uncore_read(uncore, reg); @@ -125,42 +110,14 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) intel_uncore_posting_read(uncore, reg); } -static void gen2_assert_iir_is_zero(struct intel_uncore *uncore) -{ - u16 val = intel_uncore_read16(uncore, GEN2_IIR); - - if (val == 0) - return; - - drm_WARN(&uncore->i915->drm, 1, - "Interrupt register 0x%x is not zero: 0x%08x\n", - i915_mmio_reg_offset(GEN2_IIR), val); - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); -} - -void gen3_irq_init(struct intel_uncore *uncore, - i915_reg_t imr, u32 imr_val, - i915_reg_t ier, u32 ier_val, - i915_reg_t iir) -{ - gen3_assert_iir_is_zero(uncore, iir); - - intel_uncore_write(uncore, ier, ier_val); - intel_uncore_write(uncore, imr, imr_val); - intel_uncore_posting_read(uncore, imr); -} - -static void gen2_irq_init(struct intel_uncore *uncore, - u32 imr_val, u32 ier_val) +void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs, + u32 imr_val, u32 ier_val) { - gen2_assert_iir_is_zero(uncore); + gen2_assert_iir_is_zero(uncore, regs.iir); - intel_uncore_write16(uncore, GEN2_IER, ier_val); - intel_uncore_write16(uncore, GEN2_IMR, imr_val); - intel_uncore_posting_read16(uncore, GEN2_IMR); + intel_uncore_write(uncore, regs.ier, ier_val); + intel_uncore_write(uncore, regs.imr, imr_val); + intel_uncore_posting_read(uncore, regs.imr); } /** @@ -298,7 +255,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | @@ -380,7 +337,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | @@ -665,7 +622,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_NOP(dev_priv)) return; - GEN3_IRQ_RESET(uncore, SDE); + gen2_irq_reset(uncore, SDE_IRQ_REGS); if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) intel_uncore_write(&dev_priv->uncore, SERR_INT, 
0xffffffff); @@ -677,7 +634,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - GEN3_IRQ_RESET(uncore, DE); + gen2_irq_reset(uncore, DE_IRQ_REGS); dev_priv->irq_mask = ~0u; if (GRAPHICS_VER(dev_priv) == 7) @@ -714,7 +671,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) gen8_gt_irq_reset(to_gt(dev_priv)); gen8_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); @@ -731,8 +688,8 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); } static void dg1_irq_reset(struct drm_i915_private *dev_priv) @@ -748,8 +705,8 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) gen11_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0); } @@ -763,7 +720,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) gen8_gt_irq_reset(to_gt(dev_priv)); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.irq.display_irqs_enabled) @@ -808,7 +765,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) gen11_gt_irq_postinstall(gt); gen11_de_irq_postinstall(dev_priv); - GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); + gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); gen11_master_intr_enable(intel_uncore_regs(uncore)); intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); @@ -824,7 +781,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) for_each_gt(gt, dev_priv, i) gen11_gt_irq_postinstall(gt); - GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); + gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); dg1_de_irq_postinstall(dev_priv); @@ -845,16 +802,6 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); } -static void i8xx_irq_reset(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - i9xx_pipestat_irq_reset(dev_priv); - - gen2_irq_reset(uncore); - dev_priv->irq_mask = ~0u; -} - static u32 i9xx_error_mask(struct drm_i915_private *i915) { /* @@ -876,76 +823,6 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915) I915_ERROR_MEMORY_REFRESH); } -static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u16 enable_mask; - - intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv)); - - /* Unmask the interrupts that we always want on. 
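
/*
 * Sketch of the register bundle taken by the reworked
 * gen2_irq_reset()/gen2_irq_init() above; the exact layout lives
 * outside this diff, so the definition below is inferred from usage:
 */
struct i915_irq_regs {
	i915_reg_t imr;
	i915_reg_t ier;
	i915_reg_t iir;
};

#define I915_IRQ_REGS(_imr, _ier, _iir) \
	((const struct i915_irq_regs){ .imr = (_imr), .ier = (_ier), .iir = (_iir) })

/* Call sites then pass one name, e.g. gen2_irq_reset(uncore, SDE_IRQ_REGS). */
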
*/ - dev_priv->irq_mask = - ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT); - - enable_mask = - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT | - I915_USER_INTERRUPT; - - gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask); - - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked check happy. */ - spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irq(&dev_priv->irq_lock); -} - -static void i8xx_error_irq_ack(struct drm_i915_private *i915, - u16 *eir, u16 *eir_stuck) -{ - struct intel_uncore *uncore = &i915->uncore; - u16 emr; - - *eir = intel_uncore_read16(uncore, EIR); - intel_uncore_write16(uncore, EIR, *eir); - - *eir_stuck = intel_uncore_read16(uncore, EIR); - if (*eir_stuck == 0) - return; - - /* - * Toggle all EMR bits to make sure we get an edge - * in the ISR master error bit if we don't clear - * all the EIR bits. Otherwise the edge triggered - * IIR on i965/g4x wouldn't notice that an interrupt - * is still pending. Also some EIR bits can't be - * cleared except by handling the underlying error - * (or by a GPU reset) so we mask any bit that - * remains set. - */ - emr = intel_uncore_read16(uncore, EMR); - intel_uncore_write16(uncore, EMR, 0xffff); - intel_uncore_write16(uncore, EMR, emr | *eir_stuck); -} - -static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, - u16 eir, u16 eir_stuck) -{ - drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir); - - if (eir_stuck) - drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", - eir_stuck); - - drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", - intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); -} - static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, u32 *eir, u32 *eir_stuck) { @@ -986,66 +863,13 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); } -static irqreturn_t i8xx_irq_handler(int irq, void *arg) -{ - struct drm_i915_private *dev_priv = arg; - irqreturn_t ret = IRQ_NONE; - - if (!intel_irqs_enabled(dev_priv)) - return IRQ_NONE; - - /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - - do { - u32 pipe_stats[I915_MAX_PIPES] = {}; - u16 eir = 0, eir_stuck = 0; - u16 iir; - - iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); - if (iir == 0) - break; - - ret = IRQ_HANDLED; - - /* Call regardless, as some status bits might not be - * signalled in iir */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); - - if (iir & I915_MASTER_ERROR_INTERRUPT) - i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); - - intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); - - if (iir & I915_USER_INTERRUPT) - intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); - - if (iir & I915_MASTER_ERROR_INTERRUPT) - i8xx_error_irq_handler(dev_priv, eir, eir_stuck); - - i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); - } while (0); - - pmu_irq_stats(dev_priv, ret); - - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - - return ret; -} - static void i915_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - if (I915_HAS_HOTPLUG(dev_priv)) { - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - 
intel_uncore_rmw(&dev_priv->uncore, - PORT_HOTPLUG_STAT(dev_priv), 0, 0); - } - - i9xx_pipestat_irq_reset(dev_priv); + i9xx_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN2_); + gen2_irq_reset(uncore, GEN2_IRQ_REGS); dev_priv->irq_mask = ~0u; } @@ -1056,28 +880,28 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv)); - /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = - ~(I915_ASLE_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_MASTER_ERROR_INTERRUPT); enable_mask = - I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; + if (DISPLAY_VER(dev_priv) >= 3) { + dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT; + enable_mask |= I915_ASLE_INTERRUPT; + } + if (I915_HAS_HOTPLUG(dev_priv)) { - /* Enable in IER... */ - enable_mask |= I915_DISPLAY_PORT_INTERRUPT; - /* and unmask in IMR */ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; + enable_mask |= I915_DISPLAY_PORT_INTERRUPT; } - GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -1117,7 +941,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) @@ -1148,12 +972,9 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0); + i9xx_display_irq_reset(dev_priv); - i9xx_pipestat_irq_reset(dev_priv); - - GEN3_IRQ_RESET(uncore, GEN2_); + gen2_irq_reset(uncore, GEN2_IRQ_REGS); dev_priv->irq_mask = ~0u; } @@ -1183,7 +1004,6 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv)); - /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PORT_INTERRUPT | @@ -1202,7 +1022,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; - GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
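
/*
 * Reading aid for the postinstall hunks above: a source raises an
 * interrupt only when it is both enabled in IER and unmasked in IMR,
 * which is why each optional feature (ASLE on display version 3+,
 * hotplug where present) clears a bit in irq_mask and sets the same
 * bit in enable_mask. Schematically:
 */
static void irq_enable_source(u32 *irq_mask, u32 *enable_mask, u32 bit)
{
	*irq_mask &= ~bit;	/* unmask in IMR */
	*enable_mask |= bit;	/* enable in IER */
}
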
*/ @@ -1242,7 +1062,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) @@ -1317,10 +1137,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) return valleyview_irq_handler; else if (GRAPHICS_VER(dev_priv) == 4) return i965_irq_handler; - else if (GRAPHICS_VER(dev_priv) == 3) - return i915_irq_handler; else - return i8xx_irq_handler; + return i915_irq_handler; } else { if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) return dg1_irq_handler; @@ -1342,10 +1160,8 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv) valleyview_irq_reset(dev_priv); else if (GRAPHICS_VER(dev_priv) == 4) i965_irq_reset(dev_priv); - else if (GRAPHICS_VER(dev_priv) == 3) - i915_irq_reset(dev_priv); else - i8xx_irq_reset(dev_priv); + i915_irq_reset(dev_priv); } else { if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) dg1_irq_reset(dev_priv); @@ -1367,10 +1183,8 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv) valleyview_irq_postinstall(dev_priv); else if (GRAPHICS_VER(dev_priv) == 4) i965_irq_postinstall(dev_priv); - else if (GRAPHICS_VER(dev_priv) == 3) - i915_irq_postinstall(dev_priv); else - i8xx_irq_postinstall(dev_priv); + i915_irq_postinstall(dev_priv); } else { if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) dg1_irq_postinstall(dev_priv); @@ -1404,16 +1218,14 @@ int intel_irq_install(struct drm_i915_private *dev_priv) * interrupts as enabled _before_ actually enabling them to avoid * special cases in our ordering checks. */ - dev_priv->runtime_pm.irqs_enabled = true; - - dev_priv->irq_enabled = true; + dev_priv->irqs_enabled = true; intel_irq_reset(dev_priv); ret = request_irq(irq, intel_irq_handler(dev_priv), IRQF_SHARED, DRIVER_NAME, dev_priv); if (ret < 0) { - dev_priv->irq_enabled = false; + dev_priv->irqs_enabled = false; return ret; } @@ -1433,56 +1245,46 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { int irq = to_pci_dev(dev_priv->drm.dev)->irq; - /* - * FIXME we can get called twice during driver probe - * error handling as well as during driver remove due to - * intel_display_driver_remove() calling us out of sequence. - * Would be nice if it didn't do that... - */ - if (!dev_priv->irq_enabled) + if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled)) return; - dev_priv->irq_enabled = false; - intel_irq_reset(dev_priv); free_irq(irq, dev_priv); intel_hpd_cancel_work(dev_priv); - dev_priv->runtime_pm.irqs_enabled = false; + dev_priv->irqs_enabled = false; } /** - * intel_runtime_pm_disable_interrupts - runtime interrupt disabling - * @dev_priv: i915 device instance + * intel_irq_suspend - Suspend interrupts + * @i915: i915 device instance * - * This function is used to disable interrupts at runtime, both in the runtime - * pm and the system suspend/resume code. + * This function is used to disable interrupts at runtime. 
*/ -void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) +void intel_irq_suspend(struct drm_i915_private *i915) { - intel_irq_reset(dev_priv); - dev_priv->runtime_pm.irqs_enabled = false; - intel_synchronize_irq(dev_priv); + intel_irq_reset(i915); + i915->irqs_enabled = false; + intel_synchronize_irq(i915); } /** - * intel_runtime_pm_enable_interrupts - runtime interrupt enabling - * @dev_priv: i915 device instance + * intel_irq_resume - Resume interrupts + * @i915: i915 device instance * - * This function is used to enable interrupts at runtime, both in the runtime - * pm and the system suspend/resume code. + * This function is used to enable interrupts at runtime. */ -void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) +void intel_irq_resume(struct drm_i915_private *i915) { - dev_priv->runtime_pm.irqs_enabled = true; - intel_irq_reset(dev_priv); - intel_irq_postinstall(dev_priv); + i915->irqs_enabled = true; + intel_irq_reset(i915); + intel_irq_postinstall(i915); } bool intel_irqs_enabled(struct drm_i915_private *dev_priv) { - return dev_priv->runtime_pm.irqs_enabled; + return dev_priv->irqs_enabled; } void intel_synchronize_irq(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index e665a1b007dc..0457f6402e05 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -34,45 +34,17 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask); -void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); -void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); +void intel_irq_suspend(struct drm_i915_private *i915); +void intel_irq_resume(struct drm_i915_private *i915); bool intel_irqs_enabled(struct drm_i915_private *dev_priv); void intel_synchronize_irq(struct drm_i915_private *i915); void intel_synchronize_hardirq(struct drm_i915_private *i915); -void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg); +void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg); -void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, - i915_reg_t iir, i915_reg_t ier); +void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs); -void gen3_irq_init(struct intel_uncore *uncore, - i915_reg_t imr, u32 imr_val, - i915_reg_t ier, u32 ier_val, - i915_reg_t iir); - -#define GEN8_IRQ_RESET_NDX(uncore, type, which) \ -({ \ - unsigned int which_ = which; \ - gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \ - GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \ -}) - -#define GEN3_IRQ_RESET(uncore, type) \ - gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER) - -#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \ -({ \ - unsigned int which_ = which; \ - gen3_irq_init((uncore), \ - GEN8_##type##_IMR(which_), imr_val, \ - GEN8_##type##_IER(which_), ier_val, \ - GEN8_##type##_IIR(which_)); \ -}) - -#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \ - gen3_irq_init((uncore), \ - type##IMR, imr_val, \ - type##IER, ier_val, \ - type##IIR) +void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs, + u32 imr_val, u32 ier_val); #endif /* __I915_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index d37bb3a704d0..21006c7f615c 100644 --- 
a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -24,7 +24,7 @@ #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> -#include <drm/intel/i915_pciids.h> +#include <drm/intel/pciids.h> #include "display/intel_display_driver.h" #include "gt/intel_gt_regs.h" @@ -367,7 +367,6 @@ static const struct intel_device_info ivb_q_info = { static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), - .is_lp = 1, .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -451,7 +450,6 @@ static const struct intel_device_info bdw_gt3_info = { static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .is_lp = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, @@ -512,7 +510,6 @@ static const struct intel_device_info skl_gt4_info = { #define GEN9_LP_FEATURES \ GEN(9), \ - .is_lp = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .has_3d_pipeline = 1, \ .has_64bit_reloc = 1, \ @@ -870,6 +867,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_info), INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_info), INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_info), + INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_info), INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_info), {} }; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 21eb0c5b320d..93fbf53578da 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -356,7 +356,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915) return GRAPHICS_VER(i915) == 7; } -static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) { struct intel_engine_pmu *pmu = &engine->pmu; bool busy; @@ -391,6 +391,31 @@ static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); } +static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + struct intel_engine_pmu *pmu = &engine->pmu; + u32 tail, head, acthd; + + tail = ENGINE_READ_FW(engine, RING_TAIL); + head = ENGINE_READ_FW(engine, RING_HEAD); + acthd = ENGINE_READ_FW(engine, ACTHD); + + if (head & HEAD_WAIT_I8XX) + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); + + if (head & HEAD_WAIT_I8XX || head != acthd || + (head & HEAD_ADDR) != (tail & TAIL_ADDR)) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); +} + +static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + if (GRAPHICS_VER(engine->i915) >= 3) + gen3_engine_sample(engine, period_ns); + else + gen2_engine_sample(engine, period_ns); +} + static void engines_sample(struct intel_gt *gt, unsigned int period_ns) { @@ -834,15 +859,14 @@ static void i915_pmu_event_start(struct perf_event *event, int flags) static void i915_pmu_event_stop(struct perf_event *event, int flags) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); if (pmu->closed) goto out; if (flags & PERF_EF_UPDATE) i915_pmu_event_read(event); + i915_pmu_disable(event); out: @@ -1232,17 +1256,6 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); } -static bool is_igp(struct drm_i915_private *i915) 
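
/*
 * Reading aid for gen2_engine_sample() above: lacking the gen3+ ring
 * mode bits, gen2 busyness is inferred purely from the ring
 * registers, roughly:
 */
static bool gen2_engine_busy(u32 head, u32 tail, u32 acthd)
{
	return (head & HEAD_WAIT_I8XX) ||	/* waiting on an event */
	       head != acthd ||			/* CS still executing */
	       (head & HEAD_ADDR) != (tail & TAIL_ADDR); /* ring not empty */
}
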
-{ - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - - /* IGP is 0000:00:02.0 */ - return pci_domain_nr(pdev->bus) == 0 && - pdev->bus->number == 0 && - PCI_SLOT(pdev->devfn) == 2 && - PCI_FUNC(pdev->devfn) == 0; -} - void i915_pmu_register(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; @@ -1255,18 +1268,13 @@ void i915_pmu_register(struct drm_i915_private *i915) int ret = -ENOMEM; - if (GRAPHICS_VER(i915) <= 2) { - drm_info(&i915->drm, "PMU not supported for this GPU."); - return; - } - spin_lock_init(&pmu->lock); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; pmu->cpuhp.cpu = -1; init_rc6(pmu); - if (!is_igp(i915)) { + if (IS_DGFX(i915)) { pmu->name = kasprintf(GFP_KERNEL, "i915_%s", dev_name(i915->drm.dev)); @@ -1318,7 +1326,7 @@ err_attr: pmu->base.event_init = NULL; free_event_attributes(pmu); err_name: - if (!is_igp(i915)) + if (IS_DGFX(i915)) kfree(pmu->name); err: drm_notice(&i915->drm, "Failed to register PMU!\n"); @@ -1346,7 +1354,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915) perf_pmu_unregister(&pmu->base); pmu->base.event_init = NULL; kfree(pmu->base.attr_groups); - if (!is_igp(i915)) + if (IS_DGFX(i915)) kfree(pmu->name); free_event_attributes(pmu); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 41f4350a7c6c..22be4a731d27 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -422,6 +422,11 @@ #define GEN2_IIR _MMIO(0x20a4) #define GEN2_IMR _MMIO(0x20a8) #define GEN2_ISR _MMIO(0x20ac) + +#define GEN2_IRQ_REGS I915_IRQ_REGS(GEN2_IMR, \ + GEN2_IER, \ + GEN2_IIR) + #define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060) #define GINT_DIS (1 << 22) #define GCFG_DIS (1 << 8) @@ -434,6 +439,10 @@ #define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120) #define VLV_PCBR_ADDR_SHIFT 12 +#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \ + VLV_IER, \ + VLV_IIR) + #define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */ #define EIR _MMIO(0x20b0) #define EMR _MMIO(0x20b4) @@ -1071,87 +1080,77 @@ /* Pipe/transcoder A timing regs */ #define _TRANS_HTOTAL_A 0x60000 +#define _TRANS_HTOTAL_B 0x61000 +#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A) #define HTOTAL_MASK REG_GENMASK(31, 16) #define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal)) #define HACTIVE_MASK REG_GENMASK(15, 0) #define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay)) + #define _TRANS_HBLANK_A 0x60004 +#define _TRANS_HBLANK_B 0x61004 +#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A) #define HBLANK_END_MASK REG_GENMASK(31, 16) #define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end)) #define HBLANK_START_MASK REG_GENMASK(15, 0) #define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start)) + #define _TRANS_HSYNC_A 0x60008 +#define _TRANS_HSYNC_B 0x61008 +#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A) #define HSYNC_END_MASK REG_GENMASK(31, 16) #define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end)) #define HSYNC_START_MASK REG_GENMASK(15, 0) #define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start)) + #define _TRANS_VTOTAL_A 0x6000c +#define _TRANS_VTOTAL_B 0x6100c +#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A) #define VTOTAL_MASK REG_GENMASK(31, 16) #define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, 
(vtotal)) #define VACTIVE_MASK REG_GENMASK(15, 0) #define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay)) + #define _TRANS_VBLANK_A 0x60010 +#define _TRANS_VBLANK_B 0x61010 +#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A) #define VBLANK_END_MASK REG_GENMASK(31, 16) #define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end)) #define VBLANK_START_MASK REG_GENMASK(15, 0) #define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start)) + #define _TRANS_VSYNC_A 0x60014 +#define _TRANS_VSYNC_B 0x61014 +#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A) #define VSYNC_END_MASK REG_GENMASK(31, 16) #define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end)) #define VSYNC_START_MASK REG_GENMASK(15, 0) #define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start)) -#define _TRANS_EXITLINE_A 0x60018 + #define _PIPEASRC 0x6001c +#define _PIPEBSRC 0x6101c +#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC) #define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) #define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) #define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) #define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) -#define _BCLRPAT_A 0x60020 -#define _TRANS_VSYNCSHIFT_A 0x60028 -#define _TRANS_MULT_A 0x6002c -/* Pipe/transcoder B timing regs */ -#define _TRANS_HTOTAL_B 0x61000 -#define _TRANS_HBLANK_B 0x61004 -#define _TRANS_HSYNC_B 0x61008 -#define _TRANS_VTOTAL_B 0x6100c -#define _TRANS_VBLANK_B 0x61010 -#define _TRANS_VSYNC_B 0x61014 -#define _PIPEBSRC 0x6101c +#define _BCLRPAT_A 0x60020 #define _BCLRPAT_B 0x61020 -#define _TRANS_VSYNCSHIFT_B 0x61028 -#define _TRANS_MULT_B 0x6102c - -/* DSI 0 timing regs */ -#define _TRANS_HTOTAL_DSI0 0x6b000 -#define _TRANS_HSYNC_DSI0 0x6b008 -#define _TRANS_VTOTAL_DSI0 0x6b00c -#define _TRANS_VSYNC_DSI0 0x6b014 -#define _TRANS_VSYNCSHIFT_DSI0 0x6b028 - -/* DSI 1 timing regs */ -#define _TRANS_HTOTAL_DSI1 0x6b800 -#define _TRANS_HSYNC_DSI1 0x6b808 -#define _TRANS_VTOTAL_DSI1 0x6b80c -#define _TRANS_VSYNC_DSI1 0x6b814 -#define _TRANS_VSYNCSHIFT_DSI1 0x6b828 - -#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A) -#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A) -#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A) -#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A) -#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A) -#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A) #define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A) + +#define _TRANS_VSYNCSHIFT_A 0x60028 +#define _TRANS_VSYNCSHIFT_B 0x61028 #define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A) -#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC) + +#define _TRANS_MULT_A 0x6002c +#define _TRANS_MULT_B 0x6102c #define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A) /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) #define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100) - #define ADPA_DAC_ENABLE (1 << 31) #define ADPA_DAC_DISABLE 0 #define ADPA_PIPE_SEL_SHIFT 30 @@ -1195,7 +1194,6 @@ #define ADPA_DPMS_STANDBY (2 << 10) #define ADPA_DPMS_OFF (3 << 10) - /* Hotplug control (945+ only) */ 
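
/*
 * Note on the i915_reg.h hunks above: they are pure reshuffling. Each
 * pipe/transcoder B offset and its accessor now sit next to the A
 * offset they pair with, instead of in a separate "pipe/transcoder B"
 * block. Only the A offset appears in the accessor because
 * _MMIO_TRANS2() rebases it through the per-device transcoder offset
 * table, along these lines (hypothetical name):
 */
#define EXAMPLE_TIMING_REG(dev_priv, trans) \
	_MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
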
#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110) #define PORTB_HOTPLUG_INT_EN (1 << 29) @@ -1446,11 +1444,9 @@ #define DP_B _MMIO(0x64100) #define DP_C _MMIO(0x64200) #define DP_D _MMIO(0x64300) - #define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100) #define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200) #define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300) - #define DP_PORT_EN (1 << 31) #define DP_PIPE_SEL_SHIFT 30 #define DP_PIPE_SEL_MASK (1 << 30) @@ -1549,16 +1545,16 @@ */ #define _PIPEA_DATA_M_G4X 0x70050 #define _PIPEB_DATA_M_G4X 0x71050 - +#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ #define TU_SIZE_MASK REG_GENMASK(30, 25) #define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */ - #define DATA_LINK_M_N_MASK REG_GENMASK(23, 0) #define DATA_LINK_N_MAX (0x800000) #define _PIPEA_DATA_N_G4X 0x70054 #define _PIPEB_DATA_N_G4X 0x71054 +#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) /* * Computing Link M and N values for the Display Port link @@ -1570,22 +1566,22 @@ * The Link value is transmitted in the Main Stream * Attributes and VB-ID. */ - #define _PIPEA_LINK_M_G4X 0x70060 #define _PIPEB_LINK_M_G4X 0x71060 +#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) + #define _PIPEA_LINK_N_G4X 0x70064 #define _PIPEB_LINK_N_G4X 0x71064 - -#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) -#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) -#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) #define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) /* Pipe A */ #define _PIPEADSL 0x70000 +#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL) #define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ #define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) + #define _TRANSACONF 0x70008 +#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF) #define TRANSCONF_ENABLE REG_BIT(31) #define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ #define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */ @@ -1645,6 +1641,7 @@ #define TRANSCONF_PIXEL_COUNT_SCALING_X4 1 #define _PIPEASTAT 0x70024 +#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT) #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) #define PIPE_CRC_ERROR_ENABLE (1UL << 29) @@ -1691,15 +1688,8 @@ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1) #define PIPE_HBLANK_INT_STATUS (1UL << 0) #define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0) - -#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 -#define PIPESTAT_INT_STATUS_MASK 0x0000ffff - -#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF) -#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL) -#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH) -#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL) -#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT) +#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 +#define PIPESTAT_INT_STATUS_MASK 0x0000ffff #define _PIPE_ARB_CTL_A 0x70028 /* icl+ */ #define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A) @@ -1707,6 +1697,7 @@ #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 +#define 
PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) #define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ #define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ #define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ @@ -1734,23 +1725,15 @@ #define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1) #define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2) #define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3) -#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) #define _PIPE_MISC2_A 0x7002C #define _PIPE_MISC2_B 0x7102C +#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) #define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24) #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80) #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20) #define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */ #define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id)) -#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) - -#define _ICL_PIPE_A_STATUS 0x70058 -#define ICL_PIPESTATUS(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS) -#define PIPE_STATUS_UNDERRUN REG_BIT(31) -#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28) -#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27) -#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26) #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29) @@ -2066,33 +2049,38 @@ * frame = (high1 << 8) | low1; */ #define _PIPEAFRAMEHIGH 0x70040 +#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH) #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 + #define _PIPEAFRAMEPIXEL 0x70044 +#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL) #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 + /* GM45+ just has to be different */ #define _PIPEA_FRMCOUNT_G4X 0x70040 -#define _PIPEA_FLIPCOUNT_G4X 0x70044 #define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X) + +#define _PIPEA_FLIPCOUNT_G4X 0x70044 #define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X) /* CHV pipe B blender */ #define _CHV_BLEND_A 0x60a00 +#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A) #define CHV_BLEND_MASK REG_GENMASK(31, 30) #define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0) #define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1) #define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2) + #define _CHV_CANVAS_A 0x60a04 +#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A) #define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20) #define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10) #define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0) -#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A) -#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A) - /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) #define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK) @@ -2114,11 +2102,6 @@ #define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4) #define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4) -/* ICL DSI 0 and 
1 */ -#define _PIPEDSI0CONF 0x7b008 -#define _PIPEDSI1CONF 0x7b808 - - /* VBIOS regs */ #define VGACNTRL _MMIO(0x71400) # define VGA_DISP_DISABLE (1 << 31) @@ -2156,38 +2139,42 @@ # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) #define _PIPEA_DATA_M1 0x60030 -#define _PIPEA_DATA_N1 0x60034 -#define _PIPEA_DATA_M2 0x60038 -#define _PIPEA_DATA_N2 0x6003c -#define _PIPEA_LINK_M1 0x60040 -#define _PIPEA_LINK_N1 0x60044 -#define _PIPEA_LINK_M2 0x60048 -#define _PIPEA_LINK_N2 0x6004c - -/* PIPEB timing regs are same start from 0x61000 */ - #define _PIPEB_DATA_M1 0x61030 -#define _PIPEB_DATA_N1 0x61034 -#define _PIPEB_DATA_M2 0x61038 -#define _PIPEB_DATA_N2 0x6103c -#define _PIPEB_LINK_M1 0x61040 -#define _PIPEB_LINK_N1 0x61044 -#define _PIPEB_LINK_M2 0x61048 -#define _PIPEB_LINK_N2 0x6104c - #define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1) + +#define _PIPEA_DATA_N1 0x60034 +#define _PIPEB_DATA_N1 0x61034 #define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1) + +#define _PIPEA_DATA_M2 0x60038 +#define _PIPEB_DATA_M2 0x61038 #define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2) + +#define _PIPEA_DATA_N2 0x6003c +#define _PIPEB_DATA_N2 0x6103c #define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2) + +#define _PIPEA_LINK_M1 0x60040 +#define _PIPEB_LINK_M1 0x61040 #define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1) + +#define _PIPEA_LINK_N1 0x60044 +#define _PIPEB_LINK_N1 0x61044 #define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1) + +#define _PIPEA_LINK_M2 0x60048 +#define _PIPEB_LINK_M2 0x61048 #define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2) + +#define _PIPEA_LINK_N2 0x6004c +#define _PIPEB_LINK_N2 0x6104c #define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 +#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) #define PF_ENABLE REG_BIT(31) #define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */ #define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe)) @@ -2196,37 +2183,43 @@ #define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1) #define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2) #define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3) + #define _PFA_WIN_SZ 0x68074 #define _PFB_WIN_SZ 0x68874 +#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) #define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16) #define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w)) #define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0) #define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h)) + #define _PFA_WIN_POS 0x68070 #define _PFB_WIN_POS 0x68870 +#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) #define PF_WIN_XPOS_MASK REG_GENMASK(31, 16) #define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x)) #define PF_WIN_YPOS_MASK REG_GENMASK(15, 0) #define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y)) + #define _PFA_VSCALE 0x68084 #define _PFB_VSCALE 0x68884 +#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) + #define _PFA_HSCALE 0x68090 #define _PFB_HSCALE 0x68890 - -#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) -#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) -#define PF_WIN_POS(pipe) 
_MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) -#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) #define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) /* * Skylake scalers */ +#define _ID(id, a, b) _PICK_EVEN(id, a, b) #define _PS_1A_CTRL 0x68180 #define _PS_2A_CTRL 0x68280 #define _PS_1B_CTRL 0x68980 #define _PS_2B_CTRL 0x68A80 #define _PS_1C_CTRL 0x69180 +#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ + _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) #define PS_SCALER_EN REG_BIT(31) #define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */ #define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0) @@ -2279,6 +2272,9 @@ #define _PS_PWR_GATE_1B 0x68960 #define _PS_PWR_GATE_2B 0x68A60 #define _PS_PWR_GATE_1C 0x69160 +#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \ + _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B)) #define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31) #define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3) #define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0) @@ -2296,6 +2292,9 @@ #define _PS_WIN_POS_1B 0x68970 #define _PS_WIN_POS_2B 0x68A70 #define _PS_WIN_POS_1C 0x69170 +#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \ + _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B)) #define PS_WIN_XPOS_MASK REG_GENMASK(31, 16) #define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x)) #define PS_WIN_YPOS_MASK REG_GENMASK(15, 0) @@ -2306,6 +2305,9 @@ #define _PS_WIN_SZ_1B 0x68974 #define _PS_WIN_SZ_2B 0x68A74 #define _PS_WIN_SZ_1C 0x69174 +#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \ + _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B)) #define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16) #define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w)) #define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0) @@ -2316,18 +2318,27 @@ #define _PS_VSCALE_1B 0x68984 #define _PS_VSCALE_2B 0x68A84 #define _PS_VSCALE_1C 0x69184 +#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \ + _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B)) #define _PS_HSCALE_1A 0x68190 #define _PS_HSCALE_2A 0x68290 #define _PS_HSCALE_1B 0x68990 #define _PS_HSCALE_2B 0x68A90 #define _PS_HSCALE_1C 0x69190 +#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \ + _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B)) #define _PS_VPHASE_1A 0x68188 #define _PS_VPHASE_2A 0x68288 #define _PS_VPHASE_1B 0x68988 #define _PS_VPHASE_2B 0x68A88 #define _PS_VPHASE_1C 0x69188 +#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \ + _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B)) #define PS_Y_PHASE_MASK REG_GENMASK(31, 16) #define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x)) #define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0) @@ -2340,56 +2351,32 @@ #define _PS_HPHASE_1B 0x68994 #define _PS_HPHASE_2B 0x68A94 #define _PS_HPHASE_1C 0x69194 +#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \ + _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B)) #define _PS_ECC_STAT_1A 0x681D0 #define _PS_ECC_STAT_2A 0x682D0 #define _PS_ECC_STAT_1B 0x689D0 #define _PS_ECC_STAT_2B 0x68AD0 #define _PS_ECC_STAT_1C 0x691D0 +#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ + _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)) #define _PS_COEF_SET0_INDEX_1A 0x68198 #define _PS_COEF_SET0_INDEX_2A 0x68298 #define 
_PS_COEF_SET0_INDEX_1B 0x68998 #define _PS_COEF_SET0_INDEX_2B 0x68A98 +#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \ + _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \ + _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8) #define PS_COEF_INDEX_AUTO_INC REG_BIT(10) #define _PS_COEF_SET0_DATA_1A 0x6819C #define _PS_COEF_SET0_DATA_2A 0x6829C #define _PS_COEF_SET0_DATA_1B 0x6899C #define _PS_COEF_SET0_DATA_2B 0x68A9C - -#define _ID(id, a, b) _PICK_EVEN(id, a, b) -#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ - _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) -#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \ - _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B)) -#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \ - _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B)) -#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \ - _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B)) -#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \ - _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B)) -#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \ - _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B)) -#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \ - _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B)) -#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \ - _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B)) -#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ - _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)) -#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \ - _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \ - _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8) - #define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \ _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \ _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8) @@ -2459,11 +2446,19 @@ #define DEIIR _MMIO(0x44008) #define DEIER _MMIO(0x4400c) +#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \ + DEIER, \ + DEIIR) + #define GTISR _MMIO(0x44010) #define GTIMR _MMIO(0x44014) #define GTIIR _MMIO(0x44018) #define GTIER _MMIO(0x4401c) +#define GT_IRQ_REGS I915_IRQ_REGS(GTIMR, \ + GTIER, \ + GTIIR) + #define GEN8_MASTER_IRQ _MMIO(0x44200) #define GEN8_MASTER_IRQ_CONTROL (1 << 31) #define GEN8_PCU_IRQ (1 << 30) @@ -2489,6 +2484,10 @@ #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which))) #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which))) +#define GEN8_GT_IRQ_REGS(which) I915_IRQ_REGS(GEN8_GT_IMR(which), \ + GEN8_GT_IER(which), \ + GEN8_GT_IIR(which)) + #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 #define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! 
*/ @@ -2506,9 +2505,7 @@ #define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */ #define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */ #define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */ -#define XELPD_PIPE_SOFT_UNDERRUN REG_BIT(22) /* adl/dg2+ */ #define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */ -#define XELPD_PIPE_HARD_UNDERRUN REG_BIT(21) /* adl/dg2+ */ #define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */ #define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */ #define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */ @@ -2540,6 +2537,10 @@ #define GEN8_PIPE_VSYNC REG_BIT(1) #define GEN8_PIPE_VBLANK REG_BIT(0) +#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \ + GEN8_DE_PIPE_IER(pipe), \ + GEN8_DE_PIPE_IIR(pipe)) + #define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A) #define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1) @@ -2575,6 +2576,10 @@ #define TGL_DE_PORT_AUX_DDIB REG_BIT(1) #define TGL_DE_PORT_AUX_DDIA REG_BIT(0) +#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \ + GEN8_DE_PORT_IER, \ + GEN8_DE_PORT_IIR) + #define GEN8_DE_MISC_ISR _MMIO(0x44460) #define GEN8_DE_MISC_IMR _MMIO(0x44464) #define GEN8_DE_MISC_IIR _MMIO(0x44468) @@ -2584,18 +2589,31 @@ #define GEN8_DE_MISC_GSE REG_BIT(27) #define GEN8_DE_EDP_PSR REG_BIT(19) #define XELPDP_PMDEMAND_RSP REG_BIT(3) +#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1) + +#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \ + GEN8_DE_MISC_IER, \ + GEN8_DE_MISC_IIR) #define GEN8_PCU_ISR _MMIO(0x444e0) #define GEN8_PCU_IMR _MMIO(0x444e4) #define GEN8_PCU_IIR _MMIO(0x444e8) #define GEN8_PCU_IER _MMIO(0x444ec) +#define GEN8_PCU_IRQ_REGS I915_IRQ_REGS(GEN8_PCU_IMR, \ + GEN8_PCU_IER, \ + GEN8_PCU_IIR) + #define GEN11_GU_MISC_ISR _MMIO(0x444f0) #define GEN11_GU_MISC_IMR _MMIO(0x444f4) #define GEN11_GU_MISC_IIR _MMIO(0x444f8) #define GEN11_GU_MISC_IER _MMIO(0x444fc) #define GEN11_GU_MISC_GSE (1 << 27) +#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \ + GEN11_GU_MISC_IER, \ + GEN11_GU_MISC_IIR) + #define GEN11_GFX_MSTR_IRQ _MMIO(0x190010) #define GEN11_MASTER_IRQ (1 << 31) #define GEN11_PCU_IRQ (1 << 30) @@ -2639,6 +2657,10 @@ GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \ GEN11_TBT_HOTPLUG(HPD_PORT_TC1)) +#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \ + GEN11_DE_HPD_IER, \ + GEN11_DE_HPD_IIR) + #define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030) #define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038) #define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4)) @@ -2659,6 +2681,10 @@ #define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin)) #define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0) +#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \ + PICAINTERRUPT_IER, \ + PICAINTERRUPT_IIR) + #define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200)) #define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6) #define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5) @@ -2671,6 +2697,7 @@ #define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16) #define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12) #define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8) +#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4) #define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6) #define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4) #define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0) @@ -2869,6 +2896,7 @@ #define SKL_DFSM_PIPE_C_DISABLE (1 << 28) #define TGL_DFSM_PIPE_D_DISABLE (1 << 22) #define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7) +#define 
XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3) #define XE2LPD_DE_CAP _MMIO(0x41100) #define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30) @@ -3015,6 +3043,10 @@ #define SDEIIR _MMIO(0xc4008) #define SDEIER _MMIO(0xc400c) +#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \ + SDEIER, \ + SDEIIR) + #define SERR_INT _MMIO(0xc4040) #define SERR_INT_POISON (1 << 31) #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3)) @@ -3098,11 +3130,12 @@ #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) #define _PCH_FPA0 0xc6040 +#define _PCH_FPB0 0xc6048 +#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0) #define FP_CB_TUNE (0x3 << 22) + #define _PCH_FPA1 0xc6044 -#define _PCH_FPB0 0xc6048 #define _PCH_FPB1 0xc604c -#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0) #define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1) #define PCH_DPLL_TEST _MMIO(0xc606c) @@ -3155,50 +3188,93 @@ /* transcoder */ #define _PCH_TRANS_HTOTAL_A 0xe0000 +#define _PCH_TRANS_HTOTAL_B 0xe1000 +#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) #define TRANS_HTOTAL_SHIFT 16 #define TRANS_HACTIVE_SHIFT 0 + #define _PCH_TRANS_HBLANK_A 0xe0004 +#define _PCH_TRANS_HBLANK_B 0xe1004 +#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) #define TRANS_HBLANK_END_SHIFT 16 #define TRANS_HBLANK_START_SHIFT 0 + #define _PCH_TRANS_HSYNC_A 0xe0008 +#define _PCH_TRANS_HSYNC_B 0xe1008 +#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) #define TRANS_HSYNC_END_SHIFT 16 #define TRANS_HSYNC_START_SHIFT 0 + #define _PCH_TRANS_VTOTAL_A 0xe000c +#define _PCH_TRANS_VTOTAL_B 0xe100c +#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) #define TRANS_VTOTAL_SHIFT 16 #define TRANS_VACTIVE_SHIFT 0 + #define _PCH_TRANS_VBLANK_A 0xe0010 +#define _PCH_TRANS_VBLANK_B 0xe1010 +#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) #define TRANS_VBLANK_END_SHIFT 16 #define TRANS_VBLANK_START_SHIFT 0 + #define _PCH_TRANS_VSYNC_A 0xe0014 +#define _PCH_TRANS_VSYNC_B 0xe1014 +#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) #define TRANS_VSYNC_END_SHIFT 16 #define TRANS_VSYNC_START_SHIFT 0 + #define _PCH_TRANS_VSYNCSHIFT_A 0xe0028 +#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 +#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B) #define _PCH_TRANSA_DATA_M1 0xe0030 +#define _PCH_TRANSB_DATA_M1 0xe1030 +#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) + #define _PCH_TRANSA_DATA_N1 0xe0034 +#define _PCH_TRANSB_DATA_N1 0xe1034 +#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) + #define _PCH_TRANSA_DATA_M2 0xe0038 +#define _PCH_TRANSB_DATA_M2 0xe1038 +#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) + #define _PCH_TRANSA_DATA_N2 0xe003c +#define _PCH_TRANSB_DATA_N2 0xe103c +#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) + #define _PCH_TRANSA_LINK_M1 0xe0040 +#define _PCH_TRANSB_LINK_M1 0xe1040 +#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) + #define _PCH_TRANSA_LINK_N1 0xe0044 +#define _PCH_TRANSB_LINK_N1 0xe1044 +#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) + #define _PCH_TRANSA_LINK_M2 0xe0048 +#define 
_PCH_TRANSB_LINK_M2 0xe1048 +#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) + #define _PCH_TRANSA_LINK_N2 0xe004c +#define _PCH_TRANSB_LINK_N2 0xe104c +#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) /* Per-transcoder DIP controls (PCH) */ #define _VIDEO_DIP_CTL_A 0xe0200 +#define _VIDEO_DIP_CTL_B 0xe1200 +#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) + #define _VIDEO_DIP_DATA_A 0xe0208 +#define _VIDEO_DIP_DATA_B 0xe1208 +#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) + #define _VIDEO_DIP_GCP_A 0xe0210 +#define _VIDEO_DIP_GCP_B 0xe1210 +#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) #define GCP_COLOR_INDICATION (1 << 2) #define GCP_DEFAULT_PHASE_ENABLE (1 << 1) #define GCP_AV_MUTE (1 << 0) -#define _VIDEO_DIP_CTL_B 0xe1200 -#define _VIDEO_DIP_DATA_B 0xe1208 -#define _VIDEO_DIP_GCP_B 0xe1210 - -#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) -#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) -#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) - /* Per-transcoder DIP controls (VLV) */ #define _VLV_VIDEO_DIP_CTL_A 0x60200 #define _VLV_VIDEO_DIP_CTL_B 0x61170 @@ -3225,36 +3301,54 @@ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C) /* Haswell DIP controls */ - #define _HSW_VIDEO_DIP_CTL_A 0x60200 -#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220 -#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260 -#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 -#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 -#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 -#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484 -#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440 -#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 -#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 -#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 -#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300 -#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344 -#define _HSW_VIDEO_DIP_GCP_A 0x60210 - #define _HSW_VIDEO_DIP_CTL_B 0x61200 +#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A) + +#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220 #define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220 +#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260 #define _HSW_VIDEO_DIP_VS_DATA_B 0x61260 +#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 #define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 +#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 #define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 +#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 #define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320 +#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) + +/*ADLP and later: */ +#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484 #define _ADL_VIDEO_DIP_AS_DATA_B 0x61484 +#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\ + _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4) + +#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440 #define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440 +#define 
GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 #define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240 +#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 #define _HSW_VIDEO_DIP_VS_ECC_B 0x61280 +#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 #define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 +#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300 #define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300 +#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344 #define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344 + +#define _HSW_VIDEO_DIP_GCP_A 0x60210 #define _HSW_VIDEO_DIP_GCP_B 0x61210 +#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A) /* Icelake PPS_DATA and _ECC DIP Registers. * These are available for transcoders B,C and eDP. @@ -3264,62 +3358,16 @@ #define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350 #define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350 +#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) + #define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4 #define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 - -#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A) -#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A) -#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) -#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) -#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) -/*ADLP and later: */ -#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\ - _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4) #define _HSW_STEREO_3D_CTL_A 0x70020 -#define S3D_ENABLE (1 << 31) #define _HSW_STEREO_3D_CTL_B 0x71020 - #define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A) - -#define _PCH_TRANS_HTOTAL_B 0xe1000 -#define _PCH_TRANS_HBLANK_B 0xe1004 -#define _PCH_TRANS_HSYNC_B 0xe1008 -#define _PCH_TRANS_VTOTAL_B 0xe100c -#define _PCH_TRANS_VBLANK_B 0xe1010 -#define _PCH_TRANS_VSYNC_B 0xe1014 -#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 - -#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) -#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) -#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) -#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) -#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) -#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) -#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B) - -#define _PCH_TRANSB_DATA_M1 0xe1030 
-#define _PCH_TRANSB_DATA_N1 0xe1034 -#define _PCH_TRANSB_DATA_M2 0xe1038 -#define _PCH_TRANSB_DATA_N2 0xe103c -#define _PCH_TRANSB_LINK_M1 0xe1040 -#define _PCH_TRANSB_LINK_N1 0xe1044 -#define _PCH_TRANSB_LINK_M2 0xe1048 -#define _PCH_TRANSB_LINK_N2 0xe104c - -#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) -#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) -#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) -#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) -#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) -#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) -#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) -#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) +#define S3D_ENABLE (1 << 31) #define _PCH_TRANSACONF 0xf0008 #define _PCH_TRANSBCONF 0xf1008 @@ -4125,6 +4173,7 @@ enum skl_power_gate { #define _DPLL1_CFGCR1 0x6C040 #define _DPLL2_CFGCR1 0x6C048 #define _DPLL3_CFGCR1 0x6C050 +#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) #define DPLL_CFGCR1_FREQ_ENABLE (1 << 31) #define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9) #define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9) @@ -4133,6 +4182,7 @@ enum skl_power_gate { #define _DPLL1_CFGCR2 0x6C044 #define _DPLL2_CFGCR2 0x6C04C #define _DPLL3_CFGCR2 0x6C054 +#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) #define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8) #define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8) #define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7) @@ -4151,9 +4201,6 @@ enum skl_power_gate { #define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) -#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) -#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) - /* ICL Clocks */ #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) #define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5)) @@ -4246,7 +4293,6 @@ enum skl_power_gate { /* ADL-P Type C PLL */ #define PORTTC1_PLL_ENABLE 0x46038 #define PORTTC2_PLL_ENABLE 0x46040 - #define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \ PORTTC1_PLL_ENABLE, \ PORTTC2_PLL_ENABLE) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index a685db1e815d..e251bcc0c89f 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -284,4 +284,14 @@ typedef struct { #define i915_mmio_reg_equal(a, b) (i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b)) #define i915_mmio_reg_valid(r) (!i915_mmio_reg_equal(r, INVALID_MMIO_REG)) +/* A triplet for IMR/IER/IIR registers. 
*/ +struct i915_irq_regs { + i915_reg_t imr; + i915_reg_t ier; + i915_reg_t iir; +}; + +#define I915_IRQ_REGS(_imr, _ier, _iir) \ + ((const struct i915_irq_regs){ .imr = (_imr), .ier = (_ier), .iir = (_iir) }) + #endif /* __I915_REG_DEFS__ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f8373a461f17..f18f1acf2158 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -118,6 +118,7 @@ void i915_save_display(struct drm_i915_private *dev_priv) void i915_restore_display(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); if (!HAS_DISPLAY(dev_priv)) @@ -134,7 +135,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, DSPARB(dev_priv), dev_priv->regfile.saveDSPARB); - intel_vga_redisable(dev_priv); + intel_vga_redisable(display); - intel_gmbus_reset(dev_priv); + intel_gmbus_reset(display); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index ce1cbee1b39d..09d89bdf82f4 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -322,7 +322,7 @@ DEFINE_EVENT(i915_request, i915_request_add, TP_ARGS(rq) ); -#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) +#if IS_ENABLED(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) DEFINE_EVENT(i915_request, i915_request_guc_submit, TP_PROTO(struct i915_request *rq), TP_ARGS(rq) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 71bdc89bd621..609214231ffc 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -270,7 +270,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. 
*/ -#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) #else # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index d2f064d2525c..776f8cc51b2f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -2157,7 +2157,7 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma) int i915_vma_unbind(struct i915_vma *vma) { struct i915_address_space *vm = vma->vm; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; int err; assert_object_held_shared(vma->obj); @@ -2196,7 +2196,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm) { struct drm_i915_gem_object *obj = vma->obj; struct i915_address_space *vm = vma->vm; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; struct dma_fence *fence; int err; diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index 26c4dbda076e..f76642886569 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -502,7 +502,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915) CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - if (IS_IVB_GT1(i915)) + if (INTEL_INFO(i915)->gt == 1) intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); else { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 467999249b9a..856b30fa37dc 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -25,7 +25,7 @@ #include <linux/string_helpers.h> #include <drm/drm_print.h> -#include <drm/intel/i915_pciids.h> +#include <drm/intel/pciids.h> #include "gt/intel_gt_regs.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index a9fcaf33df9e..ef84eea9ba0b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -140,7 +140,6 @@ enum intel_ppgtt_type { #define DEV_INFO_FOR_EACH_FLAG(func) \ func(is_mobile); \ - func(is_lp); \ func(require_force_probe); \ func(is_dgfx); \ /* Keep has_* in alphabetical order */ \ diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h index 73900c098d59..dc2477179c3e 100644 --- a/drivers/gpu/drm/i915/intel_mchbar_regs.h +++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h @@ -207,6 +207,10 @@ #define PCU_PACKAGE_ENERGY_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x593c) #define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948) + +#define PCU_PACKAGE_TEMPERATURE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5978) +#define TEMP_MASK REG_GENMASK(7, 0) + #define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994) #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) #define RP0_CAP_MASK REG_GENMASK(7, 0) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 2d0647aca964..1a47ecfd3fd8 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -66,7 +66,7 @@ static intel_wakeref_t track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { if (!rpm->available || rpm->no_wakeref_tracking) - return -1; + return 
INTEL_WAKEREF_DEF; return intel_ref_tracker_alloc(&rpm->debug); } @@ -114,7 +114,7 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) static intel_wakeref_t track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { - return -1; + return INTEL_WAKEREF_DEF; } static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, @@ -250,7 +250,7 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm pm_runtime_get_if_active(rpm->kdev) <= 0) || (!ignore_usecount && pm_runtime_get_if_in_use(rpm->kdev) <= 0)) - return 0; + return NULL; } intel_runtime_pm_acquire(rpm, true); @@ -336,7 +336,7 @@ intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref) */ void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm) { - __intel_runtime_pm_put(rpm, -1, true); + __intel_runtime_pm_put(rpm, INTEL_WAKEREF_DEF, true); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index de3579d399e1..e22669d61e95 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -42,7 +42,6 @@ struct intel_runtime_pm { atomic_t wakeref_count; struct device *kdev; /* points to i915->drm.dev */ bool available; - bool irqs_enabled; bool no_wakeref_tracking; /* @@ -97,10 +96,16 @@ intel_rpm_wakelock_count(int wakeref_count) return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT; } +static inline bool +intel_runtime_pm_suspended(struct intel_runtime_pm *rpm) +{ + return pm_runtime_suspended(rpm->kdev); +} + static inline void assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm) { - WARN_ONCE(pm_runtime_suspended(rpm->kdev), + WARN_ONCE(intel_runtime_pm_suspended(rpm), "Device suspended during HW access\n"); } @@ -189,15 +194,15 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); #define with_intel_runtime_pm(rpm, wf) \ for ((wf) = intel_runtime_pm_get(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) #define with_intel_runtime_pm_if_in_use(rpm, wf) \ for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) #define with_intel_runtime_pm_if_active(rpm, wf) \ for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index dea2f63184f8..87f246047312 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -27,11 +27,11 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf) if (!atomic_read(&wf->count)) { INTEL_WAKEREF_BUG_ON(wf->wakeref); wf->wakeref = wakeref; - wakeref = 0; + wakeref = NULL; ret = wf->ops->get(wf); if (ret) { - wakeref = xchg(&wf->wakeref, 0); + wakeref = xchg(&wf->wakeref, NULL); wake_up_var(&wf->wakeref); goto unlock; } @@ -52,7 +52,7 @@ unlock: static void ____intel_wakeref_put_last(struct intel_wakeref *wf) { - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (unlikely(!atomic_dec_and_test(&wf->count))) @@ -61,7 +61,7 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf) /* ops->put() must 
reschedule its own release on error/deferral */ if (likely(!wf->ops->put(wf))) { INTEL_WAKEREF_BUG_ON(!wf->wakeref); - wakeref = xchg(&wf->wakeref, 0); + wakeref = xchg(&wf->wakeref, NULL); wake_up_var(&wf->wakeref); } @@ -107,7 +107,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf, __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex); atomic_set(&wf->count, 0); - wf->wakeref = 0; + wf->wakeref = NULL; INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work); lockdep_init_map(&wf->work.work.lockdep_map, @@ -142,7 +142,7 @@ static void wakeref_auto_timeout(struct timer_list *t) if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags)) return; - wakeref = fetch_and_zero(&wf->wakeref); + wakeref = xchg(&wf->wakeref, NULL); spin_unlock_irqrestore(&wf->lock, flags); intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); @@ -154,7 +154,7 @@ void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, spin_lock_init(&wf->lock); timer_setup(&wf->timer, wakeref_auto_timeout, 0); refcount_set(&wf->count, 0); - wf->wakeref = 0; + wf->wakeref = NULL; wf->i915 = i915; } diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 68aa3be48251..48836ef52d40 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -21,7 +21,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> -typedef unsigned long intel_wakeref_t; +typedef struct ref_tracker *intel_wakeref_t; #define INTEL_REFTRACK_DEAD_COUNT 16 #define INTEL_REFTRACK_PRINT_LIMIT 16 @@ -273,7 +273,7 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf) */ int intel_wakeref_wait_for_idle(struct intel_wakeref *wf); -#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1)) +#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT) static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir) { @@ -281,17 +281,19 @@ static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *di ref_tracker_alloc(dir, &user, GFP_NOWAIT); - return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF; + return user ?: INTEL_WAKEREF_DEF; } static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir, - intel_wakeref_t handle) + intel_wakeref_t wakeref) { - struct ref_tracker *user; + if (wakeref == INTEL_WAKEREF_DEF) + wakeref = NULL; - user = (handle == INTEL_WAKEREF_DEF) ? 
NULL : (void *)handle; + if (WARN_ON(IS_ERR(wakeref))) + return; - ref_tracker_free(dir, &user); + ref_tracker_free(dir, &wakeref); } void intel_ref_tracker_show(struct ref_tracker_dir *dir, @@ -314,7 +316,7 @@ static inline void intel_wakeref_untrack(struct intel_wakeref *wf, static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf) { - return -1; + return INTEL_WAKEREF_DEF; } static inline void intel_wakeref_untrack(struct intel_wakeref *wf, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 75278e78ca90..9cf169665d7c 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -170,7 +170,7 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9 static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915) { - if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp) + if (!HAS_PXP(i915)) return NULL; /* @@ -461,9 +461,11 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp) } int intel_pxp_key_check(struct intel_pxp *pxp, - struct drm_i915_gem_object *obj, + struct drm_gem_object *_obj, bool assign) { + struct drm_i915_gem_object *obj = to_intel_bo(_obj); + if (!intel_pxp_is_active(pxp)) return -ENODEV; @@ -529,7 +531,7 @@ void intel_pxp_invalidate(struct intel_pxp *pxp) if (ctx->pxp_wakeref) { intel_runtime_pm_put(&i915->runtime_pm, ctx->pxp_wakeref); - ctx->pxp_wakeref = 0; + ctx->pxp_wakeref = NULL; } spin_lock_irq(&i915->gem.contexts.lock); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index d9372f6f7797..4ed97db5e7c6 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -9,7 +9,7 @@ #include <linux/errno.h> #include <linux/types.h> -struct drm_i915_gem_object; +struct drm_gem_object; struct drm_i915_private; struct intel_pxp; @@ -32,7 +32,7 @@ int intel_pxp_start(struct intel_pxp *pxp); void intel_pxp_end(struct intel_pxp *pxp); int intel_pxp_key_check(struct intel_pxp *pxp, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, bool assign); void intel_pxp_invalidate(struct intel_pxp *pxp); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 91794ca17a58..ae57eb03dfca 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -137,7 +137,7 @@ static const struct intel_device_info mock_info = { struct drm_i915_private *mock_gem_device(void) { -#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) +#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU) static struct dev_iommu fake_iommu = { .priv = (void *)-1 }; #endif struct drm_i915_private *i915; @@ -153,7 +153,7 @@ struct drm_i915_private *mock_gem_device(void) dev_set_name(&pdev->dev, "mock"); dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); -#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) +#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU) /* HACK to disable iommu for the fake device; force identity mapping */ pdev->dev.iommu = &fake_iommu; #endif @@ -203,7 +203,7 @@ struct drm_i915_private *mock_gem_device(void) intel_root_gt_init_early(i915); mock_uncore_init(&i915->uncore, i915); atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */ - to_gt(i915)->awake = -ENODEV; + to_gt(i915)->awake = INTEL_WAKEREF_MOCK_GT; mock_gt_probe(i915); ret = 
intel_region_ttm_device_init(i915); diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 4aba47bccc63..9e310f4099f4 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -714,7 +714,7 @@ void intel_dram_detect(struct drm_i915_private *i915) * Assume level 0 watermark latency adjustment is needed until proven * otherwise, this w/a is not needed by bxt/glk. */ - dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915); + dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915); if (DISPLAY_VER(i915) >= 14) ret = xelpdp_get_dram_info(i915); @@ -722,7 +722,7 @@ void intel_dram_detect(struct drm_i915_private *i915) ret = gen12_get_dram_info(i915); else if (GRAPHICS_VER(i915) >= 11) ret = gen11_get_dram_info(i915); - else if (IS_GEN9_LP(i915)) + else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915)) ret = bxt_get_dram_info(i915); else ret = skl_get_dram_info(i915); diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c index 542eea50093c..842db43e46c0 100644 --- a/drivers/gpu/drm/i915/soc/intel_pch.c +++ b/drivers/gpu/drm/i915/soc/intel_pch.c @@ -124,7 +124,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) && !IS_ROCKETLAKE(dev_priv) && - !IS_GEN9_BC(dev_priv)); + !IS_SKYLAKE(dev_priv) && + !IS_KABYLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv) && + !IS_COMETLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); diff --git a/drivers/gpu/drm/i915/soc/intel_rom.c b/drivers/gpu/drm/i915/soc/intel_rom.c new file mode 100644 index 000000000000..243d98cab8c3 --- /dev/null +++ b/drivers/gpu/drm/i915/soc/intel_rom.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" + +#include "intel_rom.h" +#include "intel_uncore.h" + +struct intel_rom { + /* for PCI ROM */ + struct pci_dev *pdev; + void __iomem *oprom; + + /* for SPI */ + struct intel_uncore *uncore; + loff_t offset; + + size_t size; + + u32 (*read32)(struct intel_rom *rom, loff_t offset); + u16 (*read16)(struct intel_rom *rom, loff_t offset); + void (*read_block)(struct intel_rom *rom, void *data, loff_t offset, size_t size); + void (*free)(struct intel_rom *rom); +}; + +static u32 spi_read32(struct intel_rom *rom, loff_t offset) +{ + intel_uncore_write(rom->uncore, PRIMARY_SPI_ADDRESS, + rom->offset + offset); + + return intel_uncore_read(rom->uncore, PRIMARY_SPI_TRIGGER); +} + +static u16 spi_read16(struct intel_rom *rom, loff_t offset) +{ + return spi_read32(rom, offset) & 0xffff; +} + +struct intel_rom *intel_rom_spi(struct drm_i915_private *i915) +{ + struct intel_rom *rom; + u32 static_region; + + rom = kzalloc(sizeof(*rom), GFP_KERNEL); + if (!rom) + return NULL; + + rom->uncore = &i915->uncore; + + static_region = intel_uncore_read(rom->uncore, SPI_STATIC_REGIONS); + static_region &= OPTIONROM_SPI_REGIONID_MASK; + intel_uncore_write(rom->uncore, PRIMARY_SPI_REGIONID, static_region); + + rom->offset = intel_uncore_read(rom->uncore, OROM_OFFSET) & OROM_OFFSET_MASK; + + rom->size = 0x200000; + + rom->read32 = spi_read32; + rom->read16 = spi_read16; + + return rom; +} + +static u32 pci_read32(struct intel_rom *rom, loff_t offset) +{ + return ioread32(rom->oprom + offset); +} + +static u16 pci_read16(struct intel_rom 
*rom, loff_t offset) +{ + return ioread16(rom->oprom + offset); +} + +static void pci_read_block(struct intel_rom *rom, void *data, + loff_t offset, size_t size) +{ + memcpy_fromio(data, rom->oprom + offset, size); +} + +static void pci_free(struct intel_rom *rom) +{ + pci_unmap_rom(rom->pdev, rom->oprom); +} + +struct intel_rom *intel_rom_pci(struct drm_i915_private *i915) +{ + struct intel_rom *rom; + + rom = kzalloc(sizeof(*rom), GFP_KERNEL); + if (!rom) + return NULL; + + rom->pdev = to_pci_dev(i915->drm.dev); + + rom->oprom = pci_map_rom(rom->pdev, &rom->size); + if (!rom->oprom) { + kfree(rom); + return NULL; + } + + rom->read32 = pci_read32; + rom->read16 = pci_read16; + rom->read_block = pci_read_block; + rom->free = pci_free; + + return rom; +} + +u32 intel_rom_read32(struct intel_rom *rom, loff_t offset) +{ + return rom->read32(rom, offset); +} + +u16 intel_rom_read16(struct intel_rom *rom, loff_t offset) +{ + return rom->read16(rom, offset); +} + +void intel_rom_read_block(struct intel_rom *rom, void *data, + loff_t offset, size_t size) +{ + u32 *ptr = data; + loff_t index; + + if (rom->read_block) { + rom->read_block(rom, data, offset, size); + return; + } + + for (index = 0; index < size; index += 4) + *ptr++ = rom->read32(rom, offset + index); +} + +loff_t intel_rom_find(struct intel_rom *rom, u32 needle) +{ + loff_t offset; + + for (offset = 0; offset < rom->size; offset += 4) { + if (rom->read32(rom, offset) == needle) + return offset; + } + + return -ENOENT; +} + +size_t intel_rom_size(struct intel_rom *rom) +{ + return rom->size; +} + +void intel_rom_free(struct intel_rom *rom) +{ + if (rom && rom->free) + rom->free(rom); + + kfree(rom); +} diff --git a/drivers/gpu/drm/i915/soc/intel_rom.h b/drivers/gpu/drm/i915/soc/intel_rom.h new file mode 100644 index 000000000000..fb2979c8ef7f --- /dev/null +++ b/drivers/gpu/drm/i915/soc/intel_rom.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef __INTEL_ROM_H__ +#define __INTEL_ROM_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_rom; + +struct intel_rom *intel_rom_spi(struct drm_i915_private *i915); +struct intel_rom *intel_rom_pci(struct drm_i915_private *i915); + +u32 intel_rom_read32(struct intel_rom *rom, loff_t offset); +u16 intel_rom_read16(struct intel_rom *rom, loff_t offset); +void intel_rom_read_block(struct intel_rom *rom, void *data, + loff_t offset, size_t size); +loff_t intel_rom_find(struct intel_rom *rom, u32 needle); +size_t intel_rom_size(struct intel_rom *rom); +void intel_rom_free(struct intel_rom *rom); + +#endif /* __INTEL_ROM_H__ */ diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c index 4deeac7ed40a..2bbdc05a3b97 100644 --- a/drivers/gpu/drm/imagination/pvr_ccb.c +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -321,7 +321,7 @@ static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev) bool reserved = false; u32 retries = 0; - while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT || + while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) || retries < RESERVE_SLOT_MIN_RETRIES) { reserved = pvr_kccb_try_reserve_slot(pvr_dev); if (reserved) diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index 4cb3494c0bb2..5edc3c01af72 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -73,24 +73,12 @@ process_static_context_state(struct pvr_device *pvr_dev, const 
struct pvr_stream void *stream; int err; - stream = kzalloc(stream_size, GFP_KERNEL); - if (!stream) - return -ENOMEM; - - if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) { - err = -EFAULT; - goto err_free; - } + stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size); + if (IS_ERR(stream)) + return PTR_ERR(stream); err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest); - if (err) - goto err_free; - - kfree(stream); - - return 0; -err_free: kfree(stream); return err; diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index fb17196e05f4..36c0e768698e 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -221,7 +221,7 @@ err_drm_dev_exit: return ret; } -static __always_inline u64 +static __always_inline __maybe_unused u64 pvr_fw_version_packed(u32 major, u32 minor) { return ((u64)major << 32) | minor; diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 78c2f3c6dce0..618503a212a7 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -90,20 +90,13 @@ static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job, void *stream; int err; - stream = kzalloc(stream_len, GFP_KERNEL); - if (!stream) - return -ENOMEM; - - if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) { - err = -EFAULT; - goto err_free_stream; - } + stream = memdup_user(u64_to_user_ptr(stream_userptr), stream_len); + if (IS_ERR(stream)) + return PTR_ERR(stream); err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job); -err_free_stream: kfree(stream); - return err; } diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index 20cb46012082..c4f08432882b 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue) } } - drm_sched_start(&queue->scheduler); + drm_sched_start(&queue->scheduler, 0); } /** @@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job) } mutex_unlock(&pvr_dev->queues.lock); - drm_sched_start(sched); + drm_sched_start(sched, 0); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 7bd6ba4c6e8a..363f885a7098 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -654,9 +654,7 @@ pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle) xa_lock(&pvr_file->vm_ctx_handles); vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle); - if (vm_ctx) - kref_get(&vm_ctx->ref_count); - + pvr_vm_context_get(vm_ctx); xa_unlock(&pvr_file->vm_ctx_handles); return vm_ctx; diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 59e3b6a1dff0..e014ed3ae66c 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -1,12 +1,13 @@ config DRM_IMX_DCSS tristate "i.MX8MQ DCSS" select IMX_IRQSTEER + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS - depends on DRM && ARCH_MXC && ARM64 + depends on DRM && ((ARCH_MXC && ARM64) || COMPILE_TEST) help Choose this if you have a NXP i.MX8MQ based system and want to use the Display Controller Subsystem. This option enables DCSS support. 
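[Editor's note] The pvr_context.c and pvr_job.c hunks above replace the open-coded kzalloc() + copy_from_user() sequence with memdup_user(), which allocates and copies in a single call and reports failure via ERR_PTR, collapsing two error paths into one. A minimal sketch of the pattern, assuming nothing beyond the standard kernel API (the function and parameter names here are illustrative, not from the patch):

static int copy_stream_from_user(u64 user_ptr, size_t len, void **out)
{
	void *buf;

	/*
	 * memdup_user() allocates with GFP_KERNEL and copies from user
	 * space; it returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) on
	 * failure, so no separate allocation/copy error labels are needed.
	 */
	buf = memdup_user(u64_to_user_ptr(user_ptr), len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	*out = buf;
	return 0;
}

The caller still owns the buffer and frees it with kfree() after processing, exactly as the converted pvr_fw_cmd_init() and process_static_context_state() do.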
diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c index 31267c00782f..af91e45b5d13 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c +++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c @@ -206,15 +206,13 @@ int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm) if (crtc->irq < 0) return crtc->irq; - ret = request_irq(crtc->irq, dcss_crtc_irq_handler, - 0, "dcss_drm", crtc); + ret = request_irq(crtc->irq, dcss_crtc_irq_handler, IRQF_NO_AUTOEN, + "dcss_drm", crtc); if (ret) { dev_err(dcss->dev, "irq request failed with %d.\n", ret); return ret; } - disable_irq(crtc->irq); - return 0; } diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c index 2968f5d5bd41..6bbfd9aa27ac 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dtg.c +++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c @@ -134,14 +134,12 @@ static int dcss_dtg_irq_config(struct dcss_dtg *dtg, dtg->base_reg + DCSS_DTG_INT_MASK); ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler, - 0, "dcss_ctxld_kick", dtg); + IRQF_NO_AUTOEN, "dcss_ctxld_kick", dtg); if (ret) { dev_err(dtg->dev, "dtg: irq request failed.\n"); return ret; } - disable_irq(dtg->ctxld_kick_irq); - dtg->ctxld_kick_irq_en = false; return 0; diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index d0ea4e97cded..63a335c62296 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -5,7 +5,9 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -28,6 +30,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { static const struct drm_driver dcss_kms_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &dcss_cma_fops, .name = "imx-dcss", .desc = "i.MX8MQ Display Subsystem", @@ -145,7 +148,7 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss) if (ret) goto cleanup_crtc; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return kms; diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c index 825728c356ff..32c3f46b21da 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c +++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c @@ -136,7 +136,7 @@ static int div_q(int A, int B) else temp -= B / 2; - result = (int)(temp / B); + result = div_s64(temp, B); return result; } @@ -239,7 +239,7 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, ll_temp = coef[phase][i]; ll_temp <<= PSC_COEFF_PRECISION; ll_temp += sum >> 1; - ll_temp /= sum; + ll_temp = div_s64(ll_temp, sum); coef[phase][i] = (int)ll_temp; } } diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig index bacf0655ebaf..acaf25089001 100644 --- a/drivers/gpu/drm/imx/ipuv3/Kconfig +++ b/drivers/gpu/drm/imx/ipuv3/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_IMX tristate "DRM Support for Freescale i.MX" + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select VIDEOMODE_HELPERS select DRM_GEM_DMA_HELPER @@ -11,8 +12,11 @@ config DRM_IMX config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" - select DRM_PANEL depends on DRM_IMX + select DRM_BRIDGE + select DRM_BRIDGE_CONNECTOR + select DRM_IMX_LEGACY_BRIDGE + select DRM_PANEL_BRIDGE select 
VIDEOMODE_HELPERS config DRM_IMX_TVE @@ -26,9 +30,13 @@ config DRM_IMX_TVE config DRM_IMX_LDB tristate "Support for LVDS displays" - depends on DRM_IMX && MFD_SYSCON + depends on DRM_IMX depends on COMMON_CLK - select DRM_PANEL + select MFD_SYSCON + select DRM_BRIDGE + select DRM_BRIDGE_CONNECTOR + select DRM_PANEL_BRIDGE + select DRM_IMX_LEGACY_BRIDGE help Choose this to enable the internal LVDS Display Bridge (LDB) found on i.MX53 and i.MX6 processors. diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c index 4cfabcf7375a..ced06bd8eae8 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -34,13 +35,6 @@ module_param(legacyfb_depth, int, 0444); DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops); -void imx_drm_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} -EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); - static int imx_drm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -163,6 +157,7 @@ static int imx_drm_dumb_create(struct drm_file *file_priv, static const struct drm_driver imx_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .ioctls = imx_drm_ioctls, .num_ioctls = ARRAY_SIZE(imx_drm_ioctls), .fops = &imx_drm_driver_fops, @@ -249,7 +244,7 @@ static int imx_drm_bind(struct device *dev) if (ret) goto err_poll_fini; - drm_fbdev_dma_setup(drm, legacyfb_depth); + drm_client_setup_with_color_mode(drm, legacyfb_depth); return 0; diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h index e721bebda2bd..0c85bf83ffbf 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm.h +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h @@ -3,14 +3,9 @@ #define _IMX_DRM_H_ struct device_node; -struct drm_crtc; struct drm_connector; struct drm_device; -struct drm_display_mode; struct drm_encoder; -struct drm_framebuffer; -struct drm_plane; -struct platform_device; struct imx_crtc_state { struct drm_crtc_state base; @@ -24,21 +19,12 @@ static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s) { return container_of(s, struct imx_crtc_state, base); } -int imx_drm_init_drm(struct platform_device *pdev, - int preferred_bpp); -int imx_drm_exit_drm(void); extern struct platform_driver ipu_drm_driver; -void imx_drm_mode_config_init(struct drm_device *drm); - -struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); - int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np); -void imx_drm_connector_destroy(struct drm_connector *connector); - int ipu_planes_assign_pre(struct drm_device *dev, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 793dfb1a3ed0..ff74018ac5cd 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -19,19 +19,16 @@ #include <linux/regmap.h> #include <linux/videodev2.h> -#include <video/of_display_timing.h> -#include <video/of_videomode.h> - #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_edid.h> 
+#include <drm/drm_bridge_connector.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> -#include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/bridge/imx.h> #include "imx-drm.h" @@ -55,7 +52,6 @@ struct imx_ldb_channel; struct imx_ldb_encoder { - struct drm_connector connector; struct drm_encoder encoder; struct imx_ldb_channel *channel; }; @@ -65,25 +61,13 @@ struct imx_ldb; struct imx_ldb_channel { struct imx_ldb *ldb; - /* Defines what is connected to the ldb, only one at a time */ - struct drm_panel *panel; struct drm_bridge *bridge; struct device_node *child; - struct i2c_adapter *ddc; int chno; - const struct drm_edid *drm_edid; - struct drm_display_mode mode; - int mode_valid; u32 bus_format; - u32 bus_flags; }; -static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) -{ - return container_of(c, struct imx_ldb_encoder, connector)->channel; -} - static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e) { return container_of(e, struct imx_ldb_encoder, encoder)->channel; @@ -133,38 +117,6 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch, } } -static int imx_ldb_connector_get_modes(struct drm_connector *connector) -{ - struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); - int num_modes; - - num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector); - if (num_modes > 0) - return num_modes; - - if (!imx_ldb_ch->drm_edid && imx_ldb_ch->ddc) { - imx_ldb_ch->drm_edid = drm_edid_read_ddc(connector, - imx_ldb_ch->ddc); - drm_edid_connector_update(connector, imx_ldb_ch->drm_edid); - } - - if (imx_ldb_ch->drm_edid) - num_modes = drm_edid_connector_add_modes(connector); - - if (imx_ldb_ch->mode_valid) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &imx_ldb_ch->mode); - if (!mode) - return -EINVAL; - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - num_modes++; - } - - return num_modes; -} - static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, unsigned long serial_clk, unsigned long di_clk) { @@ -205,8 +157,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) return; } - drm_panel_prepare(imx_ldb_ch->panel); - if (dual) { clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]); clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]); @@ -245,8 +195,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) } regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); - - drm_panel_enable(imx_ldb_ch->panel); } static void @@ -323,8 +271,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; - drm_panel_disable(imx_ldb_ch->panel); - if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; if (imx_ldb_ch == &ldb->channel[1] || dual) @@ -358,8 +304,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) dev_err(ldb->dev, "unable to set di%d parent clock to original parent\n", mux); - - drm_panel_unprepare(imx_ldb_ch->panel); } static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, @@ -374,11 +318,12 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, /* Bus format description in DT overrides connector display info. 
*/ if (!bus_format && di->num_bus_formats) { bus_format = di->bus_formats[0]; - imx_crtc_state->bus_flags = di->bus_flags; } else { bus_format = imx_ldb_ch->bus_format; - imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags; } + + imx_crtc_state->bus_flags = di->bus_flags; + switch (bus_format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18; @@ -398,18 +343,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, } -static const struct drm_connector_funcs imx_ldb_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { - .get_modes = imx_ldb_connector_get_modes, -}; - static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, .enable = imx_ldb_encoder_enable, @@ -447,7 +380,6 @@ static int imx_ldb_register(struct drm_device *drm, return PTR_ERR(ldb_encoder); ldb_encoder->channel = imx_ldb_ch; - connector = &ldb_encoder->connector; encoder = &ldb_encoder->encoder; ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child); @@ -466,25 +398,16 @@ static int imx_ldb_register(struct drm_device *drm, drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); - if (imx_ldb_ch->bridge) { - ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0); - if (ret) - return ret; - } else { - /* - * We want to add the connector whenever there is no bridge - * that brings its own, not only when there is a panel. For - * historical reasons, the ldb driver can also work without - * a panel. 
- */ - drm_connector_helper_add(connector, - &imx_ldb_connector_helper_funcs); - drm_connector_init_with_ddc(drm, connector, - &imx_ldb_connector_funcs, - DRM_MODE_CONNECTOR_LVDS, - imx_ldb_ch->ddc); - drm_connector_attach_encoder(connector, encoder); - } + ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) + return PTR_ERR(connector); + + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -549,47 +472,6 @@ static const struct of_device_id imx_ldb_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids); -static int imx_ldb_panel_ddc(struct device *dev, - struct imx_ldb_channel *channel, struct device_node *child) -{ - struct device_node *ddc_node; - int ret; - - ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); - if (ddc_node) { - channel->ddc = of_find_i2c_adapter_by_node(ddc_node); - of_node_put(ddc_node); - if (!channel->ddc) { - dev_warn(dev, "failed to get ddc i2c adapter\n"); - return -EPROBE_DEFER; - } - } - - if (!channel->ddc) { - const void *edidp; - int edid_len; - - /* if no DDC available, fallback to hardcoded EDID */ - dev_dbg(dev, "no ddc available\n"); - - edidp = of_get_property(child, "edid", &edid_len); - if (edidp) { - channel->drm_edid = drm_edid_alloc(edidp, edid_len); - if (!channel->drm_edid) - return -ENOMEM; - } else if (!channel->panel) { - /* fallback to display-timings node */ - ret = of_get_drm_display_mode(child, - &channel->mode, - &channel->bus_flags, - OF_USE_NATIVE_MODE); - if (!ret) - channel->mode_valid = 1; - } - } - return 0; -} - static int imx_ldb_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; @@ -694,29 +576,22 @@ static int imx_ldb_probe(struct platform_device *pdev) * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ - ret = drm_of_find_panel_or_bridge(child, - imx_ldb->lvds_mux ? 4 : 2, 0, - &channel->panel, &channel->bridge); - if (ret && ret != -ENODEV) - goto free_child; - - /* panel ddc only if there is no bridge */ - if (!channel->bridge) { - ret = imx_ldb_panel_ddc(dev, channel, child); - if (ret) + channel->bridge = devm_drm_of_get_bridge(dev, child, + imx_ldb->lvds_mux ? 4 : 2, 0); + if (IS_ERR(channel->bridge)) { + ret = PTR_ERR(channel->bridge); + if (ret != -ENODEV) goto free_child; + channel->bridge = NULL; } bus_format = of_get_bus_format(dev, child); - if (bus_format == -EINVAL) { - /* - * If no bus format was specified in the device tree, - * we can still get it from the connected panel later. - */ - if (channel->panel && channel->panel->funcs && - channel->panel->funcs->get_modes) - bus_format = 0; - } + /* + * If no bus format was specified in the device tree, + * we can still get it from the connected panel later. + */ + if (bus_format == -EINVAL && channel->bridge) + bus_format = 0; if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); @@ -724,6 +599,20 @@ static int imx_ldb_probe(struct platform_device *pdev) goto free_child; } channel->bus_format = bus_format; + + /* + * legacy bridge doesn't handle bus_format, so create it after + * checking the bus_format property. 
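[Editor's note] The probe logic above asks the OF graph for a bridge, and the hunk that follows falls back to the new transitional helper when nothing is described downstream. Condensed, with error labels elided, the pattern is:

	bridge = devm_drm_of_get_bridge(dev, child, port, 0);
	if (IS_ERR(bridge)) {
		/* Real failures (including -EPROBE_DEFER) propagate. */
		if (PTR_ERR(bridge) != -ENODEV)
			return PTR_ERR(bridge);
		/* No bridge/panel in DT: emulate the old built-in
		 * connector so existing device trees keep working. */
		bridge = devm_imx_drm_legacy_bridge(dev, child,
						    DRM_MODE_CONNECTOR_LVDS);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);
	}

The LDB driver defers the legacy-bridge creation until after the bus_format check, precisely because, as the comment below notes, that bridge cannot report a bus format of its own.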
+ */ + if (!channel->bridge) { + channel->bridge = devm_imx_drm_legacy_bridge(dev, child, + DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(channel->bridge)) { + ret = PTR_ERR(channel->bridge); + goto free_child; + } + } + channel->child = child; } @@ -738,16 +627,6 @@ free_child: static void imx_ldb_remove(struct platform_device *pdev) { - struct imx_ldb *imx_ldb = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < 2; i++) { - struct imx_ldb_channel *channel = &imx_ldb->channel[i]; - - drm_edid_free(channel->drm_edid); - i2c_put_adapter(channel->ddc); - } - component_del(&pdev->dev, &imx_ldb_ops); } diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index 29f494bfff67..d46d07d25f51 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -305,9 +305,15 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder, return 0; } +static void imx_tve_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + static const struct drm_connector_funcs imx_tve_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, + .destroy = imx_tve_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index ef29c9a61a46..99db53e167bd 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -410,14 +410,12 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data) } ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]); - ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, - "imx_drm", ipu_crtc); + ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, + IRQF_NO_AUTOEN, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); return ret; } - /* Only enable IRQ when we actually need it to trigger work. 
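[Editor's note] The ipuv3-crtc hunk above (a6xx_gmu gets the same treatment later in this diff) folds the request-then-disable two-step into the request itself. The old sequence has a window in which the interrupt can fire between devm_request_irq() and disable_irq(); IRQF_NO_AUTOEN registers the handler with the line left masked. A sketch, with "foo" names standing in for a real driver:

	/* Handler installed, line not yet enabled: no early spurious IRQ. */
	ret = devm_request_irq(dev, irq, foo_irq_handler,
			       IRQF_NO_AUTOEN, "foo", priv);
	if (ret)
		return ret;

	/* ... later, once there is actually work for it to signal ... */
	enable_irq(irq);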
*/ - disable_irq(ipu_crtc->irq); return 0; } diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 91d7808a2d8d..70f62e89622e 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -12,21 +12,18 @@ #include <linux/platform_device.h> #include <linux/videodev2.h> -#include <video/of_display_timing.h> - #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_edid.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> -#include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/bridge/imx.h> #include "imx-drm.h" struct imx_parallel_display_encoder { - struct drm_connector connector; struct drm_encoder encoder; struct drm_bridge bridge; struct imx_parallel_display *pd; @@ -34,79 +31,15 @@ struct imx_parallel_display_encoder { struct imx_parallel_display { struct device *dev; - const struct drm_edid *drm_edid; u32 bus_format; - u32 bus_flags; - struct drm_display_mode mode; - struct drm_panel *panel; struct drm_bridge *next_bridge; }; -static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) -{ - return container_of(c, struct imx_parallel_display_encoder, connector)->pd; -} - static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) { return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; } -static int imx_pd_connector_get_modes(struct drm_connector *connector) -{ - struct imx_parallel_display *imxpd = con_to_imxpd(connector); - struct device_node *np = imxpd->dev->of_node; - int num_modes; - - num_modes = drm_panel_get_modes(imxpd->panel, connector); - if (num_modes > 0) - return num_modes; - - if (imxpd->drm_edid) { - drm_edid_connector_update(connector, imxpd->drm_edid); - num_modes = drm_edid_connector_add_modes(connector); - } - - if (np) { - struct drm_display_mode *mode = drm_mode_create(connector->dev); - int ret; - - if (!mode) - return 0; - - ret = of_get_drm_display_mode(np, &imxpd->mode, - &imxpd->bus_flags, - OF_USE_NATIVE_MODE); - if (ret) { - drm_mode_destroy(connector->dev, mode); - return 0; - } - - drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - num_modes++; - } - - return num_modes; -} - -static void imx_pd_bridge_enable(struct drm_bridge *bridge) -{ - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - - drm_panel_prepare(imxpd->panel); - drm_panel_enable(imxpd->panel); -} - -static void imx_pd_bridge_disable(struct drm_bridge *bridge) -{ - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - - drm_panel_disable(imxpd->panel); - drm_panel_unprepare(imxpd->panel); -} - static const u32 imx_pd_bus_fmts[] = { MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_BGR888_1X24, @@ -200,7 +133,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, { struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); struct drm_display_info *di = &conn_state->connector->display_info; - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); struct drm_bridge_state *next_bridge_state = NULL; struct drm_bridge *next_bridge; u32 bus_flags, bus_fmt; @@ -212,10 +144,8 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if (next_bridge_state) bus_flags = next_bridge_state->input_bus_cfg.flags; - else if (di->num_bus_formats) - bus_flags = di->bus_flags; 
else - bus_flags = imxpd->bus_flags; + bus_flags = di->bus_flags; bus_fmt = bridge_state->input_bus_cfg.format; if (!imx_pd_format_supported(bus_fmt)) @@ -231,21 +161,16 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static const struct drm_connector_funcs imx_pd_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; +static int imx_pd_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); -static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { - .get_modes = imx_pd_connector_get_modes, -}; + return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags); +} static const struct drm_bridge_funcs imx_pd_bridge_funcs = { - .enable = imx_pd_bridge_enable, - .disable = imx_pd_bridge_disable, + .attach = imx_pd_bridge_attach, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -270,7 +195,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(imxpd_encoder); imxpd_encoder->pd = imxpd; - connector = &imxpd_encoder->connector; encoder = &imxpd_encoder->encoder; bridge = &imxpd_encoder->bridge; @@ -278,28 +202,14 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - /* set the connector's dpms to OFF so that - * drm_helper_connector_dpms() won't return - * immediately since the current state is ON - * at this point. 
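[Editor's note] The bind code that follows switches parallel-display to the same connector scheme the LDB conversion above uses: attach the chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR so no bridge creates its own connector, then let the helper build a single connector that aggregates modes, EDID and bus flags from every bridge in the chain. The skeleton, error handling trimmed:

	ret = drm_bridge_attach(encoder, first_bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* One connector for the whole chain. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	drm_connector_attach_encoder(connector, encoder);

This is what makes the removed get_modes()/EDID/DDC code redundant: mode probing is now the job of whichever bridge sits at the end of the chain.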
- */ - connector->dpms = DRM_MODE_DPMS_OFF; - bridge->funcs = &imx_pd_bridge_funcs; - drm_bridge_attach(encoder, bridge, NULL, 0); - - if (imxpd->next_bridge) { - ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0); - if (ret < 0) - return ret; - } else { - drm_connector_helper_add(connector, - &imx_pd_connector_helper_funcs); - drm_connector_init(drm, connector, &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_DPI); - - drm_connector_attach_encoder(connector, encoder); - } + drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) + return PTR_ERR(connector); + + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -312,9 +222,7 @@ static int imx_pd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const u8 *edidp; struct imx_parallel_display *imxpd; - int edid_len; int ret; u32 bus_format = 0; const char *fmt; @@ -324,14 +232,13 @@ static int imx_pd_probe(struct platform_device *pdev) return -ENOMEM; /* port@1 is the output port */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, - &imxpd->next_bridge); - if (ret && ret != -ENODEV) + imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); + if (imxpd->next_bridge == ERR_PTR(-ENODEV)) + imxpd->next_bridge = devm_imx_drm_legacy_bridge(dev, np, DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(imxpd->next_bridge)) { + ret = PTR_ERR(imxpd->next_bridge); return ret; - - edidp = of_get_property(np, "edid", &edid_len); - if (edidp) - imxpd->drm_edid = drm_edid_alloc(edidp, edid_len); + } ret = of_property_read_string(np, "interface-pix-fmt", &fmt); if (!ret) { @@ -355,11 +262,7 @@ static int imx_pd_probe(struct platform_device *pdev) static void imx_pd_remove(struct platform_device *pdev) { - struct imx_parallel_display *imxpd = platform_get_drvdata(pdev); - component_del(&pdev->dev, &imx_pd_ops); - - drm_edid_free(imxpd->drm_edid); } static const struct of_device_id imx_pd_dt_ids[] = { diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig index 9c28bb0f4662..75869489b0e6 100644 --- a/drivers/gpu/drm/imx/lcdc/Kconfig +++ b/drivers/gpu/drm/imx/lcdc/Kconfig @@ -1,6 +1,7 @@ config DRM_IMX_LCDC tristate "Freescale i.MX LCDC displays" depends on DRM && (ARCH_MXC || COMPILE_TEST) + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 36668455aee8..3215c4acd675 100644 --- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -3,6 +3,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -348,6 +349,7 @@ static struct drm_driver imx_lcdc_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &imx_lcdc_drm_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "imx-lcdc", .desc = "i.MX LCDC driver", .date = "20200716", @@ -501,7 +503,7 @@ static int imx_lcdc_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Cannot register device\n"); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig index 8cd7b750dffe..04ecfb0c5dd6 100644 --- a/drivers/gpu/drm/ingenic/Kconfig 
+++ b/drivers/gpu/drm/ingenic/Kconfig @@ -6,6 +6,7 @@ config DRM_INGENIC depends on OF depends on COMMON_CLK select DRM_BRIDGE + select DRM_CLIENT_SELECTION select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 39fa291f43dd..056b70b63554 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -24,6 +24,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> #include <drm/drm_damage_helper.h> @@ -960,6 +961,7 @@ static const struct drm_driver ingenic_drm_driver_data = { .fops = &ingenic_drm_fops, .gem_create_object = ingenic_drm_gem_create_object, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, }; static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = { @@ -1399,7 +1401,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) goto err_clk_notifier_unregister; } - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig index e5ae3ec52392..7a2aa892a957 100644 --- a/drivers/gpu/drm/kmb/Kconfig +++ b/drivers/gpu/drm/kmb/Kconfig @@ -2,6 +2,7 @@ config DRM_KMB_DISPLAY tristate "Intel Keembay Display" depends on DRM depends on ARCH_KEEMBAY || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 169b83987ce2..0274ab9caa85 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -14,6 +14,7 @@ #include <linux/regmap.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -441,6 +442,7 @@ static const struct drm_driver kmb_driver = { /* GEM Operations */ .fops = &fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "kmb-drm", .desc = "KEEMBAY DISPLAY DRIVER", .date = DRIVER_DATE, @@ -561,7 +563,7 @@ static int kmb_probe(struct platform_device *pdev) if (ret) goto err_register; - drm_fbdev_dma_setup(&kmb->drm, 0); + drm_client_setup(&kmb->drm, NULL); return 0; diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c index cf7cf0b07541..faf38ca9e44c 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.c +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -818,7 +818,7 @@ static void test_mode_send(struct kmb_dsi *kmb_dsi, u32 dphy_no, } } -static inline void +static inline __maybe_unused void set_test_mode_src_osc_freq_target_low_bits(struct kmb_dsi *kmb_dsi, u32 dphy_no, u32 freq) @@ -830,7 +830,7 @@ static inline void (freq & 0x7f)); } -static inline void +static inline __maybe_unused void set_test_mode_src_osc_freq_target_hi_bits(struct kmb_dsi *kmb_dsi, u32 dphy_no, u32 freq) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 1a944edb6ddc..b40c90e97d7e 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job lima_pm_idle(ldev); drm_sched_resubmit_jobs(&pipe->base); - drm_sched_start(&pipe->base); + drm_sched_start(&pipe->base, 0); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/logicvc/Kconfig 
b/drivers/gpu/drm/logicvc/Kconfig index 1df22a852a23..579a358ed5cf 100644 --- a/drivers/gpu/drm/logicvc/Kconfig +++ b/drivers/gpu/drm/logicvc/Kconfig @@ -2,6 +2,7 @@ config DRM_LOGICVC tristate "LogiCVC DRM" depends on DRM depends on OF || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_KMS_DMA_HELPER select DRM_GEM_DMA_HELPER diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c index 01a37e28c080..e4d90701b29d 100644 --- a/drivers/gpu/drm/logicvc/logicvc_drm.c +++ b/drivers/gpu/drm/logicvc/logicvc_drm.c @@ -16,8 +16,10 @@ #include <linux/types.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_print.h> @@ -55,6 +57,7 @@ static struct drm_driver logicvc_drm_driver = { .minor = 0, DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, }; static struct regmap_config logicvc_drm_regmap_config = { @@ -301,7 +304,6 @@ static int logicvc_drm_probe(struct platform_device *pdev) struct regmap *regmap = NULL; struct resource res; void __iomem *base; - unsigned int preferred_bpp; int irq; int ret; @@ -439,17 +441,7 @@ static int logicvc_drm_probe(struct platform_device *pdev) goto error_mode; } - switch (drm_dev->mode_config.preferred_depth) { - case 16: - preferred_bpp = 16; - break; - case 24: - case 32: - default: - preferred_bpp = 32; - break; - } - drm_fbdev_dma_setup(drm_dev, preferred_bpp); + drm_client_setup(drm_dev, NULL); return 0; diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig index 9ed463a76ae2..552edfec7afb 100644 --- a/drivers/gpu/drm/loongson/Kconfig +++ b/drivers/gpu/drm/loongson/Kconfig @@ -4,6 +4,7 @@ config DRM_LOONGSON tristate "DRM support for Loongson Graphics" depends on DRM && PCI && MMU depends on LOONGARCH || MIPS || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_TTM select DRM_TTM_HELPER diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c index adc7344d2f80..b350bdcf1645 100644 --- a/drivers/gpu/drm/loongson/lsdc_drv.c +++ b/drivers/gpu/drm/loongson/lsdc_drv.c @@ -3,12 +3,13 @@ * Copyright (C) 2023 Loongson Technology Corporation Limited */ +#include <linux/aperture.h> #include <linux/pci.h> #include <linux/vgaarb.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -47,6 +48,7 @@ static const struct drm_driver lsdc_drm_driver = { .dumb_create = lsdc_dumb_create, .dumb_map_offset = lsdc_dumb_map_offset, .gem_prime_import_sg_table = lsdc_prime_import_sg_table, + DRM_FBDEV_TTM_DRIVER_OPS, }; static const struct drm_mode_config_funcs lsdc_mode_config_funcs = { @@ -213,9 +215,9 @@ lsdc_create_device(struct pci_dev *pdev, return ERR_PTR(ret); } - ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base, - ldev->vram_size, - driver); + ret = aperture_remove_conflicting_devices(ldev->vram_base, + ldev->vram_size, + driver->name); if (ret) { drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret); return ERR_PTR(ret); @@ -314,7 +316,7 @@ static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - drm_fbdev_ttm_setup(ddev, 32); + drm_client_setup(ddev, NULL); return 0; } 
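[Editor's note] From here on the same two-line conversion repeats across mcde, mediatek, meson, mgag200, msm and the rest: the driver advertises how an fbdev client would probe (DRM_FBDEV_{DMA,SHMEM,TTM}_DRIVER_OPS) and defers the actual setup to the generic in-kernel client machinery. A minimal sketch, "foo" being a hypothetical driver:

	#include <drm/drm_client_setup.h>
	#include <drm/drm_fbdev_dma.h>

	static const struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		DRM_GEM_DMA_DRIVER_OPS,
		DRM_FBDEV_DMA_DRIVER_OPS,	/* supplies .fbdev_probe */
		.name = "foo",
	};

	static int foo_load(struct drm_device *drm)
	{
		int ret = drm_dev_register(drm, 0);

		if (ret)
			return ret;

		/* NULL: let the client pick the device's preferred format.
		 * Fixed formats go through drm_client_setup_with_fourcc()
		 * (mgag200 below), legacy depths through
		 * drm_client_setup_with_color_mode() (imx above). */
		drm_client_setup(drm, NULL);

		return 0;
	}

The logicvc hunk above shows the payoff: the driver-local preferred_bpp switch disappears because the client derives a format from mode_config.preferred_depth itself.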
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig index 907460b69d4f..3516c8d2a5d9 100644 --- a/drivers/gpu/drm/mcde/Kconfig +++ b/drivers/gpu/drm/mcde/Kconfig @@ -6,6 +6,7 @@ config DRM_MCDE depends on OF depends on COMMON_CLK select MFD_SYSCON + select DRM_CLIENT_SELECTION select DRM_MIPI_DSI select DRM_BRIDGE select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 10c06440c7e7..f60bdd7b6c13 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -67,6 +67,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fbdev_dma.h> @@ -212,6 +213,7 @@ static const struct drm_driver mcde_drm_driver = { .minor = 0, .patchlevel = 0, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, }; static int mcde_drm_bind(struct device *dev) @@ -237,7 +239,7 @@ static int mcde_drm_bind(struct device *dev) if (ret < 0) goto unbind; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; @@ -473,6 +475,7 @@ static const struct of_device_id mcde_of_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, mcde_of_match); static struct platform_driver mcde_driver = { .driver = { diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 417ac8c9af41..f496e6cfdfe0 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -2,11 +2,12 @@ config DRM_MEDIATEK tristate "DRM Support for Mediatek SoCs" depends on DRM - depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) + depends on ARCH_MEDIATEK || COMPILE_TEST depends on COMMON_CLK - depends on HAVE_ARM_SMCCC + depends on HAVE_ARM_SMCCC || COMPILE_TEST depends on OF depends on MTK_MMSYS + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 04154db9085c..04217a36939c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -109,6 +109,7 @@ size_t mtk_ovl_get_num_formats(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node); void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next); void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index bf2546c4681a..187855d83590 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -497,6 +497,41 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } +static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node, + enum mtk_ovl_adaptor_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ovl_adaptor_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node) +{ + enum mtk_ovl_adaptor_comp_type type; + int ret; + + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) + return false; + + if (type >= 
OVL_ADAPTOR_TYPE_NUM) + return false; + + /* + * In the context of mediatek-drm, ETHDR, MDP_RDMA and Padding are + * used exclusively by OVL Adaptor: if this component is not one of + * those, it's likely not an OVL Adaptor path. + */ + return type == OVL_ADAPTOR_TYPE_ETHDR || + type == OVL_ADAPTOR_TYPE_MDP_RDMA || + type == OVL_ADAPTOR_TYPE_PADDING; +} + static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match) { struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); @@ -506,12 +541,11 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma parent = dev->parent->parent->of_node->parent; for_each_child_of_node_scoped(parent, node) { - const struct of_device_id *of_id; enum mtk_ovl_adaptor_comp_type type; - int id; + int id, ret; - of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); - if (!of_id) + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) continue; if (!of_device_is_available(node)) { @@ -520,7 +554,6 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma continue; } - type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data; id = ovl_adaptor_comp_get_id(dev, node, type); if (id < 0) { dev_warn(dev, "Skipping unknown component %pOF\n", diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index f2bee617f063..1cc916b16471 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -394,7 +394,7 @@ static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = { }, }; -static struct regmap_config mtk_dp_regmap_config = { +static const struct regmap_config mtk_dp_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index a08d20654954..20a9d589fd75 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -704,6 +704,20 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); + int ret; + + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1); + if (IS_ERR(dpi->next_bridge)) { + ret = PTR_ERR(dpi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0); + if (IS_ERR(dpi->next_bridge)) + return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge), + "Failed to get bridge\n"); + } return drm_bridge_attach(bridge->encoder, dpi->next_bridge, &dpi->bridge, flags); @@ -1058,13 +1072,6 @@ static int mtk_dpi_probe(struct platform_device *pdev) if (dpi->irq < 0) return dpi->irq; - dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); - if (IS_ERR(dpi->next_bridge)) - return dev_err_probe(dev, PTR_ERR(dpi->next_bridge), - "Failed to get bridge\n"); - - dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node); - platform_set_drvdata(pdev, dpi); dpi->bridge.funcs = &mtk_dpi_bridge_funcs; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 3e807195a0d0..9a8ef8558da9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> @@ -26,6 +27,7 @@ #include 
"mtk_crtc.h" #include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" #include "mtk_drm_drv.h" #include "mtk_gem.h" @@ -371,12 +373,11 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) struct mtk_drm_private *temp_drm_priv; struct device_node *phandle = dev->parent->of_node; const struct of_device_id *of_id; - struct device_node *node; struct device *drm_dev; unsigned int cnt = 0; int i, j; - for_each_child_of_node(phandle->parent, node) { + for_each_child_of_node_scoped(phandle->parent, node) { struct platform_device *pdev; of_id = of_match_node(mtk_drm_of_ids, node); @@ -606,6 +607,7 @@ static const struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = mtk_gem_dumb_create, + DRM_FBDEV_DMA_DRIVER_OPS, .gem_prime_import = mtk_gem_prime_import, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, @@ -662,7 +664,7 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; @@ -818,12 +820,235 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { { } }; +static int mtk_drm_of_get_ddp_comp_type(struct device_node *node, enum mtk_ddp_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ddp_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ddp_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +static int mtk_drm_of_get_ddp_ep_cid(struct device_node *node, + int output_port, enum mtk_crtc_path crtc_path, + struct device_node **next, unsigned int *cid) +{ + struct device_node *ep_dev_node, *ep_out; + enum mtk_ddp_comp_type comp_type; + int ret; + + ep_out = of_graph_get_endpoint_by_regs(node, output_port, crtc_path); + if (!ep_out) + return -ENOENT; + + ep_dev_node = of_graph_get_remote_port_parent(ep_out); + of_node_put(ep_out); + if (!ep_dev_node) + return -EINVAL; + + /* + * Pass the next node pointer regardless of failures in the later code + * so that if this function is called in a loop it will walk through all + * of the subsequent endpoints anyway. + */ + *next = ep_dev_node; + + if (!of_device_is_available(ep_dev_node)) + return -ENODEV; + + ret = mtk_drm_of_get_ddp_comp_type(ep_dev_node, &comp_type); + if (ret) { + if (mtk_ovl_adaptor_is_comp_present(ep_dev_node)) { + *cid = (unsigned int)DDP_COMPONENT_DRM_OVL_ADAPTOR; + return 0; + } + return ret; + } + + ret = mtk_ddp_comp_get_id(ep_dev_node, comp_type); + if (ret < 0) + return ret; + + /* All ok! Pass the Component ID to the caller. */ + *cid = (unsigned int)ret; + + return 0; +} + +/** + * mtk_drm_of_ddp_path_build_one - Build a Display HW Pipeline for a CRTC Path + * @dev: The mediatek-drm device + * @cpath: CRTC Path relative to a VDO or MMSYS + * @out_path: Pointer to an array that will contain the new pipeline + * @out_path_len: Number of entries in the pipeline array + * + * MediaTek SoCs can use different DDP hardware pipelines (or paths) depending + * on the board-specific desired display configuration; this function walks + * through all of the output endpoints starting from a VDO or MMSYS hardware + * instance and builds the right pipeline as specified in device trees. 
+ * + * Return: + * * %0 - Display HW Pipeline successfully built and validated + * * %-ENOENT - Display pipeline was not specified in device tree + * * %-EINVAL - Display pipeline built but validation failed + * * %-ENOMEM - Failure to allocate pipeline array to pass to the caller + */ +static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path cpath, + const unsigned int **out_path, + unsigned int *out_path_len) +{ + struct device_node *next, *prev, *vdo = dev->parent->of_node; + unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 }; + unsigned int *final_ddp_path; + unsigned short int idx = 0; + bool ovl_adaptor_comp_added = false; + int ret; + + /* Get the first entry for the temp_path array */ + ret = mtk_drm_of_get_ddp_ep_cid(vdo, 0, cpath, &next, &temp_path[idx]); + if (ret) { + if (next && temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + dev_dbg(dev, "Adding OVL Adaptor for %pOF\n", next); + ovl_adaptor_comp_added = true; + } else { + if (next) + dev_err(dev, "Invalid component %pOF\n", next); + else + dev_err(dev, "Cannot find first endpoint for path %d\n", cpath); + + return ret; + } + } + idx++; + + /* + * Walk through port outputs until we reach the last valid mediatek-drm component. + * To be valid, this must end with an "invalid" component that is a display node. + */ + do { + prev = next; + ret = mtk_drm_of_get_ddp_ep_cid(next, 1, cpath, &next, &temp_path[idx]); + of_node_put(prev); + if (ret) { + of_node_put(next); + break; + } + + /* + * If this is an OVL adaptor exclusive component and one of those + * was already added, don't add another instance of the generic + * DDP_COMPONENT_OVL_ADAPTOR, as this is used only to decide whether + * to probe that component master driver of which only one instance + * is needed and possible. + */ + if (temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + if (!ovl_adaptor_comp_added) + ovl_adaptor_comp_added = true; + else + idx--; + } + } while (++idx < DDP_COMPONENT_DRM_ID_MAX); + + /* + * The device component might not be enabled: in that case, don't + * check the last entry and just report that the device is missing. + */ + if (ret == -ENODEV) + return ret; + + /* If the last entry is not a final display output, the configuration is wrong */ + switch (temp_path[idx - 1]) { + case DDP_COMPONENT_DP_INTF0: + case DDP_COMPONENT_DP_INTF1: + case DDP_COMPONENT_DPI0: + case DDP_COMPONENT_DPI1: + case DDP_COMPONENT_DSI0: + case DDP_COMPONENT_DSI1: + case DDP_COMPONENT_DSI2: + case DDP_COMPONENT_DSI3: + break; + default: + dev_err(dev, "Invalid display hw pipeline. Last component: %d (ret=%d)\n", + temp_path[idx - 1], ret); + return -EINVAL; + } + + final_ddp_path = devm_kmemdup(dev, temp_path, idx * sizeof(temp_path[0]), GFP_KERNEL); + if (!final_ddp_path) + return -ENOMEM; + + dev_dbg(dev, "Display HW Pipeline built with %d components.\n", idx); + + /* Pipeline built! 
*/ + *out_path = final_ddp_path; + *out_path_len = idx; + + return 0; +} + +static int mtk_drm_of_ddp_path_build(struct device *dev, struct device_node *node, + struct mtk_mmsys_driver_data *data) +{ + struct device_node *ep_node; + struct of_endpoint of_ep; + bool output_present[MAX_CRTC] = { false }; + int ret; + + for_each_endpoint_of_node(node, ep_node) { + ret = of_graph_parse_endpoint(ep_node, &of_ep); + if (ret) { + dev_err_probe(dev, ret, "Cannot parse endpoint\n"); + break; + } + + if (of_ep.id >= MAX_CRTC) { + ret = dev_err_probe(dev, -EINVAL, + "Invalid endpoint%u number\n", of_ep.port); + break; + } + + output_present[of_ep.id] = true; + } + + if (ret) { + of_node_put(ep_node); + return ret; + } + + if (output_present[CRTC_MAIN]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_MAIN, + &data->main_path, &data->main_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_EXT]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_EXT, + &data->ext_path, &data->ext_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_THIRD]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_THIRD, + &data->third_path, &data->third_len); + if (ret && ret != -ENODEV) + return ret; + } + + return 0; +} + static int mtk_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *phandle = dev->parent->of_node; const struct of_device_id *of_id; struct mtk_drm_private *private; + struct mtk_mmsys_driver_data *mtk_drm_data; struct device_node *node; struct component_match *match = NULL; struct platform_device *ovl_adaptor; @@ -844,7 +1069,27 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; - private->data = of_id->data; + mtk_drm_data = (struct mtk_mmsys_driver_data *)of_id->data; + if (!mtk_drm_data) + return -EINVAL; + + /* Try to build the display pipeline from devicetree graphs */ + if (of_graph_is_present(phandle)) { + dev_dbg(dev, "Building display pipeline for MMSYS %u\n", + mtk_drm_data->mmsys_id); + private->data = devm_kmemdup(dev, mtk_drm_data, + sizeof(*mtk_drm_data), GFP_KERNEL); + if (!private->data) + return -ENOMEM; + + ret = mtk_drm_of_ddp_path_build(dev, phandle, private->data); + if (ret) + return ret; + } else { + /* No devicetree graphs support: go with hardcoded paths if present */ + dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id); + private->data = mtk_drm_data; + }; private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num, sizeof(*private->all_drm_private), @@ -866,12 +1111,11 @@ static int mtk_drm_probe(struct platform_device *pdev) /* Iterate over sibling DISP function blocks */ for_each_child_of_node(phandle->parent, node) { - const struct of_device_id *of_id; enum mtk_ddp_comp_type comp_type; int comp_id; - of_id = of_match_node(mtk_ddp_comp_dt_ids, node); - if (!of_id) + ret = mtk_drm_of_get_ddp_comp_type(node, &comp_type); + if (ret) continue; if (!of_device_is_available(node)) { @@ -880,8 +1124,6 @@ static int mtk_drm_probe(struct platform_device *pdev) continue; } - comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data; - if (comp_type == MTK_DISP_MUTEX) { int id; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index ce897984de51..675cdc90a440 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -63,7 +63,7 @@ struct mtk_drm_private { struct device *mmsys_dev; struct device_node 
*comp_node[DDP_COMPONENT_DRM_ID_MAX]; struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_DRM_ID_MAX]; - const struct mtk_mmsys_driver_data *data; + struct mtk_mmsys_driver_data *data; struct drm_atomic_state *suspend_state; unsigned int mbox_index; struct mtk_drm_private **all_drm_private; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index eeec641cab60..33ceeb8d6925 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -988,9 +988,17 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host, dsi->lanes = device->lanes; dsi->format = device->format; dsi->mode_flags = device->mode_flags; - dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); - if (IS_ERR(dsi->next_bridge)) - return PTR_ERR(dsi->next_bridge); + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); + if (IS_ERR(dsi->next_bridge)) + return PTR_ERR(dsi->next_bridge); + } drm_bridge_add(&dsi->bridge); diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig index 2544756538cc..417f79829cf8 100644 --- a/drivers/gpu/drm/meson/Kconfig +++ b/drivers/gpu/drm/meson/Kconfig @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_MESON tristate "DRM Support for Amlogic Meson Display Controller" - depends on DRM && OF && (ARM || ARM64) + depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on ARCH_MESON || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 4bd0baa2a4f5..7cace75a38af 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -8,6 +8,7 @@ * Jasper St. Pierre <jstpierre@mecheye.net> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/module.h> #include <linux/of_graph.h> @@ -15,8 +16,8 @@ #include <linux/platform_device.h> #include <linux/soc/amlogic/meson-canvas.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -98,6 +99,7 @@ static const struct drm_driver meson_driver = { /* DMA Ops */ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, /* Misc */ .fops = &fops, @@ -126,7 +128,7 @@ static bool meson_vpu_has_available_connectors(struct device *dev) return false; } -static struct regmap_config meson_regmap_config = { +static const struct regmap_config meson_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -277,7 +279,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) * Remove early framebuffers (ie. simplefb). 
The framebuffer can be * located anywhere in RAM */ - ret = drm_aperture_remove_framebuffers(&meson_driver); + ret = aperture_remove_all_conflicting_devices(meson_driver.name); if (ret) goto free_canvas_vd1_2; @@ -353,7 +355,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (ret) goto uninstall_irq; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 5565f7777529..b75db829b1da 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -272,20 +272,6 @@ static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi, writeb(data, dw_hdmi->hdmitx + addr); } -/* Helper to change specific bits in controller registers */ -static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, - unsigned int addr, - unsigned int mask, - unsigned int val) -{ - unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr); - - data &= ~mask; - data |= val; - - dw_hdmi->data->dwc_write(dw_hdmi, addr, data); -} - /* Bridge */ /* Setup PHY bandwidth modes */ diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig index 3096944a8f0a..412dcbea0e2d 100644 --- a/drivers/gpu/drm/mgag200/Kconfig +++ b/drivers/gpu/drm/mgag200/Kconfig @@ -2,6 +2,7 @@ config DRM_MGAG200 tristate "Matrox G200" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER select I2C @@ -20,4 +21,4 @@ config DRM_MGAG200_DISABLE_WRITECOMBINE performances. This can interfere with real-time tasks; even if they are running on other CPU cores than the graphics output. Enable this option only if you run realtime tasks on a server with a - Matrox G200.
\ No newline at end of file + Matrox G200. diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 9f5925693686..97fd7eb765b4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -6,14 +6,16 @@ * Dave Airlie */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> +#include <drm/drm_fourcc.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_module.h> @@ -100,6 +102,7 @@ static const struct drm_driver mgag200_driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; /* @@ -225,7 +228,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &mgag200_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, mgag200_driver.name); if (ret) return ret; @@ -276,7 +279,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * FIXME: A 24-bit color depth does not work with 24 bpp on * G200ER. Force 32 bpp. */ - drm_fbdev_shmem_setup(dev, 32); + drm_client_setup_with_fourcc(dev, DRM_FORMAT_XRGB8888); return 0; } diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 90c68106b63b..7ec833b6d829 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -6,6 +6,7 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST depends on COMMON_CLK depends on IOMMU_SUPPORT + depends on OF depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n @@ -14,6 +15,7 @@ config DRM_MSM select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_AUX_BUS select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER @@ -92,6 +94,7 @@ config DRM_MSM_DPU bool "Enable DPU support in MSM DRM driver" depends on DRM_MSM select DRM_MSM_MDSS + select DRM_DISPLAY_DSC_HELPER default y help Compile in support for the Display Processing Unit in @@ -113,6 +116,7 @@ config DRM_MSM_DSI depends on DRM_MSM select DRM_PANEL select DRM_MIPI_DSI + select DRM_DISPLAY_DSC_HELPER default y help Choose this option if you have a need for MIPI DSI connector diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 13110fcc46a8..f274d9430cc3 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,6 +23,7 @@ adreno-y := \ adreno/a6xx_gpu.o \ adreno/a6xx_gmu.o \ adreno/a6xx_hfi.o \ + adreno/a6xx_preempt.o \ adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ @@ -210,6 +211,7 @@ DISPLAY_HEADERS = \ generated/mdp4.xml.h \ generated/mdp5.xml.h \ generated/mdp_common.xml.h \ + generated/mdss.xml.h \ generated/sfpb.xml.h $(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS)) diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 0dc255ddf5ce..379a3d346c30 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if 
(gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b46ff49f47cf..b6df115bb567 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 8b4cdf95f445..50c490b492f0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e09044930547..ee89db72e36e 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -77,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -132,7 +132,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) unsigned int i, ibs = 0; if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { - gpu->cur_ctx_seqno = 0; + ring->cur_ctx_seqno = 0; a5xx_submit_in_rb(gpu, submit); return; } @@ -171,7 +171,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 7705f8010484..6b91e0bd1514 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -307,7 +307,7 @@ int a5xx_power_init(struct msm_gpu *gpu) else if (adreno_is_a540(adreno_gpu)) a540_lm_setup(gpu); - /* Set up SP/TP power collpase */ + /* Set up SP/TP power collapse */ a5xx_pc_init(gpu); /* Start the GPMU */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 0312b6ee0356..0c560e84ad5a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -973,6 +973,25 @@ static const struct adreno_info a6xx_gpus[] = { }, .address_space_size = SZ_16G, }, { + .chip_ids = ADRENO_CHIP_IDS(0x06060300), + .family = ADRENO_6XX_GEN4, + .fw = { + [ADRENO_FW_SQE] = "a660_sqe.fw", + [ADRENO_FW_GMU] = "a663_gmu.bin", + }, + .gmem = SZ_1M + SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .init = 
a6xx_gpu_init, + .a6xx = &(const struct a6xx_info) { + .hwcg = a690_hwcg, + .protect = &a660_protect, + .gmu_cgc_mode = 0x00020200, + .prim_fifo_threshold = 0x00300200, + }, + .address_space_size = SZ_16G, + }, { .chip_ids = ADRENO_CHIP_IDS(0x06030500), .family = ADRENO_6XX_GEN4, .fw = { @@ -1281,6 +1300,28 @@ static const u32 a730_protect_regs[] = { }; DECLARE_ADRENO_PROTECT(a730_protect, 48); +static const uint32_t a7xx_pwrup_reglist_regs[] = { + REG_A6XX_UCHE_TRAP_BASE, + REG_A6XX_UCHE_TRAP_BASE + 1, + REG_A6XX_UCHE_WRITE_THRU_BASE, + REG_A6XX_UCHE_WRITE_THRU_BASE + 1, + REG_A6XX_UCHE_GMEM_RANGE_MIN, + REG_A6XX_UCHE_GMEM_RANGE_MIN + 1, + REG_A6XX_UCHE_GMEM_RANGE_MAX, + REG_A6XX_UCHE_GMEM_RANGE_MAX + 1, + REG_A6XX_UCHE_CACHE_WAYS, + REG_A6XX_UCHE_MODE_CNTL, + REG_A6XX_RB_NC_MODE_CNTL, + REG_A6XX_RB_CMP_DBG_ECO_CNTL, + REG_A7XX_GRAS_NC_MODE_CNTL, + REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, + REG_A6XX_UCHE_GBIF_GX_CONFIG, + REG_A6XX_UCHE_CLIENT_PF, + REG_A6XX_TPL1_DBG_ECO_CNTL1, +}; + +DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist); + static const struct adreno_info a7xx_gpus[] = { { .chip_ids = ADRENO_CHIP_IDS(0x07000200), @@ -1315,15 +1356,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = SZ_2M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "a730_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a730_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_cgc_mode = 0x00020000, }, .address_space_size = SZ_16G, + .preempt_record_size = 2860 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */ .family = ADRENO_7XX_GEN2, @@ -1334,16 +1378,19 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "a740_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7020100, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, + .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */ .family = ADRENO_7XX_GEN2, @@ -1354,15 +1401,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7050001, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_256G, + .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */ .family = ADRENO_7XX_GEN3, @@ -1373,15 +1423,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "gen70900_zap.mbn", .a6xx = &(const struct a6xx_info) { .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7090100, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, + 
.preempt_record_size = 3572 * SZ_1K, } }; DECLARE_ADRENO_GPULIST(a7xx); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 37927bdd6fbe..14db7376c712 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1522,15 +1522,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, irq = platform_get_irq_byname(pdev, name); - ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu); if (ret) { DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", name, ret); return ret; } - disable_irq(irq); - return irq; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 94b6c5cab6f4..b4a79f88ccf4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -99,6 +99,7 @@ struct a6xx_gmu { struct completion pd_gate; struct qmp *qmp; + struct a6xx_hfi_msg_bw_table *bw_table; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 702b8d4b3497..019610341df1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -68,6 +68,8 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; @@ -81,12 +83,17 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->preempt_lock, flags); - - /* Make sure everything is posted before making a decision */ - mb(); + /* Update HW if this is the current ring and we are not in preempt*/ + if (!a6xx_in_preempt(a6xx_gpu)) { + if (a6xx_gpu->cur_ring == ring) + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + else + ring->restore_wptr = true; + } else { + ring->restore_wptr = true; + } - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + spin_unlock_irqrestore(&ring->preempt_lock, flags); } static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, @@ -110,7 +117,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, u32 asid; u64 memptr = rbmemptr(ring, ttbr0); - if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno) + if (ctx->seqno == ring->cur_ctx_seqno) return; if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) @@ -148,12 +155,14 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, /* * Write the new TTBR0 to the memstore. This is good for debugging. 
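[Editor's note] The a6xx_flush() hunk above narrows when the kernel may write CP_RB_WPTR: only the ring that currently owns the CP is flushed directly; every other case records a deferred update for the preemption code to replay after the switch. Collapsed into one condition (a paraphrase, not the literal driver code):

	spin_lock_irqsave(&ring->preempt_lock, flags);
	ring->cur = ring->next;
	wptr = get_wptr(ring);

	if (!a6xx_in_preempt(a6xx_gpu) && a6xx_gpu->cur_ring == ring)
		gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); /* we own the CP */
	else
		ring->restore_wptr = true;	/* replayed after the switch */

	spin_unlock_irqrestore(&ring->preempt_lock, flags);

Moving the register write under preempt_lock is also why the barrier-plus-write pair that used to sit after the unlock can go away.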
+ * Needed for preemption */ - OUT_PKT7(ring, CP_MEM_WRITE, 4); + OUT_PKT7(ring, CP_MEM_WRITE, 5); OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); OUT_RING(ring, lower_32_bits(ttbr)); - OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr)); + OUT_RING(ring, upper_32_bits(ttbr)); + OUT_RING(ring, ctx->seqno); /* * Sync both threads after switching pagetables and enable BR only
@@ -229,7 +238,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF:
@@ -278,6 +287,46 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a6xx_flush(gpu, ring); } +static void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring, + struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue) +{ + u64 preempt_postamble; + + OUT_PKT7(ring, CP_SET_PSEUDO_REG, 12); + + OUT_RING(ring, SMMU_INFO); + /* don't save SMMU, we write the record from the kernel instead */ + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* privileged and non-secure buffer save */ + OUT_RING(ring, NON_SECURE_SAVE_ADDR); + OUT_RING(ring, lower_32_bits( + a6xx_gpu->preempt_iova[ring->id])); + OUT_RING(ring, upper_32_bits( + a6xx_gpu->preempt_iova[ring->id])); + + /* user context buffer save, seems to be unused by fw */ + OUT_RING(ring, NON_PRIV_SAVE_ADDR); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + OUT_RING(ring, COUNTER); + /* seems OK to set to 0 to disable it */ + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* Emit postamble to clear perfcounters */ + preempt_postamble = a6xx_gpu->preempt_postamble_iova; + + OUT_PKT7(ring, CP_SET_AMBLE, 3); + OUT_RING(ring, lower_32_bits(preempt_postamble)); + OUT_RING(ring, upper_32_bits(preempt_postamble)); + OUT_RING(ring, CP_SET_AMBLE_2_DWORDS( + a6xx_gpu->preempt_postamble_len) | + CP_SET_AMBLE_2_TYPE(KMD_AMBLE_TYPE)); +} + static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
@@ -295,6 +344,13 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a6xx_set_pagetable(a6xx_gpu, ring, submit); + /* + * If preemption is enabled, then set the pseudo register for the save + * sequence + */ + if (gpu->nr_rings > 1) + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue); + get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
@@ -306,8 +362,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x101); /* IFPC disable */ - OUT_PKT7(ring, CP_SET_MARKER, 1); - OUT_RING(ring, 0x00d); /* IB1LIST start */ + if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { + OUT_PKT7(ring, CP_SET_MARKER, 1); + OUT_RING(ring, 0x00d); /* IB1LIST start */ + } /* Submit the commands */ for (i = 0; i < submit->nr_cmds; i++) { switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF:
@@ -338,8 +396,10 @@ static void
a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) update_shadow_rptr(gpu, ring); } - OUT_PKT7(ring, CP_SET_MARKER, 1); - OUT_RING(ring, 0x00e); /* IB1LIST end */ + if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { + OUT_PKT7(ring, CP_SET_MARKER, 1); + OUT_RING(ring, 0x00e); /* IB1LIST end */ + } get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_end)); @@ -386,6 +446,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence))); OUT_RING(ring, submit->seqno); + a6xx_gpu->last_seqno[ring->id] = submit->seqno; + /* write the ringbuffer timestamp */ OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27)); @@ -399,10 +461,32 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x100); /* IFPC enable */ + /* If preemption is enabled */ + if (gpu->nr_rings > 1) { + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + + /* + * If dword[2:1] are non zero, they specify an address for + * the CP to write the value of dword[3] to on preemption + * complete. Write 0 to skip the write + */ + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Data value - not used if the address above is 0 */ + OUT_RING(ring, 0x01); + /* generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + } + + trace_msm_gpu_submit_flush(submit, gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); a6xx_flush(gpu, ring); + + /* Check to see if we need to start preemption */ + a6xx_preempt_trigger(gpu); } static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) @@ -551,6 +635,15 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.macrotile_mode = 1; } + if (adreno_is_a663(gpu)) { + gpu->ubwc_config.highest_bank_bit = 13; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.rgb565_predicator = 1; + gpu->ubwc_config.uavflagprd_inv = 2; + gpu->ubwc_config.macrotile_mode = 1; + gpu->ubwc_config.ubwc_swizzle = 0x4; + } + if (adreno_is_7c3(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.amsbc = 1; @@ -609,6 +702,77 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu) adreno_gpu->ubwc_config.macrotile_mode); } +static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + const struct adreno_reglist_list *reglist; + void *ptr = a6xx_gpu->pwrup_reglist_ptr; + struct cpu_gpu_lock *lock = ptr; + u32 *dest = (u32 *)&lock->regs[0]; + int i; + + reglist = adreno_gpu->info->a6xx->pwrup_reglist; + + lock->gpu_req = lock->cpu_req = lock->turn = 0; + lock->ifpc_list_len = 0; + lock->preemption_list_len = reglist->count; + + /* + * For each entry in each of the lists, write the offset and the current + * register value into the GPU buffer + */ + for (i = 0; i < reglist->count; i++) { + *dest++ = reglist->regs[i]; + *dest++ = gpu_read(gpu, reglist->regs[i]); + } + + /* + * The overall register list is composed of + * 1. Static IFPC-only registers + * 2. Static IFPC + preemption registers + * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects) + * + * The first two lists are static. Size of these lists are stored as + * number of pairs in ifpc_list_len and preemption_list_len + * respectively. 
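+ *
+ * For illustration (a sketch, not part of the patch): with the
+ * 17-entry a7xx_pwrup_reglist above, the loop above leaves the buffer
+ * looking like
+ *
+ *   lock->preemption_list_len = 17
+ *   regs[] = { REG_A6XX_UCHE_TRAP_BASE,     <current value>,
+ *              REG_A6XX_UCHE_TRAP_BASE + 1, <current value>,
+ *              ... }
+ *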
With concurrent binning, some of the perfcounter + * registers are virtualized, so the CP needs to know the pipe id to + * program the aperture in order to restore them. Thus, the third list + * is a dynamic list of triplets + * (<aperture, shifted 12 bits> <address> <data>), and its length is + * stored as the number of triplets in dynamic_list_len. + */ + lock->dynamic_list_len = 0; +} + +static int a7xx_preempt_start(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (gpu->nr_rings <= 1) + return 0; + + /* Turn CP protection off */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, NULL); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + + a6xx_flush(gpu, ring); + + return a6xx_idle(gpu, ring) ? 0 : -EINVAL; +} + static int a6xx_cp_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0];
@@ -640,6 +804,8 @@ static int a6xx_cp_init(struct msm_gpu *gpu) static int a7xx_cp_init(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->rb[0]; u32 mask;
@@ -677,11 +843,11 @@ static int a7xx_cp_init(struct msm_gpu *gpu) /* *Don't* send a power up reg list for concurrent binning (TODO) */ /* Lo address */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova)); /* Hi address */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova)); /* BIT(31) set => read the regs from the list */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, BIT(31)); a6xx_flush(gpu, ring);
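+ /*
+  * Illustrative note: with BIT(31) set, the CP presumably treats the
+  * address programmed above as a pointer to the cpu_gpu_lock layout
+  * that a7xx_patch_pwrup_reglist() fills in, replaying the
+  * <offset, value> pairs from the buffer on power-up instead of
+  * taking the register list inline from the packet.
+  */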
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
@@ -805,6 +971,16 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); } + a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV, + gpu->aspace, &a6xx_gpu->pwrup_reglist_bo, + &a6xx_gpu->pwrup_reglist_iova); + + if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr)) + return PTR_ERR(a6xx_gpu->pwrup_reglist_ptr); + + msm_gem_object_set_name(a6xx_gpu->pwrup_reglist_bo, "pwrup_reglist"); + return 0; }
@@ -864,6 +1040,7 @@ static int hw_init(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u64 gmem_range_min; + unsigned int i; int ret; if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -1072,7 +1249,7 @@ static int hw_init(struct msm_gpu *gpu) if (adreno_is_a690(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); /* Set dualQ + disable afull for A660 GPU */ - else if (adreno_is_a660(adreno_gpu)) + else if (adreno_is_a660(adreno_gpu) || adreno_is_a663(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); else if (adreno_is_a7xx(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
@@ -1134,22 +1311,32 @@ static int hw_init(struct msm_gpu *gpu) if (a6xx_gpu->shadow_bo) { gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0])); + for (unsigned int i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; } /* ..which means "always" on A7xx, also for BV shadow */ if (adreno_is_a7xx(adreno_gpu)) { gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR, - rbmemptr(gpu->rb[0], bv_fence)); + rbmemptr(gpu->rb[0], bv_rptr)); } + a6xx_preempt_hw_init(gpu); + /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; - gpu->cur_ctx_seqno = 0; + for (i = 0; i < gpu->nr_rings; i++) + gpu->rb[i]->cur_ctx_seqno = 0; /* Enable the SQE to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); + if (adreno_is_a7xx(adreno_gpu) && !a6xx_gpu->pwrup_reglist_emitted) { + a7xx_patch_pwrup_reglist(gpu); + a6xx_gpu->pwrup_reglist_emitted = true; + } +
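+ /*
+  * Ordering note (illustrative): the list has to be patched before the
+  * CP_INIT below hands its GPU address to the CP, and the one-shot
+  * pwrup_reglist_emitted flag presumably keeps later resumes from
+  * re-capturing register values the GPU may have modified at runtime.
+  */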
ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu); if (ret) goto out;
@@ -1187,6 +1374,10 @@ static int hw_init(struct msm_gpu *gpu) out: if (adreno_has_gmu_wrapper(adreno_gpu)) return ret; + + /* Last step - yield the ringbuffer */ + a7xx_preempt_start(gpu); + /* * Tell the GMU that we are done touching the GPU and it can start power * management
@@ -1564,8 +1755,13 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION) a7xx_sw_fuse_violation_irq(gpu); - if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { msm_gpu_retire(gpu); + a6xx_preempt_trigger(gpu); + } + + if (status & A6XX_RBBM_INT_0_MASK_CP_SW) + a6xx_preempt_irq(gpu); return IRQ_HANDLED; }
@@ -2259,6 +2455,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) struct a6xx_gpu *a6xx_gpu; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; + extern int enable_preemption; bool is_a7xx; int ret;
@@ -2297,7 +2494,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - if (is_a7xx) + if ((enable_preemption == 1) || (enable_preemption == -1 && + (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4); + else if (is_a7xx) ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1); else if (adreno_has_gmu_wrapper(adreno_gpu)) ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
@@ -2338,6 +2538,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) a6xx_fault_handler); a6xx_calc_ubwc_config(adreno_gpu); + /* Set up the preemption specific bits and pieces for each ringbuffer */ + a6xx_preempt_init(gpu); return gpu; }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 0fb7febf70e7..4aceffb6aae8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -12,15 +12,35 @@ extern bool hang_debug; +struct cpu_gpu_lock { + uint32_t gpu_req; + uint32_t cpu_req; + uint32_t turn; + union { + struct { + uint16_t list_length; + uint16_t list_offset; + }; + struct { + uint8_t ifpc_list_len; + uint8_t preemption_list_len; + uint16_t dynamic_list_len; + }; + }; + uint64_t regs[62]; +}; + /** * struct a6xx_info - a6xx specific information from device table * * @hwcg: hw clock gating register sequence * @protect: CP_PROTECT settings + * @pwrup_reglist: power-up register list for preemption */ struct a6xx_info { const struct adreno_reglist *hwcg; const struct adreno_protect *protect; + const struct adreno_reglist_list *pwrup_reglist; u32 gmu_chipid; u32 gmu_cgc_mode; u32 prim_fifo_threshold;
@@ -33,6 +53,29 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; + struct msm_ringbuffer *next_ring; + + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; + void *preempt[MSM_GPU_MAX_RINGS]; + uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS]; + void *preempt_smmu[MSM_GPU_MAX_RINGS]; + uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS]; + uint32_t last_seqno[MSM_GPU_MAX_RINGS]; + + atomic_t preempt_state; + spinlock_t eval_lock; + struct timer_list preempt_timer; + + unsigned int preempt_level; + bool uses_gmem; + bool skip_save_restore; + + struct drm_gem_object *preempt_postamble_bo; + void *preempt_postamble_ptr; + uint64_t preempt_postamble_iova; + uint64_t preempt_postamble_len; + bool postamble_enabled; struct a6xx_gmu gmu;
@@ -40,6 +83,11 @@ struct a6xx_gpu { uint64_t shadow_iova; uint32_t *shadow; + struct
drm_gem_object *pwrup_reglist_bo; + void *pwrup_reglist_ptr; + uint64_t pwrup_reglist_iova; + bool pwrup_reglist_emitted; + bool has_whereami; void __iomem *llc_mmio; @@ -52,6 +100,100 @@ struct a6xx_gpu { #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) /* + * In order to do lockless preemption we use a simple state machine to progress + * through the process. + * + * PREEMPT_NONE - no preemption in progress. Next state START. + * PREEMPT_START - The trigger is evaluating if preemption is possible. Next + * states: TRIGGERED, NONE + * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next + * state: NONE. + * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next + * states: FAULTED, PENDING + * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger + * recovery. Next state: N/A + * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is + * checking the success of the operation. Next state: FAULTED, NONE. + */ + +enum a6xx_preempt_state { + PREEMPT_NONE = 0, + PREEMPT_START, + PREEMPT_FINISH, + PREEMPT_TRIGGERED, + PREEMPT_FAULTED, + PREEMPT_PENDING, +}; + +/* + * struct a6xx_preempt_record is a shared buffer between the microcode and the + * CPU to store the state for preemption. The record itself is much larger + * (2112k) but most of that is used by the CP for storage. + * + * There is a preemption record assigned per ringbuffer. When the CPU triggers a + * preemption, it fills out the record with the useful information (wptr, ring + * base, etc) and the microcode uses that information to set up the CP following + * the preemption. When a ring is switched out, the CP will save the ringbuffer + * state back to the record. In this way, once the records are properly set up + * the CPU can quickly switch back and forth between ringbuffers by only + * updating a few registers (often only the wptr). + * + * These are the CPU aware registers in the record: + * @magic: Must always be 0xAE399D6EUL + * @info: Type of the record - written 0 by the CPU, updated by the CP + * @errno: preemption error record + * @data: Data field in YIELD and SET_MARKER packets, Written and used by CP + * @cntl: Value of RB_CNTL written by CPU, save/restored by CP + * @rptr: Value of RB_RPTR written by CPU, save/restored by CP + * @wptr: Value of RB_WPTR written by CPU, save/restored by CP + * @_pad: Reserved/padding + * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP + * @rbase: Value of RB_BASE written by CPU, save/restored by CP + * @counter: GPU address of the storage area for the preemption counters + * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP + */ +struct a6xx_preempt_record { + u32 magic; + u32 info; + u32 errno; + u32 data; + u32 cntl; + u32 rptr; + u32 wptr; + u32 _pad; + u64 rptr_addr; + u64 rbase; + u64 counter; + u64 bv_rptr_addr; +}; + +#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL + +#define PREEMPT_SMMU_INFO_SIZE 4096 + +#define PREEMPT_RECORD_SIZE(adreno_gpu) \ + ((adreno_gpu->info->preempt_record_size) == 0 ? \ + 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size)) + +/* + * The preemption counter block is a storage area for the value of the + * preemption counters that are saved immediately before context switch. We + * append it on to the end of the allocation for the preemption record. 
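+ *
+ * For example (values from the device table above): an a730 preemption
+ * record is 2860 KiB and an a740 record is 4192 KiB, while a GPU that
+ * leaves preempt_record_size unset falls back to the 4192 KiB default
+ * in PREEMPT_RECORD_SIZE(); the counter block itself is only
+ * 16 * 4 = 64 bytes.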
+ */ +#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4) + +struct a7xx_cp_smmu_info { + u32 magic; + u32 _pad4; + u64 ttbr0; + u32 asid; + u32 context_idr; + u32 context_bank; +}; + +#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL + +/* * Given a register and a count, return a value to program into * REG_CP_PROTECT_REG(n) - this will block both reads and writes for * _len + 1 registers starting at _reg. @@ -108,6 +250,34 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); +void a6xx_preempt_init(struct msm_gpu *gpu); +void a6xx_preempt_hw_init(struct msm_gpu *gpu); +void a6xx_preempt_trigger(struct msm_gpu *gpu); +void a6xx_preempt_irq(struct msm_gpu *gpu); +void a6xx_preempt_fini(struct msm_gpu *gpu); +int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue); +void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue); + +/* Return true if we are in a preempt state */ +static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu) +{ + /* + * Make sure the read to preempt_state is ordered with respect to reads + * of other variables before ... + */ + smp_rmb(); + + int preempt_state = atomic_read(&a6xx_gpu->preempt_state); + + /* ... and after. */ + smp_rmb(); + + return !(preempt_state == PREEMPT_NONE || + preempt_state == PREEMPT_FINISH); +} + void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, bool suspended); unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index cdb3f6e74d3e..cb8844ed46b2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -478,6 +478,37 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) msg->cnoc_cmds_data[1][0] = 0x60000001; } +static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; + + msg->ddr_cmds_addrs[0] = 0x50004; + msg->ddr_cmds_addrs[1] = 0x50000; + msg->ddr_cmds_addrs[2] = 0x500b4; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x50058; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} + static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) { /* @@ -630,32 +661,44 @@ static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) { - struct a6xx_hfi_msg_bw_table msg = { 0 }; + struct a6xx_hfi_msg_bw_table *msg; struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + if (gmu->bw_table) + goto send; + + msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + if (adreno_is_a618(adreno_gpu)) - a618_build_bw_table(&msg); + a618_build_bw_table(msg); else if (adreno_is_a619(adreno_gpu)) - a619_build_bw_table(&msg); + a619_build_bw_table(msg); else if (adreno_is_a640_family(adreno_gpu)) - 
a640_build_bw_table(&msg); + a640_build_bw_table(msg); else if (adreno_is_a650(adreno_gpu)) - a650_build_bw_table(&msg); + a650_build_bw_table(msg); else if (adreno_is_7c3(adreno_gpu)) - adreno_7c3_build_bw_table(&msg); + adreno_7c3_build_bw_table(msg); else if (adreno_is_a660(adreno_gpu)) - a660_build_bw_table(&msg); + a660_build_bw_table(msg); + else if (adreno_is_a663(adreno_gpu)) + a663_build_bw_table(msg); else if (adreno_is_a690(adreno_gpu)) - a690_build_bw_table(&msg); + a690_build_bw_table(msg); else if (adreno_is_a730(adreno_gpu)) - a730_build_bw_table(&msg); + a730_build_bw_table(msg); else if (adreno_is_a740_family(adreno_gpu)) - a740_build_bw_table(&msg); + a740_build_bw_table(msg); else - a6xx_build_bw_table(&msg); + a6xx_build_bw_table(msg); + + gmu->bw_table = msg; - return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), +send: + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)), NULL, 0); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c new file mode 100644 index 000000000000..2fd4e39f618f --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2023 Collabora, Ltd. */ +/* Copyright (c) 2024 Valve Corporation */ + +#include "msm_gem.h" +#include "a6xx_gpu.h" +#include "a6xx_gmu.xml.h" +#include "msm_mmu.h" +#include "msm_gpu_trace.h" + +/* + * Try to transition the preemption state from old to new. Return + * true on success or false if the original state wasn't 'old' + */ +static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu, + enum a6xx_preempt_state old, enum a6xx_preempt_state new) +{ + enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state, + old, new); + + return (cur == old); +} + +/* + * Force the preemption state to the specified state. This is used in cases + * where the current state is known and won't change + */ +static inline void set_preempt_state(struct a6xx_gpu *gpu, + enum a6xx_preempt_state new) +{ + /* + * preempt_state may be read by other cores trying to trigger a + * preemption or in the interrupt handler so barriers are needed + * before... + */ + smp_mb__before_atomic(); + atomic_set(&gpu->preempt_state, new); + /* ... 
and after*/ + smp_mb__after_atomic(); +} + +/* Write the most recent wptr for the given ring into the hardware */ +static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + unsigned long flags; + uint32_t wptr; + + spin_lock_irqsave(&ring->preempt_lock, flags); + + if (ring->restore_wptr) { + wptr = get_wptr(ring); + + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + + ring->restore_wptr = false; + } + + spin_unlock_irqrestore(&ring->preempt_lock, flags); +} + +/* Return the highest priority ringbuffer with something in it */ +static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + unsigned long flags; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + bool empty; + struct msm_ringbuffer *ring = gpu->rb[i]; + + spin_lock_irqsave(&ring->preempt_lock, flags); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a6xx_gpu->cur_ring) + empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i]; + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + if (!empty) + return ring; + } + + return NULL; +} + +static void a6xx_preempt_timer(struct timer_list *t) +{ + struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer); + struct msm_gpu *gpu = &a6xx_gpu->base.base; + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED)) + return; + + dev_err(dev->dev, "%s: preemption timed out\n", gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + u32 count = 0; + + postamble[count++] = PKT7(CP_REG_RMW, 3); + postamble[count++] = REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD; + postamble[count++] = 0; + postamble[count++] = 1; + + postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6); + postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ); + postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO( + REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS); + postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0); + postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1); + postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1); + postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0); + + a6xx_gpu->preempt_postamble_len = count; + + a6xx_gpu->postamble_enabled = true; +} + +static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + + /* + * Disable the postamble by replacing the first packet header with a NOP + * that covers the whole buffer. + */ + *postamble = PKT7(CP_NOP, (a6xx_gpu->preempt_postamble_len - 1)); + + a6xx_gpu->postamble_enabled = false; +} + +void a6xx_preempt_irq(struct msm_gpu *gpu) +{ + uint32_t status; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) + return; + + /* Delete the preemption watchdog timer */ + del_timer(&a6xx_gpu->preempt_timer); + + /* + * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL + * to zero before firing the interrupt, but there is a non zero chance + * of a hardware condition or a software race that could set it again + * before we have a chance to finish. 
If that happens, log and go for + * recovery + */ + status = gpu_read(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL); + if (unlikely(status & A6XX_CP_CONTEXT_SWITCH_CNTL_STOP)) { + DRM_DEV_ERROR(&gpu->pdev->dev, + "!!!!!!!!!!!!!!!! preemption faulted !!!!!!!!!!!!!! irq\n"); + set_preempt_state(a6xx_gpu, PREEMPT_FAULTED); + dev_err(dev->dev, "%s: Preemption failed to complete\n", + gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); + return; + } + + a6xx_gpu->cur_ring = a6xx_gpu->next_ring; + a6xx_gpu->next_ring = NULL; + + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + + update_wptr(gpu, a6xx_gpu->cur_ring); + + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id); + + /* + * Retrigger preemption to avoid a deadlock that might occur when preemption + * is skipped due to it being already in flight when requested. + */ + a6xx_preempt_trigger(gpu); +} + +void a6xx_preempt_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings == 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[i]; + + record_ptr->wptr = 0; + record_ptr->rptr = 0; + record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]); + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rbase = gpu->rb[i]->iova; + } + + /* Write a 0 to signal that we aren't switching pagetables */ + gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0); + + /* Enable the GMEM save/restore feature for preemption */ + gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1); + + /* Reset the preemption state */ + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + spin_lock_init(&a6xx_gpu->eval_lock); + + /* Always come up on rb 0 */ + a6xx_gpu->cur_ring = gpu->rb[0]; +} + +void a6xx_preempt_trigger(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + unsigned long flags; + struct msm_ringbuffer *ring; + unsigned int cntl; + bool sysprof; + + if (gpu->nr_rings == 1) + return; + + /* + * Lock to make sure another thread attempting preemption doesn't skip it + * while we are still evaluating the next ring. This makes sure the other + * thread does start preemption if we abort it and avoids a soft lock. + */ + spin_lock_irqsave(&a6xx_gpu->eval_lock, flags); + + /* + * Try to start preemption by moving from NONE to START. 
If + * unsuccessful, a preemption is already in flight + */ + if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START)) { + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + cntl = A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL(a6xx_gpu->preempt_level); + + if (a6xx_gpu->skip_save_restore) + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE; + + if (a6xx_gpu->uses_gmem) + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM; + + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_STOP; + + /* Get the next ring to preempt to */ + ring = get_next_ring(gpu); + + /* + * If no ring is populated or the highest priority ring is the current + * one do nothing except to update the wptr to the latest and greatest + */ + if (!ring || (a6xx_gpu->cur_ring == ring)) { + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + update_wptr(gpu, a6xx_gpu->cur_ring); + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + + spin_lock_irqsave(&ring->preempt_lock, flags); + + struct a7xx_cp_smmu_info *smmu_info_ptr = + a6xx_gpu->preempt_smmu[ring->id]; + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[ring->id]; + u64 ttbr0 = ring->memptrs->ttbr0; + u32 context_idr = ring->memptrs->context_idr; + + smmu_info_ptr->ttbr0 = ttbr0; + smmu_info_ptr->context_idr = context_idr; + record_ptr->wptr = get_wptr(ring); + + /* + * The GPU will write the wptr we set above when we preempt. Reset + * restore_wptr to make sure that we don't write WPTR to the same + * thing twice. It's still possible subsequent submissions will update + * wptr again, in which case they will set the flag to true. This has + * to be protected by the lock for setting the flag and updating wptr + * to be atomic. 
+ */ + ring->restore_wptr = false; + + trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id); + + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + gpu_write64(gpu, + REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, + a6xx_gpu->preempt_smmu_iova[ring->id]); + + gpu_write64(gpu, + REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR, + a6xx_gpu->preempt_iova[ring->id]); + + a6xx_gpu->next_ring = ring; + + /* Start a timer to catch a stuck preemption */ + mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000)); + + /* Enable or disable postamble as needed */ + sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + + if (!sysprof && !a6xx_gpu->postamble_enabled) + preempt_prepare_postamble(a6xx_gpu); + + if (sysprof && a6xx_gpu->postamble_enabled) + preempt_disable_postamble(a6xx_gpu); + + /* Set the preemption state to triggered */ + set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED); + + /* Trigger the preemption */ + gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl); +} + +static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, + struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct drm_gem_object *bo = NULL; + phys_addr_t ttbr; + u64 iova = 0; + void *ptr; + int asid; + + ptr = msm_gem_kernel_new(gpu->dev, + PREEMPT_RECORD_SIZE(adreno_gpu), + MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + memset(ptr, 0, PREEMPT_RECORD_SIZE(adreno_gpu)); + + msm_gem_object_set_name(bo, "preempt_record ring%d", ring->id); + + a6xx_gpu->preempt_bo[ring->id] = bo; + a6xx_gpu->preempt_iova[ring->id] = iova; + a6xx_gpu->preempt[ring->id] = ptr; + + struct a6xx_preempt_record *record_ptr = ptr; + + ptr = msm_gem_kernel_new(gpu->dev, + PREEMPT_SMMU_INFO_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, + gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + memset(ptr, 0, PREEMPT_SMMU_INFO_SIZE); + + msm_gem_object_set_name(bo, "preempt_smmu_info ring%d", ring->id); + + a6xx_gpu->preempt_smmu_bo[ring->id] = bo; + a6xx_gpu->preempt_smmu_iova[ring->id] = iova; + a6xx_gpu->preempt_smmu[ring->id] = ptr; + + struct a7xx_cp_smmu_info *smmu_info_ptr = ptr; + + msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid); + + smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC; + smmu_info_ptr->ttbr0 = ttbr; + smmu_info_ptr->asid = 0xdecafbad; + smmu_info_ptr->context_idr = 0; + + /* Set up the defaults on the preemption record */ + record_ptr->magic = A6XX_PREEMPT_RECORD_MAGIC; + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rptr = 0; + record_ptr->wptr = 0; + record_ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; + record_ptr->rbase = ring->iova; + record_ptr->counter = 0; + record_ptr->bv_rptr_addr = rbmemptr(ring, bv_rptr); + + return 0; +} + +void a6xx_preempt_fini(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + for (i = 0; i < gpu->nr_rings; i++) + msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace); +} + +void a6xx_preempt_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings <= 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + if (preempt_init_ring(a6xx_gpu, gpu->rb[i])) + goto fail; + } + + /* TODO: make this configurable? 
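+ *
+ * These three values feed CP_CONTEXT_SWITCH_CNTL in
+ * a6xx_preempt_trigger(): LEVEL(1) appears to restrict preemption to
+ * safe points such as IB1LIST boundaries (hence the 0x00d/0x00e
+ * SET_MARKER packets emitted in a7xx_submit() for preemptible queues),
+ * while SKIP_SAVE_RESTORE and USES_GMEM tell the firmware how much
+ * context, GMEM included, it has to save and restore.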
*/ + a6xx_gpu->preempt_level = 1; + a6xx_gpu->uses_gmem = 1; + a6xx_gpu->skip_save_restore = 1; + + a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev, + PAGE_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, + gpu->aspace, &a6xx_gpu->preempt_postamble_bo, + &a6xx_gpu->preempt_postamble_iova); + + preempt_prepare_postamble(a6xx_gpu); + + if (IS_ERR(a6xx_gpu->preempt_postamble_ptr)) + goto fail; + + timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0); + + return; +fail: + /* + * On any failure our adventure is over. Clean up and + * set nr_rings to 1 to force preemption off + */ + a6xx_preempt_fini(gpu); + gpu->nr_rings = 1; + + DRM_DEV_ERROR(&gpu->pdev->dev, + "preemption init failed, disabling preemption\n"); + + return; +}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index cfc74a9e2646..9ffe91920fbf 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -20,6 +20,10 @@ bool allow_vram_carveout = false; MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU"); module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600); +int enable_preemption = -1; +MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on, 0=disable, -1=auto (default))"); +module_param(enable_preemption, int, 0600); + extern const struct adreno_gpulist a2xx_gpulist; extern const struct adreno_gpulist a3xx_gpulist; extern const struct adreno_gpulist a4xx_gpulist;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 3a40b38f2467..75f5367e73ca 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -533,7 +533,7 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu) if (!adreno_gpu->info->fw[i]) continue; - /* Skip loading GMU firwmare with GMU Wrapper */ + /* Skip loading GMU firmware with GMU Wrapper */ if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU) continue;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 58d7e7915c57..e71f420f8b3a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -56,6 +56,7 @@ enum adreno_family { #define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2) #define ADRENO_QUIRK_HAS_HW_APRIV BIT(3) #define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4) +#define ADRENO_QUIRK_PREEMPTION BIT(5) /* Helper for formating the chip_id in the way that userspace tools like * crashdec expect.
@@ -111,6 +112,7 @@ struct adreno_info { * {SHRT_MAX, 0} sentinal. */ struct adreno_speedbin *speedbins; + u64 preempt_record_size; };
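/*
 * Usage sketch (illustrative): with the default enable_preemption = -1,
 * a6xx_gpu_init() brings up four rings only on GPUs whose device-table
 * entry has ADRENO_QUIRK_PREEMPTION set; booting with
 * msm.enable_preemption=0 keeps a single ring even on those GPUs, and
 * =1 forces the four-ring setup regardless of the quirk.
 */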
#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }
@@ -156,6 +158,19 @@ static const struct adreno_protect name = { \ .count_max = __count_max, \ }; +struct adreno_reglist_list { + /** @regs: List of registers **/ + const u32 *regs; + /** @count: Number of registers in the list **/ + u32 count; +}; + +#define DECLARE_ADRENO_REGLIST_LIST(name) \ +static const struct adreno_reglist_list name = { \ + .regs = name ## _regs, \ + .count = ARRAY_SIZE(name ## _regs), \ +}; + struct adreno_gpu { struct msm_gpu base; const struct adreno_info *info;
@@ -455,6 +470,11 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu) return adreno_is_revn(gpu, 680); } +static inline int adreno_is_a663(const struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x06060300; +} + static inline int adreno_is_a690(const struct adreno_gpu *gpu) { return gpu->info->chip_ids[0] == 0x06090000; }
@@ -656,12 +676,15 @@ OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) OUT_RING(ring, PKT4(regindx, cnt)); } +#define PKT7(opcode, cnt) \ + (CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | \ + ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)) + static inline void OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) { adreno_wait_ring(ring, cnt + 1); - OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | - ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); + OUT_RING(ring, PKT7(opcode, cnt)); } struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h new file mode 100644 index 000000000000..ab3dfb0b374e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_14_MSM8937_H +#define _DPU_1_14_MSM8937_H + +static const struct dpu_caps msm8937_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 40 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8937_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8937_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8937_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk =
&dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8937_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + }, +}; + +static const struct dpu_pingpong_cfg msm8937_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_dspp_cfg msm8937_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8937_intf[] = { + { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8937_perf_data = { + .max_bw_low = 3100000, + .max_bw_high = 3100000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 14, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8937_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 14, +}; + +const struct dpu_mdss_cfg dpu_msm8937_cfg = { + .mdss_ver = &msm8937_mdss_ver, + .caps = &msm8937_dpu_caps, + .mdp = msm8937_mdp, + .ctl_count = ARRAY_SIZE(msm8937_ctl), + .ctl = msm8937_ctl, + .sspp_count = ARRAY_SIZE(msm8937_sspp), + .sspp = msm8937_sspp, + .mixer_count = ARRAY_SIZE(msm8937_lm), + .mixer = 
msm8937_lm, + .dspp_count = ARRAY_SIZE(msm8937_dspp), + .dspp = msm8937_dspp, + .pingpong_count = ARRAY_SIZE(msm8937_pp), + .pingpong = msm8937_pp, + .intf_count = ARRAY_SIZE(msm8937_intf), + .intf = msm8937_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8937_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h new file mode 100644 index 000000000000..6bdaecca6761 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_14_MSM8917_H +#define _DPU_1_14_MSM8917_H + +static const struct dpu_caps msm8917_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 16 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8917_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8917_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8917_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8917_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, +}; + +static const struct dpu_pingpong_cfg msm8917_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, +}; + +static const struct dpu_dspp_cfg msm8917_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8917_intf[] = { + { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = 
INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8917_perf_data = { + .max_bw_low = 1800000, + .max_bw_high = 1800000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 21, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8917_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 15, +}; + +const struct dpu_mdss_cfg dpu_msm8917_cfg = { + .mdss_ver = &msm8917_mdss_ver, + .caps = &msm8917_dpu_caps, + .mdp = msm8917_mdp, + .ctl_count = ARRAY_SIZE(msm8917_ctl), + .ctl = msm8917_ctl, + .sspp_count = ARRAY_SIZE(msm8917_sspp), + .sspp = msm8917_sspp, + .mixer_count = ARRAY_SIZE(msm8917_lm), + .mixer = msm8917_lm, + .dspp_count = ARRAY_SIZE(msm8917_dspp), + .dspp = msm8917_dspp, + .pingpong_count = ARRAY_SIZE(msm8917_pp), + .pingpong = msm8917_pp, + .intf_count = ARRAY_SIZE(msm8917_intf), + .intf = msm8917_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8917_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h new file mode 100644 index 000000000000..14f36ea6ad0e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_16_MSM8953_H +#define _DPU_1_16_MSM8953_H + +static const struct dpu_caps msm8953_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 40 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8953_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8953_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8953_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 
0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8953_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + }, +}; + +static const struct dpu_pingpong_cfg msm8953_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_dspp_cfg msm8953_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8953_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x6a000, .len = 0x268, + .type = INTF_NONE, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8953_perf_data = { + .max_bw_low = 3400000, + .max_bw_high = 3400000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 14, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry 
= ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8953_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 16, +}; + +const struct dpu_mdss_cfg dpu_msm8953_cfg = { + .mdss_ver = &msm8953_mdss_ver, + .caps = &msm8953_dpu_caps, + .mdp = msm8953_mdp, + .ctl_count = ARRAY_SIZE(msm8953_ctl), + .ctl = msm8953_ctl, + .sspp_count = ARRAY_SIZE(msm8953_sspp), + .sspp = msm8953_sspp, + .mixer_count = ARRAY_SIZE(msm8953_lm), + .mixer = msm8953_lm, + .dspp_count = ARRAY_SIZE(msm8953_dspp), + .dspp = msm8953_dspp, + .pingpong_count = ARRAY_SIZE(msm8953_pp), + .pingpong = msm8953_pp, + .intf_count = ARRAY_SIZE(msm8953_intf), + .intf = msm8953_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8953_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h new file mode 100644 index 000000000000..491f6f5827d1 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_1_7_MSM8996_H +#define _DPU_1_7_MSM8996_H + +static const struct dpu_caps msm8996_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x7, + .has_src_split = true, + .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8996_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB2] = { .reg_off = 0x2bc, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB3] = { .reg_off = 0x2c4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8996_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x1600, .len = 0x64, + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x1800, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8996_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = 
DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG1, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG2, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG3, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_6", .id = SSPP_RGB2, + .base = 0x18000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 9, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB2, + }, { + .name = "sspp_7", .id = SSPP_RGB3, + .base = 0x1a000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 13, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB3, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8996_MASK, + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x150, + .features = DMA_MSM8996_MASK, + .sblk = &dpu_dma_sblk, + .xin_id = 10, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, +}; + +static const struct dpu_lm_cfg msm8996_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_2, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + }, +}; + +static const struct dpu_pingpong_cfg msm8996_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_TE2_MASK, + .sblk = &msm8996_pp_sblk_te, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_TE2_MASK, + .sblk = &msm8996_pp_sblk_te, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x71000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14), + }, { + .name = 
"pingpong_3", .id = PINGPONG_3, + .base = 0x71800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15), + }, +}; + +static const struct dpu_dsc_cfg msm8996_dsc[] = { + { + .name = "dsc_0", .id = DSC_0, + .base = 0x80000, .len = 0x140, + }, { + .name = "dsc_1", .id = DSC_1, + .base = 0x80400, .len = 0x140, + }, +}; + +static const struct dpu_dspp_cfg msm8996_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8996_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x6a000, .len = 0x268, + .type = INTF_NONE, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x6b800, .len = 0x268, + .type = INTF_HDMI, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8996_perf_data = { + .max_bw_low = 9600000, + .max_bw_high = 9600000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 21, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8996_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 7, +}; + +const struct dpu_mdss_cfg dpu_msm8996_cfg = { + .mdss_ver = &msm8996_mdss_ver, + .caps = &msm8996_dpu_caps, + .mdp = msm8996_mdp, + .ctl_count = ARRAY_SIZE(msm8996_ctl), + .ctl = msm8996_ctl, + .sspp_count = ARRAY_SIZE(msm8996_sspp), + .sspp = msm8996_sspp, + .mixer_count = ARRAY_SIZE(msm8996_lm), + .mixer = msm8996_lm, + .dspp_count = ARRAY_SIZE(msm8996_dspp), + .dspp = msm8996_dspp, + .pingpong_count = ARRAY_SIZE(msm8996_pp), + .pingpong = 
msm8996_pp, + .dsc_count = ARRAY_SIZE(msm8996_dsc), + .dsc = msm8996_dsc, + .intf_count = ARRAY_SIZE(msm8996_intf), + .intf = msm8996_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8996_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index 1d3e9666c741..64c94e919a69 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -157,18 +157,6 @@ static const struct dpu_lm_cfg msm8998_lm[] = { .lm_pair = LM_5, .pingpong = PINGPONG_2, }, { - .name = "lm_3", .id = LM_3, - .base = 0x47000, .len = 0x320, - .features = MIXER_MSM8998_MASK, - .sblk = &msm8998_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { - .name = "lm_4", .id = LM_4, - .base = 0x48000, .len = 0x320, - .features = MIXER_MSM8998_MASK, - .sblk = &msm8998_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { .name = "lm_5", .id = LM_5, .base = 0x49000, .len = 0x320, .features = MIXER_MSM8998_MASK, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h index 7a23389a5732..72bd4f7e9e50 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h @@ -156,25 +156,13 @@ static const struct dpu_lm_cfg sdm845_lm[] = { .pingpong = PINGPONG_2, .dspp = DSPP_2, }, { - .name = "lm_3", .id = LM_3, - .base = 0x0, .len = 0x320, - .features = MIXER_SDM845_MASK, - .sblk = &sdm845_lm_sblk, - .pingpong = PINGPONG_NONE, - .dspp = DSPP_3, - }, { - .name = "lm_4", .id = LM_4, - .base = 0x0, .len = 0x320, - .features = MIXER_SDM845_MASK, - .sblk = &sdm845_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { .name = "lm_5", .id = LM_5, .base = 0x49000, .len = 0x320, .features = MIXER_SDM845_MASK, .sblk = &sdm845_lm_sblk, .lm_pair = LM_2, .pingpong = PINGPONG_3, + .dspp = DSPP_3, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h new file mode 100644 index 000000000000..907b4d7ceb47 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DPU_8_4_SA8775P_H +#define _DPU_8_4_SA8775P_H + +static const struct dpu_caps sa8775p_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 5120, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg sa8775p_mdp = { + .name = "top_0", + .base = 0x0, .len = 0x494, + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ +static const struct dpu_ctl_cfg sa8775p_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x204, + .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x204, + .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x19000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, +}; + +static const struct dpu_sspp_cfg sa8775p_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG1, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG2, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG3, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x32c, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x32c, + .features = DMA_SDM845_MASK_SDMA, + 
.sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x32c, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA2, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x32c, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA3, + }, +}; + +static const struct dpu_lm_cfg sa8775p_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + .dspp = DSPP_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + .dspp = DSPP_3, + }, { + .name = "lm_4", .id = LM_4, + .base = 0x48000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_4, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_4, + .pingpong = PINGPONG_5, + }, +}; + +static const struct dpu_dspp_cfg sa8775p_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_2", .id = DSPP_2, + .base = 0x58000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_3", .id = DSPP_3, + .base = 0x5a000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, +}; + +static const struct dpu_pingpong_cfg sa8775p_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_4", .id = PINGPONG_4, + .base = 0x6d000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + }, { + .name = "pingpong_5", .id = PINGPONG_5, + .base = 
0x6e000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + }, { + .name = "pingpong_6", .id = PINGPONG_6, + .base = 0x65800, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_7", .id = PINGPONG_7, + .base = 0x65c00, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, +}; + +static const struct dpu_merge_3d_cfg sa8775p_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x8, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x8, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x50000, .len = 0x8, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x65f00, .len = 0x8, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains two hard-slice + * DSC encoders, so both share the same base address but have their + * own sub-block addresses. + */ +static const struct dpu_dsc_cfg sa8775p_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_1_1", .id = DSC_3, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_2_0", .id = DSC_4, + .base = 0x82000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_2_1", .id = DSC_5, + .base = 0x82000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, +}; + +static const struct dpu_wb_cfg sa8775p_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +/* TODO: INTF 3, 6, 7 and 8 are used for MST, marked as INTF_NONE for now */ +static const struct dpu_intf_cfg sa8775p_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = 
DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + }, { + .name = "intf_4", .id = INTF_4, + .base = 0x38000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21), + }, { + .name = "intf_6", .id = INTF_6, + .base = 0x3A000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), + }, { + .name = "intf_7", .id = INTF_7, + .base = 0x3b000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19), + }, { + .name = "intf_8", .id = INTF_8, + .base = 0x3c000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_perf_cfg sa8775p_perf_data = { + .max_bw_low = 13600000, + .max_bw_high = 18200000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + /* FIXME: lut tables */ + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfff0, 0xfff0, 0x1}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile), + .entries = sm6350_qos_linear_macrotile + }, + {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile), + .entries = sm6350_qos_linear_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version sa8775p_mdss_ver = { + .core_major_ver = 8, + .core_minor_ver = 4, +}; + +const struct dpu_mdss_cfg dpu_sa8775p_cfg = { + .mdss_ver = &sa8775p_mdss_ver, + .caps = &sa8775p_dpu_caps, + .mdp = &sa8775p_mdp, + .cdm = &sc7280_cdm, + .ctl_count = ARRAY_SIZE(sa8775p_ctl), + .ctl = sa8775p_ctl, + .sspp_count = ARRAY_SIZE(sa8775p_sspp), + .sspp = sa8775p_sspp, + .mixer_count = ARRAY_SIZE(sa8775p_lm), + .mixer = sa8775p_lm, + .dspp_count = ARRAY_SIZE(sa8775p_dspp), + .dspp = sa8775p_dspp, + .pingpong_count = ARRAY_SIZE(sa8775p_pp), + .pingpong = sa8775p_pp, + .dsc_count = ARRAY_SIZE(sa8775p_dsc), + .dsc = sa8775p_dsc, + .merge_3d_count = ARRAY_SIZE(sa8775p_merge_3d), + .merge_3d = sa8775p_merge_3d, + .wb_count = ARRAY_SIZE(sa8775p_wb), + .wb = sa8775p_wb, + .intf_count = ARRAY_SIZE(sa8775p_intf), + .intf = sa8775p_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .perf = &sa8775p_perf_data, +}; + +#endif 
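The DCE note in the sa8775p catalog above describes a layout worth making concrete: each display compression engine packs two hard-slice DSC encoders behind a single register base, so a given encoder's registers are resolved as the shared base plus that encoder's sub-block offset. The sketch below models that addressing; it is an illustration only, and the struct shape and the 0x100/0x200 offsets are assumptions standing in for the real dsc_sblk_0/dsc_sblk_1 tables, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the catalog's DSC sub-block tables. */
struct dsc_sblk {
	uint32_t enc_off; /* assumed offset of the encoder registers within the DCE */
};

static const struct dsc_sblk sblk_0 = { .enc_off = 0x100 }; /* assumption */
static const struct dsc_sblk sblk_1 = { .enc_off = 0x200 }; /* assumption */

struct dsc_cfg {
	const char *name;
	uint32_t base;               /* shared DCE base, e.g. 0x80000 for dce_0_* */
	const struct dsc_sblk *sblk; /* per-encoder sub-block */
};

int main(void)
{
	/* Mirrors dce_0_0/dce_0_1 above: same base, distinct sub-blocks. */
	const struct dsc_cfg dce0[2] = {
		{ "dce_0_0", 0x80000, &sblk_0 },
		{ "dce_0_1", 0x80000, &sblk_1 },
	};

	for (int i = 0; i < 2; i++)
		printf("%s: encoder registers at 0x%x\n", dce0[i].name,
		       (unsigned)(dce0[i].base + dce0[i].sblk->enc_off));
	return 0;
}

This base-plus-sub-block pattern is how the catalog can list six DSC instances (DSC_0..DSC_5) over only three DCE bases.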
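A separate change further down, in the dpu_core_perf.c hunk, is easy to miss: _dpu_core_perf_calc_clk() now begins the vtotal * hdisplay * vrefresh product with a (u64) cast. Without it, the three int factors are multiplied in 32-bit arithmetic and the result can wrap for large, high-refresh modes before it is ever widened. A minimal, self-contained userspace sketch (the mode numbers are hypothetical) shows the magnitude of the error:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 8K-class mode: 7680 pixels wide, 4500 total lines, 144 Hz. */
	int vtotal = 4500, hdisplay = 7680, vrefresh = 144;

	/* Full-width product, as the fixed expression computes it: the cast
	 * forces the whole multiplication chain into 64-bit arithmetic. */
	uint64_t clk = (uint64_t)vtotal * hdisplay * vrefresh;

	/* What a 32-bit intermediate would have kept: the same product
	 * truncated to 32 bits, i.e. the wrap the old expression risked. */
	uint32_t wrapped = (uint32_t)clk;

	printf("64-bit product: %llu\n", (unsigned long long)clk); /* 4976640000 */
	printf("32-bit wrap:    %u\n", (unsigned)wrapped);         /* 681672704 */
	return 0;
}

Casting only the first operand is enough because multiplication associates left to right, so every subsequent factor is promoted to 64 bits before it is multiplied in.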
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h index 7c286bafb948..e7183cf05776 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h @@ -8,72 +8,26 @@ #include "dpu_kms.h" #include "dpu_hw_interrupts.h" -/** - * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler - * @kms: MSM KMS handle - * @return: none - */ void dpu_core_irq_preinstall(struct msm_kms *kms); -/** - * dpu_core_irq_uninstall - uninstall core IRQ handler - * @kms: MSM KMS handle - * @return: none - */ void dpu_core_irq_uninstall(struct msm_kms *kms); -/** - * dpu_core_irq - core IRQ handler - * @kms: MSM KMS handle - * @return: interrupt handling status - */ irqreturn_t dpu_core_irq(struct msm_kms *kms); -/** - * dpu_core_irq_read - IRQ helper function for reading IRQ status - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @return: non-zero if irq detected; otherwise no irq detected - */ u32 dpu_core_irq_read( struct dpu_kms *dpu_kms, unsigned int irq_idx); -/** - * dpu_core_irq_register_callback - For registering callback function on IRQ - * interrupt - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @irq_cb: IRQ callback funcion. - * @irq_arg: IRQ callback argument. - * @return: 0 for success registering callback, otherwise failure - * - * This function supports registration of multiple callbacks for each interrupt. - */ int dpu_core_irq_register_callback( struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), void *irq_arg); -/** - * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ - * interrupt - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @return: 0 for success registering callback, otherwise failure - * - * This function supports registration of multiple callbacks for each interrupt. 
- */ int dpu_core_irq_unregister_callback( struct dpu_kms *dpu_kms, unsigned int irq_idx); -/** - * dpu_debugfs_core_irq_init - register core irq debugfs - * @dpu_kms: pointer to kms - * @parent: debugfs directory root - */ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 68fae048a9a8..6f0a37f954fe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg, mode = &state->adjusted_mode; - crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); + crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); drm_atomic_crtc_for_each_plane(plane, crtc) { pstate = to_dpu_plane_state(plane->state); @@ -140,6 +140,12 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf, perf->max_per_pipe_ib, perf->bw_ctl); } +/** + * dpu_core_perf_crtc_check - validate performance of the given crtc state + * @crtc: Pointer to crtc + * @state: Pointer to new crtc state + * return: zero if success, or error code otherwise + */ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -301,6 +307,12 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms) return clk_rate; } +/** + * dpu_core_perf_crtc_update - update performance of the given crtc + * @crtc: Pointer to crtc + * @params_changed: true if crtc parameters are modified + * return: zero if success, or error code otherwise + */ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, int params_changed) { @@ -446,6 +458,11 @@ static const struct file_operations dpu_core_perf_mode_fops = { .write = _dpu_core_perf_mode_write, }; +/** + * dpu_core_perf_debugfs_init - initialize debugfs for core performance context + * @dpu_kms: Pointer to the dpu_kms struct + * @parent: Pointer to parent debugfs + */ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) { struct dpu_core_perf *perf = &dpu_kms->perf; @@ -482,6 +499,12 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) } #endif +/** + * dpu_core_perf_init - initialize the given core performance context + * @perf: Pointer to core performance context + * @perf_cfg: Pointer to platform performance configuration + * @max_core_clk_rate: Maximum core clock rate + */ int dpu_core_perf_init(struct dpu_core_perf *perf, const struct dpu_perf_cfg *perf_cfg, unsigned long max_core_clk_rate) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h index 4186977390bd..451bf8021114 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h @@ -54,47 +54,20 @@ struct dpu_core_perf { u64 fix_core_ab_vote; }; -/** - * dpu_core_perf_crtc_check - validate performance of the given crtc state - * @crtc: Pointer to crtc - * @state: Pointer to new crtc state - * return: zero if success, or error code otherwise - */ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state); -/** - * dpu_core_perf_crtc_update - update performance of the given crtc - * @crtc: Pointer to crtc - * @params_changed: true if crtc parameters are modified - * return: zero if success, or error code otherwise - */ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, int params_changed); -/** - * 
dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc - * @crtc: Pointer to crtc - */ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc); -/** - * dpu_core_perf_init - initialize the given core performance context - * @perf: Pointer to core performance context - * @perf_cfg: Pointer to platform performance configuration - * @max_core_clk_rate: Maximum core clock rate - */ int dpu_core_perf_init(struct dpu_core_perf *perf, const struct dpu_perf_cfg *perf_cfg, unsigned long max_core_clk_rate); struct dpu_kms; -/** - * dpu_core_perf_debugfs_init - initialize debugfs for core performance context - * @dpu_kms: Pointer to the dpu_kms struct - * @debugfs_parent: Pointer to parent debugfs - */ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent); #endif /* _DPU_CORE_PERF_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index db6c57900781..9f6ffd344693 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -572,6 +572,10 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev->event_lock, flags); } +/** + * dpu_crtc_get_intf_mode - get interface mode of the given crtc + * @crtc: Pointer to crtc + */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -594,6 +598,10 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) return INTF_MODE_NONE; } +/** + * dpu_crtc_vblank_callback - called on vblank irq, issues completion events + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -704,6 +712,10 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event) kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work); } +/** + * dpu_crtc_complete_commit - callback signalling completion of current commit + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_complete_commit(struct drm_crtc *crtc) { trace_dpu_crtc_complete_commit(DRMID(crtc)); @@ -934,6 +946,10 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) return rc; } +/** + * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -1230,6 +1246,24 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, return 0; } +static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + + /* + * max crtc width is equal to the max mixer width * 2 and max height is 4K + */ + return drm_mode_validate_size(mode, + 2 * dpu_kms->catalog->caps->max_mixer_width, + 4096); +} + +/** + * dpu_crtc_vblank - enable or disable vblanks for this crtc + * @crtc: Pointer to drm crtc object + * @en: true to enable vblanks, false to disable + */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -1445,10 +1479,19 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { .atomic_check = dpu_crtc_atomic_check, .atomic_begin = dpu_crtc_atomic_begin, .atomic_flush = dpu_crtc_atomic_flush, + .mode_valid = dpu_crtc_mode_valid, .get_scanout_position = dpu_crtc_get_scanout_position, }; -/* initialize crtc */ +/** + * dpu_crtc_init - create a new crtc object + * @dev: dpu device + * @plane: base plane + * 
@cursor: cursor plane + * @return: new crtc object or error + * + * initialize CRTC + */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index febc3e764a63..0b148f3ce0d7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -239,55 +239,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL; } -/** - * dpu_crtc_vblank - enable or disable vblanks for this crtc - * @crtc: Pointer to drm crtc object - * @en: true to enable vblanks, false to disable - */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en); -/** - * dpu_crtc_vblank_callback - called on vblank irq, issues completion events - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc); -/** - * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc); -/** - * dpu_crtc_complete_commit - callback signalling completion of current commit - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_complete_commit(struct drm_crtc *crtc); -/** - * dpu_crtc_init - create a new crtc object - * @dev: dpu device - * @plane: base plane - * @cursor: cursor plane - * @Return: new crtc object or error - */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor); -/** - * dpu_crtc_register_custom_event - api for enabling/disabling crtc event - * @kms: Pointer to dpu_kms - * @crtc_drm: Pointer to crtc object - * @event: Event that client is interested - * @en: Flag to enable/disable the event - */ -int dpu_crtc_register_custom_event(struct dpu_kms *kms, - struct drm_crtc *crtc_drm, u32 event, bool en); - -/** - * dpu_crtc_get_intf_mode - get interface mode of the given crtc - * @crtc: Pointert to crtc - */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc); /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index bd3698bf0cf7..83de7564e2c1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -217,6 +217,10 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = { 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10 }; +/** + * dpu_encoder_get_drm_fmt - return DRM fourcc format + * @phys_enc: Pointer to physical encoder structure + */ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *drm_enc; @@ -235,6 +239,11 @@ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) return DRM_FORMAT_RGB888; } +/** + * dpu_encoder_needs_periph_flush - return true if physical encoder requires + * peripheral flush + * @phys_enc: Pointer to physical encoder structure + */ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *drm_enc; @@ -253,6 +262,10 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode); } +/** + * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) { const struct dpu_encoder_virt *dpu_enc; @@ -272,6 +285,11 @@ bool 
dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) return false; } +/** + * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled + * for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) { const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -279,6 +297,12 @@ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) return dpu_enc->dsc ? true : false; } +/** + * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained + * in virtual encoder that can collect CRC values + * @drm_enc: Pointer to previously created drm encoder structure + * Returns: Number of physical encoders for given drm encoder + */ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -297,6 +321,10 @@ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) return num_intf; } +/** + * dpu_encoder_setup_misr - enable misr calculations + * @drm_enc: Pointer to previously created drm encoder structure + */ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -315,6 +343,13 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) } } +/** + * dpu_encoder_get_crc - get the crc value from interface blocks + * @drm_enc: Pointer to previously created drm encoder structure + * @crcs: array to fill with CRC data + * @pos: offset into the @crcs array + * Returns: 0 on success, error otherwise + */ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) { struct dpu_encoder_virt *dpu_enc; @@ -385,6 +420,12 @@ static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode) } } +/** + * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has + * timed out, including reporting frame error event to crtc and debug dump + * @phys_enc: Pointer to physical encoder structure + * @intr_idx: Failing interrupt index + */ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, enum dpu_intr_idx intr_idx) { @@ -402,6 +443,15 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, u32 irq_idx, struct dpu_encoder_wait_info *info); +/** + * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. + * note: will call dpu_encoder_helper_wait_for_irq on timeout + * @phys_enc: Pointer to physical encoder structure + * @irq_idx: IRQ index + * @func: IRQ callback to be called in case of timeout + * @wait_info: wait info struct + * @return: 0 or -ERROR + */ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsigned int irq_idx, void (*func)(void *arg), @@ -473,6 +523,10 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, return ret; } +/** + * dpu_encoder_get_vsync_count - get vsync count for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -480,6 +534,10 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) return phys ? atomic_read(&phys->vsync_cnt) : 0; } +/** + * dpu_encoder_get_linecount - get interface line count for the encoder. 
+ * @drm_enc: Pointer to previously created drm encoder structure + */ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -495,6 +553,13 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) return linecount; } +/** + * dpu_encoder_helper_split_config - split display configuration helper function + * This helper function may be used by physical encoders to configure + * the split display related registers. + * @phys_enc: Pointer to physical encoder structure + * @interface: enum dpu_intf setting + */ void dpu_encoder_helper_split_config( struct dpu_encoder_phys *phys_enc, enum dpu_intf interface) @@ -544,6 +609,10 @@ void dpu_encoder_helper_split_config( } } +/** + * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -560,6 +629,12 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) return (num_dsc > 0) && (num_dsc > intf_count); } +/** + * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder + * This helper function is used by physical encoder to get DSC config + * used for this encoder. + * @drm_enc: Pointer to encoder structure + */ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc) { struct msm_drm_private *priv = drm_enc->dev->dev_private; @@ -1089,6 +1164,11 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, return 0; } +/** + * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job) { @@ -1106,6 +1186,11 @@ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, } } +/** + * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job) { @@ -1248,6 +1333,10 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } } +/** + * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs + * @drm_enc: encoder pointer + */ void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1389,6 +1478,12 @@ static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catal return NULL; } +/** + * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception + * @drm_enc: Pointer to drm encoder structure + * @phy_enc: Pointer to physical encoder + * Note: This is called from IRQ handler context. + */ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc) { @@ -1411,6 +1506,12 @@ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, DPU_ATRACE_END("encoder_vblank_callback"); } +/** + * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception + * @drm_enc: Pointer to drm encoder structure + * @phy_enc: Pointer to physical encoder + * Note: This is called from IRQ handler context. 
+ */ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc) { @@ -1429,6 +1530,11 @@ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, DPU_ATRACE_END("encoder_underrun_callback"); } +/** + * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to + * @drm_enc: encoder pointer + * @crtc: crtc pointer + */ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1441,6 +1547,13 @@ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); } +/** + * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if + * the encoder is assigned to the given crtc + * @drm_enc: encoder pointer + * @crtc: crtc pointer + * @enable: true if vblank should be enabled + */ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc, bool enable) { @@ -1465,6 +1578,13 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, } } +/** + * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys + * encoder has completed the last requested frame + * @drm_enc: Pointer to drm encoder structure + * @ready_phys: Pointer to physical encoder + * @event: Event to process + */ void dpu_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct dpu_encoder_phys *ready_phys, u32 event) @@ -1587,6 +1707,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) phys->ops.trigger_start(phys); } +/** + * dpu_encoder_helper_trigger_start - control start helper function + * This helper function may be optionally specified by physical + * encoders if they require ctl_start triggering. + * @phys_enc: Pointer to physical encoder structure + */ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; @@ -1708,6 +1834,11 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); } +/** + * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous + * kickoff and trigger the ctl prepare progress for command mode display. + * @drm_enc: encoder pointer + */ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1784,6 +1915,11 @@ static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, return line_time; } +/** + * dpu_encoder_vsync_time - get the time of the next vsync + * @drm_enc: encoder pointer + * @wakeup_time: pointer to ktime_t to write the vsync time to + */ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) { struct drm_display_mode *mode; @@ -1930,6 +2066,13 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, dsc, dsc_common_mode, initial_lines); } +/** + * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl + * path (i.e. ctl flush and start) at next appropriate time. + * Immediately: if no previous commit is outstanding. + * Delayed: Block until next trigger can be issued. 
+ * @drm_enc: encoder pointer + */ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1966,6 +2109,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); } +/** + * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit. + * @drm_enc: Pointer to drm encoder structure + */ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1987,6 +2134,11 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) return true; } +/** + * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path + * (i.e. ctl flush and start) immediately. + * @drm_enc: encoder pointer + */ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -2085,6 +2237,10 @@ static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) } } +/** + * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline + * @phys_enc: Pointer to physical encoder structure + */ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; @@ -2168,6 +2324,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) ctl->ops.clear_pending_flush(ctl); } +/** + * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block + * @phys_enc: Pointer to physical encoder + * @dpu_fmt: Pointer to the format description + * @output_type: HDMI/WB + */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type) @@ -2472,6 +2634,13 @@ static const struct drm_encoder_funcs dpu_encoder_funcs = { .debugfs_init = dpu_encoder_debugfs_init, }; +/** + * dpu_encoder_init - initialize virtual encoder object + * @dev: Pointer to drm device structure + * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant + * @disp_info: Pointer to display information structure + * Returns: Pointer to newly created drm encoder + */ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info) @@ -2593,6 +2762,10 @@ int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) return ret; } +/** + * dpu_encoder_get_intf_mode - get interface mode of the given encoder + * @encoder: Pointer to drm encoder object + */ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) { struct dpu_encoder_virt *dpu_enc = NULL; @@ -2612,6 +2785,12 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) return INTF_MODE_NONE; } +/** + * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder + * This helper function is used by physical encoder to get DSC blocks mask + * used for this encoder. 
+ * @phys_enc: Pointer to physical encoder structure + */ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *encoder = phys_enc->parent; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index f7465a1774aa..92b5ee390788 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -19,6 +19,8 @@ #define IDLE_TIMEOUT (66 - 16/2) +#define MAX_H_TILES_PER_DISPLAY 2 + /** * struct msm_display_info - defines display properties * @intf_type: INTF_ type @@ -36,159 +38,54 @@ struct msm_display_info { enum dpu_vsync_source vsync_source; }; -/** - * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to - * @encoder: encoder pointer - * @crtc: crtc pointer - */ void dpu_encoder_assign_crtc(struct drm_encoder *encoder, struct drm_crtc *crtc); -/** - * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if - * the encoder is assigned to the given crtc - * @encoder: encoder pointer - * @crtc: crtc pointer - * @enable: true if vblank should be enabled - */ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder, struct drm_crtc *crtc, bool enable); -/** - * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl - * path (i.e. ctl flush and start) at next appropriate time. - * Immediately: if no previous commit is outstanding. - * Delayed: Block until next trigger can be issued. - * @encoder: encoder pointer - */ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder); -/** - * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous - * kickoff and trigger the ctl prepare progress for command mode display. - * @encoder: encoder pointer - */ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder); -/** - * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path - * (i.e. ctl flush and start) immediately. - * @encoder: encoder pointer - */ void dpu_encoder_kickoff(struct drm_encoder *encoder); -/** - * dpu_encoder_wakeup_time - get the time of the next vsync - */ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time); int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder); int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder); -/* - * dpu_encoder_get_intf_mode - get interface mode of the given encoder - * @encoder: Pointer to drm encoder object - */ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder); -/** - * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs - * @encoder: encoder pointer - */ void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder); -/** - * dpu_encoder_init - initialize virtual encoder object - * @dev: Pointer to drm device structure - * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant - * @disp_info: Pointer to display information structure - * Returns: Pointer to newly created drm encoder - */ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info); -/** - * dpu_encoder_set_idle_timeout - set the idle timeout for video - * and command mode encoders. - * @drm_enc: Pointer to previously created drm encoder structure - * @idle_timeout: idle timeout duration in milliseconds - */ -void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc, - u32 idle_timeout); -/** - * dpu_encoder_get_linecount - get interface line count for the encoder. 
- * @drm_enc: Pointer to previously created drm encoder structure - */ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_vsync_count - get vsync count for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - */ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); -/** - * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled - * for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained - * in virtual encoder that can collect CRC values - * @drm_enc: Pointer to previously created drm encoder structure - * Returns: Number of physical encoders for given drm encoder - */ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_setup_misr - enable misr calculations - * @drm_enc: Pointer to previously created drm encoder structure - */ void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder); -/** - * dpu_encoder_get_crc - get the crc value from interface blocks - * @drm_enc: Pointer to previously created drm encoder structure - * Returns: 0 on success, error otherwise - */ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos); -/** - * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc); -/** - * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - * @job: Pointer to the current drm writeback job - */ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); -/** - * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - * @job: Pointer to the current drm writeback job - */ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); -/** - * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit. 
- * @drm_enc: Pointer to drm encoder structure - */ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc); #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index e77ebe3a68da..63f09857025c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -279,37 +279,15 @@ struct dpu_encoder_wait_info { s64 timeout_ms; }; -/** - * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder - * @p: Pointer to init params structure - * Return: Error code or newly allocated encoder - */ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder - * @dev: Corresponding device for devres management - * @p: Pointer to init params structure - * Return: Error code or newly allocated encoder - */ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_phys_wb_init - initialize writeback encoder - * @dev: Corresponding device for devres management - * @init: Pointer to init info structure with initialization params - */ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_helper_trigger_start - control start helper function - * This helper function may be optionally specified by physical - * encoders if they require ctl_start triggering. - * @phys_enc: Pointer to physical encoder structure - */ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc); static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( @@ -331,106 +309,38 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( return BLEND_3D_NONE; } -/** - * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder - * This helper function is used by physical encoder to get DSC blocks mask - * used for this encoder. - * @phys_enc: Pointer to physical encoder structure - */ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder - * This helper function is used by physical encoder to get DSC config - * used for this encoder. - * @drm_enc: Pointer to encoder structure - */ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_drm_fmt - return DRM fourcc format - * @phys_enc: Pointer to physical encoder structure - */ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_needs_periph_flush - return true if physical encoder requires - * peripheral flush - * @phys_enc: Pointer to physical encoder structure - */ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_helper_split_config - split display configuration helper function - * This helper function may be used by physical encoders to configure - * the split display related registers. 
- * @phys_enc: Pointer to physical encoder structure - * @interface: enum dpu_intf setting - */ void dpu_encoder_helper_split_config( struct dpu_encoder_phys *phys_enc, enum dpu_intf interface); -/** - * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has - * timed out, including reporting frame error event to crtc and debug dump - * @phys_enc: Pointer to physical encoder structure - * @intr_idx: Failing interrupt index - */ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, enum dpu_intr_idx intr_idx); -/** - * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. - * note: will call dpu_encoder_helper_wait_for_irq on timeout - * @phys_enc: Pointer to physical encoder structure - * @irq: IRQ index - * @func: IRQ callback to be called in case of timeout - * @wait_info: wait info struct - * @Return: 0 or -ERROR - */ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsigned int irq, void (*func)(void *arg), struct dpu_encoder_wait_info *wait_info); -/** - * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline - * @phys_enc: Pointer to physical encoder structure - */ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block - * @phys_enc: Pointer to physical encoder - * @output_type: HDMI/WB - */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type); -/** - * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * Note: This is called from IRQ handler context. - */ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc); -/** dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * Note: This is called from IRQ handler context. 
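dpu_encoder_helper_wait_for_irq(), whose header comment is dropped just above, still pairs an IRQ index with a timeout callback and a dpu_encoder_wait_info descriptor. A sketch of a hypothetical wait, setting only the timeout_ms member visible in this hunk (the function names and the 50 ms value are illustrative, not taken from the driver):

static void example_irq_timeout(void *arg)
{
	/* invoked by the helper when the IRQ does not arrive in time */
}

static int example_wait_for_frame_done(struct dpu_encoder_phys *phys_enc,
				       unsigned int irq)
{
	struct dpu_encoder_wait_info wait_info = {
		.timeout_ms = 50,	/* illustrative timeout */
	};

	return dpu_encoder_helper_wait_for_irq(phys_enc, irq,
					       example_irq_timeout,
					       &wait_info);
}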
- */ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc); -/** dpu_encoder_frame_done_callback -- Notify virtual encoder that this phys encoder completes last request frame - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * @event: Event to process - */ void dpu_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct dpu_encoder_phys *ready_phys, u32 event); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 6fc31d47cd1d..e9bbccc44dad 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -720,6 +720,12 @@ static void dpu_encoder_phys_cmd_init_ops( ops->get_line_count = dpu_encoder_phys_cmd_get_line_count; } +/** + * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder + * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index d8a2edebfe8c..abd6600046cb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -746,6 +746,12 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count; } +/** + * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder + * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 07035ab77b79..4c006ec74575 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -166,10 +166,10 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc) /** * dpu_encoder_phys_wb_setup_fb - setup output framebuffer * @phys_enc: Pointer to physical encoder - * @fb: Pointer to output framebuffer + * @format: Format of the framebuffer */ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, - struct drm_framebuffer *fb) + const struct msm_format *format) { struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); struct dpu_hw_wb *hw_wb; @@ -193,12 +193,12 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, hw_wb->ops.setup_roi(hw_wb, wb_cfg); if (hw_wb->ops.setup_outformat) - hw_wb->ops.setup_outformat(hw_wb, wb_cfg); + hw_wb->ops.setup_outformat(hw_wb, wb_cfg, format); if (hw_wb->ops.setup_cdp) { const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf; - hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format, + hw_wb->ops.setup_cdp(hw_wb, format, perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable); } @@ -321,15 +321,10 @@ static void dpu_encoder_phys_wb_setup( { struct dpu_hw_wb *hw_wb = phys_enc->hw_wb; struct drm_display_mode mode = phys_enc->cached_mode; - struct drm_framebuffer *fb = NULL; struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); 
- struct drm_writeback_job *wb_job; const struct msm_format *format; - const struct msm_format *dpu_fmt; - wb_job = wb_enc->wb_job; format = msm_framebuffer_format(wb_enc->wb_job->fb); - dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier); DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n", hw_wb->idx - WB_0, mode.name, @@ -341,9 +336,9 @@ static void dpu_encoder_phys_wb_setup( dpu_encoder_phys_wb_set_qos(phys_enc); - dpu_encoder_phys_wb_setup_fb(phys_enc, fb); + dpu_encoder_phys_wb_setup_fb(phys_enc, format); - dpu_encoder_helper_phys_setup_cdm(phys_enc, dpu_fmt, CDM_CDWN_OUTPUT_WB); + dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB); dpu_encoder_phys_wb_setup_ctl(phys_enc); } @@ -587,26 +582,20 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc format = msm_framebuffer_format(job->fb); - wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base, - format->pixel_format, job->fb->modifier); - if (!wb_cfg->dest.format) { - /* this error should be detected during atomic_check */ - DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format); - return; - } - - ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest); + ret = dpu_format_populate_plane_sizes(job->fb, &wb_cfg->dest); if (ret) { - DPU_DEBUG("failed to populate layout %d\n", ret); + DPU_DEBUG("failed to populate plane sizes %d\n", ret); return; } + dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest); + wb_cfg->dest.width = job->fb->width; wb_cfg->dest.height = job->fb->height; - wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes; + wb_cfg->dest.num_planes = format->num_planes; - if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) && - (wb_cfg->dest.format->element[0] == C1_B_Cb)) + if ((format->fetch_type == MDP_PLANE_PLANAR) && + (format->element[0] == C1_B_Cb)) swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]); DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 6b1e9a617da3..59c9427da7dd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -13,9 +13,6 @@ #define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096 -#define DPU_MAX_IMG_WIDTH 0x3FFF -#define DPU_MAX_IMG_HEIGHT 0x3FFF - /* * struct dpu_media_color_map - maps drm format to media format * @format: DRM base pixel format @@ -93,10 +90,9 @@ static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt) return color_fmt; } -static int _dpu_format_get_plane_sizes_ubwc( +static int _dpu_format_populate_plane_sizes_ubwc( const struct msm_format *fmt, - const uint32_t width, - const uint32_t height, + struct drm_framebuffer *fb, struct dpu_hw_fmt_layout *layout) { int i; @@ -104,9 +100,8 @@ static int _dpu_format_get_plane_sizes_ubwc( bool meta = MSM_FORMAT_IS_UBWC(fmt); memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); - layout->format = fmt; - layout->width = width; - layout->height = height; + layout->width = fb->width; + layout->height = fb->height; layout->num_planes = fmt->num_planes; color = _dpu_format_get_media_color_ubwc(fmt); @@ -116,19 +111,19 @@ static int _dpu_format_get_plane_sizes_ubwc( return -EINVAL; } - if (MSM_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(fmt)) { uint32_t y_sclines, uv_sclines; uint32_t y_meta_scanlines = 0; uint32_t uv_meta_scanlines = 0; layout->num_planes = 2; - layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width); - y_sclines =
VENUS_Y_SCANLINES(color, height); + layout->plane_pitch[0] = VENUS_Y_STRIDE(color, fb->width); + y_sclines = VENUS_Y_SCANLINES(color, fb->height); layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); - layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width); - uv_sclines = VENUS_UV_SCANLINES(color, height); + layout->plane_pitch[1] = VENUS_UV_STRIDE(color, fb->width); + uv_sclines = VENUS_UV_SCANLINES(color, fb->height); layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] * uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); @@ -136,13 +131,13 @@ static int _dpu_format_get_plane_sizes_ubwc( goto done; layout->num_planes += 2; - layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width); - y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height); + layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, fb->width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color, fb->height); layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); - layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width); - uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height); + layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, fb->width); + uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, fb->height); layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] * uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); @@ -151,16 +146,16 @@ static int _dpu_format_get_plane_sizes_ubwc( layout->num_planes = 1; - layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width); - rgb_scanlines = VENUS_RGB_SCANLINES(color, height); + layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, fb->width); + rgb_scanlines = VENUS_RGB_SCANLINES(color, fb->height); layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); if (!meta) goto done; layout->num_planes += 2; - layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width); - rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height); + layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, fb->width); + rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, fb->height); layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); } @@ -172,26 +167,23 @@ done: return 0; } -static int _dpu_format_get_plane_sizes_linear( +static int _dpu_format_populate_plane_sizes_linear( const struct msm_format *fmt, - const uint32_t width, - const uint32_t height, - struct dpu_hw_fmt_layout *layout, - const uint32_t *pitches) + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { int i; memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); - layout->format = fmt; - layout->width = width; - layout->height = height; + layout->width = fb->width; + layout->height = fb->height; layout->num_planes = fmt->num_planes; /* Due to memset above, only need to set planes of interest */ if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) { layout->num_planes = 1; - layout->plane_size[0] = width * height * layout->format->bpp; - layout->plane_pitch[0] = width * layout->format->bpp; + layout->plane_size[0] = fb->width * fb->height * fmt->bpp; + layout->plane_pitch[0] = fb->width * fmt->bpp; } else { uint32_t v_subsample, h_subsample; uint32_t chroma_samp; @@ -201,7 +193,7 @@ static int _dpu_format_get_plane_sizes_linear( _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample, &h_subsample); - if (width % h_subsample || height % v_subsample) { + if (fb->width % h_subsample || fb->height % 
v_subsample) { DRM_ERROR("mismatch in subsample vs dimensions\n"); return -EINVAL; } @@ -209,11 +201,11 @@ static int _dpu_format_get_plane_sizes_linear( if ((fmt->pixel_format == DRM_FORMAT_NV12) && (MSM_FORMAT_IS_DX(fmt))) bpp = 2; - layout->plane_pitch[0] = width * bpp; + layout->plane_pitch[0] = fb->width * bpp; layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample; - layout->plane_size[0] = layout->plane_pitch[0] * height; + layout->plane_size[0] = layout->plane_pitch[0] * fb->height; layout->plane_size[1] = layout->plane_pitch[1] * - (height / v_subsample); + (fb->height / v_subsample); if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) { layout->num_planes = 2; @@ -234,8 +226,13 @@ static int _dpu_format_get_plane_sizes_linear( * all the components based on ubwc specifications. */ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) { - if (pitches && layout->plane_pitch[i] < pitches[i]) - layout->plane_pitch[i] = pitches[i]; + if (layout->plane_pitch[i] <= fb->pitches[i]) { + layout->plane_pitch[i] = fb->pitches[i]; + } else { + DRM_DEBUG("plane %u expected pitch %u, fb %u\n", + i, layout->plane_pitch[i], fb->pitches[i]); + return -EINVAL; + } } for (i = 0; i < DPU_MAX_PLANES; i++) @@ -244,53 +241,54 @@ -static int dpu_format_get_plane_sizes( - const struct msm_format *fmt, - const uint32_t w, - const uint32_t h, - struct dpu_hw_fmt_layout *layout, - const uint32_t *pitches) +/** + * dpu_format_populate_plane_sizes - populate non-address part of the layout based on + * fb and format found in the fb + * @fb: framebuffer pointer + * @layout: format layout structure to populate + * + * Return: error code on failure or 0 if the plane sizes were populated + */ +int dpu_format_populate_plane_sizes( struct drm_framebuffer *fb, struct dpu_hw_fmt_layout *layout) { - if (!layout || !fmt) { + const struct msm_format *fmt; + + if (!layout || !fb) { DRM_ERROR("invalid pointer\n"); return -EINVAL; } - if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) { + if (fb->width > DPU_MAX_IMG_WIDTH || + fb->height > DPU_MAX_IMG_HEIGHT) { DRM_ERROR("image dimensions outside max range\n"); return -ERANGE; } + fmt = msm_framebuffer_format(fb); + if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt)) - return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout); + return _dpu_format_populate_plane_sizes_ubwc(fmt, fb, layout); - return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches); + return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout); } -static int _dpu_format_populate_addrs_ubwc( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { + const struct msm_format *fmt; uint32_t base_addr = 0; bool meta; - if (!fb || !layout) { - DRM_ERROR("invalid pointers\n"); - return -EINVAL; - } - - if (aspace) - base_addr = msm_framebuffer_iova(fb, aspace, 0); - if (!base_addr) { - DRM_ERROR("failed to retrieve base addr\n"); - return -EFAULT; - } + base_addr = msm_framebuffer_iova(fb, aspace, 0); - meta = MSM_FORMAT_IS_UBWC(layout->format); + fmt = msm_framebuffer_format(fb); + meta = MSM_FORMAT_IS_UBWC(fmt); /* Per-format logic for verifying active planes */ - if (MSM_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(fmt)) { /************************************************/ /* UBWC ** */ /* buffer ** DPU
PLANE */ @@ -319,7 +317,7 @@ static int _dpu_format_populate_addrs_ubwc( + layout->plane_size[2] + layout->plane_size[3]; if (!meta) - return 0; + return; /* configure Y metadata plane */ layout->plane_addr[2] = base_addr; @@ -350,119 +348,43 @@ static int _dpu_format_populate_addrs_ubwc( layout->plane_addr[1] = 0; if (!meta) - return 0; + return; layout->plane_addr[2] = base_addr; layout->plane_addr[3] = 0; } - return 0; } -static int _dpu_format_populate_addrs_linear( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { unsigned int i; - /* Can now check the pitches given vs pitches expected */ - for (i = 0; i < layout->num_planes; ++i) { - if (layout->plane_pitch[i] > fb->pitches[i]) { - DRM_ERROR("plane %u expected pitch %u, fb %u\n", - i, layout->plane_pitch[i], fb->pitches[i]); - return -EINVAL; - } - } - /* Populate addresses for simple formats here */ - for (i = 0; i < layout->num_planes; ++i) { - if (aspace) - layout->plane_addr[i] = - msm_framebuffer_iova(fb, aspace, i); - if (!layout->plane_addr[i]) { - DRM_ERROR("failed to retrieve base addr\n"); - return -EFAULT; - } - } - - return 0; + for (i = 0; i < layout->num_planes; ++i) + layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i); } -int dpu_format_populate_layout( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +/** + * dpu_format_populate_addrs - populate buffer addresses based on + * mmu, fb, and format found in the fb + * @aspace: address space pointer + * @fb: framebuffer pointer + * @layout: format layout structure to populate + */ +void dpu_format_populate_addrs(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { - int ret; - - if (!fb || !layout) { - DRM_ERROR("invalid arguments\n"); - return -EINVAL; - } + const struct msm_format *fmt; - if ((fb->width > DPU_MAX_IMG_WIDTH) || - (fb->height > DPU_MAX_IMG_HEIGHT)) { - DRM_ERROR("image dimensions outside max range\n"); - return -ERANGE; - } - - layout->format = msm_framebuffer_format(fb); - - /* Populate the plane sizes etc via get_format */ - ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height, - layout, fb->pitches); - if (ret) - return ret; + fmt = msm_framebuffer_format(fb); /* Populate the addresses given the fb */ - if (MSM_FORMAT_IS_UBWC(layout->format) || - MSM_FORMAT_IS_TILE(layout->format)) - ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout); + if (MSM_FORMAT_IS_UBWC(fmt) || + MSM_FORMAT_IS_TILE(fmt)) + _dpu_format_populate_addrs_ubwc(aspace, fb, layout); else - ret = _dpu_format_populate_addrs_linear(aspace, fb, layout); - - return ret; -} - -int dpu_format_check_modified_format( - const struct msm_kms *kms, - const struct msm_format *fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos) -{ - const struct drm_format_info *info; - struct dpu_hw_fmt_layout layout; - uint32_t bos_total_size = 0; - int ret, i; - - if (!fmt || !cmd || !bos) { - DRM_ERROR("invalid arguments\n"); - return -EINVAL; - } - - info = drm_format_info(fmt->pixel_format); - if (!info) - return -EINVAL; - - ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height, - &layout, cmd->pitches); - if (ret) - return ret; - - for (i = 0; i < info->num_planes; i++) { - if (!bos[i]) { - DRM_ERROR("invalid handle for plane %d\n", 
i); - return -EINVAL; - } - if ((i == 0) || (bos[i] != bos[0])) - bos_total_size += bos[i]->size; - } - - if (bos_total_size < layout.total_size) { - DRM_ERROR("buffers total size too small %u expected %u\n", - bos_total_size, layout.total_size); - return -EINVAL; - } - - return 0; + _dpu_format_populate_addrs_linear(aspace, fb, layout); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h index 210d0ed5f0af..c6145d43aa3f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h @@ -31,35 +31,12 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats, return false; } -/** - * dpu_format_check_modified_format - validate format and buffers for - * dpu non-standard, i.e. modified format - * @kms: kms driver - * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format - * @cmd: fb_cmd2 structure user request - * @bos: gem buffer object list - * - * Return: error code on failure, 0 on success - */ -int dpu_format_check_modified_format( - const struct msm_kms *kms, - const struct msm_format *msm_fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos); +void dpu_format_populate_addrs(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout); -/** - * dpu_format_populate_layout - populate the given format layout based on - * mmu, fb, and format found in the fb - * @aspace: address space pointer - * @fb: framebuffer pointer - * @fmtl: format layout structure to populate - * - * Return: error code on failure, -EAGAIN if success but the addresses - * are the same as before or 0 if new addresses were populated - */ -int dpu_format_populate_layout( - struct msm_gem_address_space *aspace, +int dpu_format_populate_plane_sizes( struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *fmtl); + struct dpu_hw_fmt_layout *layout); #endif /*_DPU_FORMATS_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index dcb4fd85e73b..2cbf41f33cc0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -21,6 +21,16 @@ (VIG_BASE_MASK | \ BIT(DPU_SSPP_CSC_10BIT)) +#define VIG_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS) |\ + BIT(DPU_SSPP_SCALER_QSEED2) |\ + BIT(DPU_SSPP_CSC)) + +#define VIG_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_QSEED2) |\ + BIT(DPU_SSPP_CSC)) + #define VIG_MSM8998_MASK \ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) @@ -32,6 +42,12 @@ #define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) +#define DMA_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS)) + +#define DMA_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_CDP)) + #define DMA_MSM8998_MASK \ (BIT(DPU_SSPP_QOS) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ @@ -57,9 +73,19 @@ #define DMA_CURSOR_SDM845_MASK_SDMA \ (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) +#define DMA_CURSOR_MSM8996_MASK \ + (DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR)) + #define DMA_CURSOR_MSM8998_MASK \ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR)) +#define RGB_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS)) + +#define RGB_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_RGB)) + #define MIXER_MSM8998_MASK \ (BIT(DPU_MIXER_SOURCESPLIT)) @@ -69,6 +95,12 @@ #define MIXER_QCM2290_MASK \ (BIT(DPU_DIM_LAYER) | 
BIT(DPU_MIXER_COMBINED_ALPHA)) +#define PINGPONG_MSM8996_MASK \ + (BIT(DPU_PINGPONG_DSC)) + +#define PINGPONG_MSM8996_TE2_MASK \ + (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2)) + #define PINGPONG_SDM845_MASK \ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC)) @@ -115,10 +147,6 @@ #define MAX_HORZ_DECIMATION 4 #define MAX_VERT_DECIMATION 4 -#define MAX_UPSCALE_RATIO 20 -#define MAX_DOWNSCALE_RATIO 4 -#define SSPP_UNITY_SCALE 1 - #define STRCAT(X, Y) (X Y) static const uint32_t plane_formats[] = { @@ -276,8 +304,6 @@ static const u32 wb2_formats_rgb_yuv[] = { /* SSPP common configuration */ #define _VIG_SBLK(scaler_ver) \ { \ - .maxdwnscale = MAX_DOWNSCALE_RATIO, \ - .maxupscale = MAX_UPSCALE_RATIO, \ .scaler_blk = {.name = "scaler", \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ @@ -285,15 +311,11 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = NULL, \ } #define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \ { \ - .maxdwnscale = MAX_DOWNSCALE_RATIO, \ - .maxupscale = MAX_UPSCALE_RATIO, \ .scaler_blk = {.name = "scaler", \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ @@ -301,29 +323,40 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = rot_cfg, \ } #define _VIG_SBLK_NOSCALE() \ { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ + } + +/* qseed2 is not supported, so scaling is disabled */ +#define _VIG_SBLK_QSEED2() \ + { \ + .scaler_blk = {.name = "scaler", \ + /* no version for qseed2 */ \ + .base = 0x200, .len = 0xa0,}, \ + .csc_blk = {.name = "csc", \ + .base = 0x320, .len = 0x100,}, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .rotation_cfg = NULL, \ + } + +#define _RGB_SBLK() \ + { \ + .scaler_blk = {.name = "scaler", \ + .base = 0x200, .len = 0x28,}, \ + .format_list = plane_formats, \ + .num_formats = ARRAY_SIZE(plane_formats), \ } #define _DMA_SBLK() \ { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ } static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { @@ -332,6 +365,9 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { .rot_format_list = rotation_v2_formats, }; +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed2 = + _VIG_SBLK_QSEED2(); + static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale = _VIG_SBLK_NOSCALE(); @@ -363,6 +399,8 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 = static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 = _VIG_SBLK(SSPP_SCALER_VER(3, 3)); +static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK(); + static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK(); /************************************************************* @@ -427,6 +465,15 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
/************************************************************* * PINGPONG sub blocks config *************************************************************/ +static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = { + .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, + .version = 0x1}, +}; + +static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = { + /* No dither block */ +}; + static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, .version = 0x1}, @@ -492,6 +539,34 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { }, }; +static const struct dpu_vbif_cfg msm8996_vbif[] = { + { + .name = "vbif_rt", .id = VBIF_RT, + .base = 0, .len = 0x1040, + .default_ot_rd_limit = 32, + .default_ot_wr_limit = 16, + .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM), + .xin_halt_timeout = 0x4000, + .qos_rp_remap_size = 0x20, + .dynamic_ot_rd_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .dynamic_ot_wr_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .qos_rt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl), + .priority_lvl = msm8998_rt_pri_lvl, + }, + .qos_nrt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl), + .priority_lvl = msm8998_nrt_pri_lvl, + }, + }, +}; + static const struct dpu_vbif_cfg msm8998_vbif[] = { { .name = "vbif_rt", .id = VBIF_RT, @@ -675,6 +750,11 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { * Hardware catalog *************************************************************/ +#include "catalog/dpu_1_7_msm8996.h" +#include "catalog/dpu_1_14_msm8937.h" +#include "catalog/dpu_1_15_msm8917.h" +#include "catalog/dpu_1_16_msm8953.h" + #include "catalog/dpu_3_0_msm8998.h" #include "catalog/dpu_3_2_sdm660.h" #include "catalog/dpu_3_3_sdm630.h" @@ -699,6 +779,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_8_0_sc8280xp.h" #include "catalog/dpu_8_1_sm8450.h" +#include "catalog/dpu_8_4_sa8775p.h" #include "catalog/dpu_9_0_sm8550.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 37e18e820a20..c701d18c3522 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -21,8 +21,8 @@ #define DPU_HW_BLK_NAME_LEN 16 -#define MAX_IMG_WIDTH 0x3fff -#define MAX_IMG_HEIGHT 0x3fff +#define DPU_MAX_IMG_WIDTH 0x3fff +#define DPU_MAX_IMG_HEIGHT 0x3fff #define CRTC_DUAL_MIXERS 2 @@ -364,21 +364,15 @@ struct dpu_caps { /** * struct dpu_sspp_sub_blks : SSPP sub-blocks * common: Pointer to common configurations shared by sub blocks - * @maxdwnscale: max downscale ratio supported(without DECIMATION) - * @maxupscale: maxupscale ratio supported * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps * @qseed_ver: qseed version * @scaler_blk: * @csc_blk: * @format_list: Pointer to list of supported formats * @num_formats: Number of supported formats - * @virt_format_list: Pointer to list of supported formats for virtual planes - * @virt_num_formats: Number of supported formats for virtual planes * @dpu_rotation_cfg: inline rotation configuration */ struct dpu_sspp_sub_blks { - u32 maxdwnscale; - u32 maxupscale; u32 max_per_pipe_bw; u32 qseed_ver; struct dpu_scaler_blk scaler_blk; @@ -386,8 +380,6 @@ struct dpu_sspp_sub_blks { const u32 *format_list; u32 num_formats; - const u32 *virt_format_list; - u32 virt_num_formats; const struct 
dpu_rotation_cfg *rotation_cfg; }; @@ -831,6 +823,10 @@ struct dpu_mdss_cfg { const struct dpu_format_extended *vig_formats; }; +extern const struct dpu_mdss_cfg dpu_msm8917_cfg; +extern const struct dpu_mdss_cfg dpu_msm8937_cfg; +extern const struct dpu_mdss_cfg dpu_msm8953_cfg; +extern const struct dpu_mdss_cfg dpu_msm8996_cfg; extern const struct dpu_mdss_cfg dpu_msm8998_cfg; extern const struct dpu_mdss_cfg dpu_sdm630_cfg; extern const struct dpu_mdss_cfg dpu_sdm660_cfg; @@ -850,6 +846,7 @@ extern const struct dpu_mdss_cfg dpu_sm8350_cfg; extern const struct dpu_mdss_cfg dpu_sc7280_cfg; extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg; extern const struct dpu_mdss_cfg dpu_sm8450_cfg; +extern const struct dpu_mdss_cfg dpu_sa8775p_cfg; extern const struct dpu_mdss_cfg dpu_sm8550_cfg; extern const struct dpu_mdss_cfg dpu_sm8650_cfg; extern const struct dpu_mdss_cfg dpu_x1e80100_cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c index 55d2768a6d4d..ae1534c49ae0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c @@ -222,6 +222,14 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_ DPU_REG_WRITE(c, CDM_MUX, mux_cfg); } +/** + * dpu_hw_cdm_init - initializes the cdm hw driver object. + * should be called once before accessing every cdm. + * @dev: DRM device handle + * @cfg: CDM catalog entry for which driver object is required + * @addr : mapped register io address of MDSS + * @mdss_rev: mdss hw core revision + */ struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, const struct dpu_cdm_cfg *cfg, void __iomem *addr, const struct dpu_mdss_version *mdss_rev) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h index ec71c9886d75..6bb3476a05f8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h @@ -122,14 +122,6 @@ struct dpu_hw_cdm { struct dpu_hw_cdm_ops ops; }; -/** - * dpu_hw_cdm_init - initializes the cdm hw driver object. - * should be called once before accessing every cdm. - * @dev: DRM device handle - * @cdm: CDM catalog entry for which driver object is required - * @addr : mapped register io address of MDSS - * @mdss_rev: mdss hw core revision - */ struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, const struct dpu_cdm_cfg *cdm, void __iomem *addr, const struct dpu_mdss_version *mdss_rev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 2e50049f2f85..4893f10d6a58 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -736,6 +736,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; }; +/** + * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. + * Should be called before accessing any ctl_path register. 
+ * @dev: Corresponding device for devres management + * @cfg: ctl_path catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mixer_count: Number of mixers in @mixer + * @mixer: Pointer to an array of Layer Mixers defined in the catalog + */ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, const struct dpu_ctl_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 4401fdc0f3e4..85c6c835cc87 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -294,15 +294,6 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_ctl, base); } -/** - * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. - * Should be called before accessing any ctl_path register. - * @dev: Corresponding device for devres management - * @cfg: ctl_path catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mixer_count: Number of mixers in @mixer - * @mixer: Pointer to an array of Layer Mixers defined in the catalog - */ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, const struct dpu_ctl_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 5e9aad1b2aa2..657200401f57 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -190,6 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops, ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk; }; +/** + * dpu_hw_dsc_init() - Initializes the DSC hw driver object. + * @dev: Corresponding device for devres management + * @cfg: DSC catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: Error code or allocated dpu_hw_dsc context + */ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h index 989c88d2449b..fc171bdeca48 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h @@ -62,24 +62,10 @@ struct dpu_hw_dsc { struct dpu_hw_dsc_ops ops; }; -/** - * dpu_hw_dsc_init() - Initializes the DSC hw driver object. 
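The v1.1 and v1.2 DSC constructors declared in this header share a signature, so a caller only chooses which init to invoke; per the kerneldoc being moved here, both return either an error code or an allocated dpu_hw_dsc context. A sketch, assuming the usual ERR_PTR return convention and a placeholder revision flag (the real driver selects the constructor from catalog data, not a bare bool):

static struct dpu_hw_dsc *example_dsc_get(struct drm_device *dev,
					  const struct dpu_dsc_cfg *cfg,
					  void __iomem *addr,
					  bool dsc_is_v1_2)
{
	/* hypothetical dispatch; callers check the result with IS_ERR() */
	if (dsc_is_v1_2)
		return dpu_hw_dsc_init_1_2(dev, cfg, addr);

	return dpu_hw_dsc_init(dev, cfg, addr);
}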
- * @dev: Corresponding device for devres management - * @cfg: DSC catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: Error code or allocated dpu_hw_dsc context - */ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); -/** - * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object - * @dev: Corresponding device for devres management - * @cfg: DSC catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Returns: Error code or allocated dpu_hw_dsc context - */ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c index ba193b0376fe..b9c433567262 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c @@ -369,6 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops, ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2; } +/** + * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object + * @dev: Corresponding device for devres management + * @cfg: DSC catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Returns: Error code or allocated dpu_hw_dsc context + */ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index b1da88e2935f..829ca272873e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -70,6 +70,14 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c, c->ops.setup_pcc = dpu_setup_dspp_pcc; } +/** + * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. + * should be called once before accessing every DSPP. + * @dev: Corresponding device for devres management + * @cfg: DSPP catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: pointer to structure or ERR_PTR + */ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, const struct dpu_dspp_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h index 3b435690b6cc..45c26cd49fa3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h @@ -78,14 +78,6 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_dspp, base); } -/** - * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. - * should be called once before accessing every DSPP. 
- * @dev: Corresponding device for devres management - * @cfg: DSPP catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: pointer to structure or ERR_PTR - */ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, const struct dpu_dspp_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index b85881aab047..49bd77a755aa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -237,6 +237,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_entry->cb(irq_entry->arg); } +/** + * dpu_core_irq - core IRQ handler + * @kms: MSM KMS handle + * @return: interrupt handling status + */ irqreturn_t dpu_core_irq(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -442,6 +447,12 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) wmb(); } +/** + * dpu_core_irq_read - IRQ helper function for reading IRQ status + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @return: non-zero if irq detected; otherwise no irq detected + */ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, unsigned int irq_idx) { @@ -476,6 +487,12 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, return intr_status; } +/** + * dpu_hw_intr_init(): Initializes the interrupts hw object + * @dev: Corresponding device for devres management + * @addr: mapped register io address of MDP + * @m: pointer to MDSS catalog data + */ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, void __iomem *addr, const struct dpu_mdss_cfg *m) @@ -517,6 +534,17 @@ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, return intr; } +/** + * dpu_core_irq_register_callback - For registering callback function on IRQ + * interrupt + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @irq_cb: IRQ callback function. + * @irq_arg: IRQ callback argument. + * @return: 0 for success registering callback, otherwise failure + * + * This function supports registration of multiple callbacks for each interrupt. + */ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), @@ -567,6 +595,15 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, return 0; } +/** + * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ + * interrupt + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @return: 0 for success unregistering callback, otherwise failure + * + * This function supports registration of multiple callbacks for each interrupt.
+ */ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx) { @@ -628,6 +665,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); +/** + * dpu_debugfs_core_irq_init - register core irq debugfs + * @dpu_kms: pointer to kms + * @parent: debugfs directory root + */ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent) { @@ -636,6 +678,11 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, } #endif +/** + * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler + * @kms: MSM KMS handle + * @return: none + */ void dpu_core_irq_preinstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -653,6 +700,11 @@ void dpu_core_irq_preinstall(struct msm_kms *kms) } } +/** + * dpu_core_irq_uninstall - uninstall core IRQ handler + * @kms: MSM KMS handle + * @return: none + */ void dpu_core_irq_uninstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index 564b750a28fe..142358a105c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -68,12 +68,6 @@ struct dpu_hw_intr { struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS]; }; -/** - * dpu_hw_intr_init(): Initializes the interrupts hw object - * @dev: Corresponding device for devres management - * @addr: mapped register io address of MDP - * @m: pointer to MDSS catalog data - */ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, void __iomem *addr, const struct dpu_mdss_cfg *m); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 29cb854f831a..fb1d25baa518 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -547,6 +547,14 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf, DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2); } +/** + * dpu_hw_intf_init() - Initializes the INTF driver for the passed + * interface catalog entry. + * @dev: Corresponding device for devres management + * @cfg: interface catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, const struct dpu_intf_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index fc23650dfbf0..114be272ac0a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -130,14 +130,6 @@ struct dpu_hw_intf { struct dpu_hw_intf_ops ops; }; -/** - * dpu_hw_intf_init() - Initializes the INTF driver for the passed - * interface catalog entry. 
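The callback API documented above is symmetric on irq_idx: whatever is registered must eventually be dropped with the matching unregister call. A sketch of a hypothetical consumer, assuming the register call's final parameter is the irq_arg named in its kerneldoc (the example names are illustrative):

static void example_irq_cb(void *arg)
{
	/* arg is the irq_arg supplied at registration time */
}

static int example_attach(struct dpu_kms *dpu_kms, unsigned int irq_idx,
			  void *priv)
{
	return dpu_core_irq_register_callback(dpu_kms, irq_idx,
					      example_irq_cb, priv);
}

static void example_detach(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}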
- * @dev: Corresponding device for devres management - * @cfg: interface catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - */ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, const struct dpu_intf_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index 1d3ccf3228c6..81b56f066519 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -158,6 +158,13 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops, ops->collect_misr = dpu_hw_lm_collect_misr; } +/** + * dpu_hw_lm_init() - Initializes the mixer hw driver object. + * should be called once before accessing every mixer. + * @dev: Corresponding device for devres management + * @cfg: mixer catalog entry for which driver object is required + * @addr: mapped register io address of MDP + */ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, const struct dpu_lm_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 0a3381755249..6f60fa9b3cd7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -93,13 +93,6 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_mixer, base); } -/** - * dpu_hw_lm_init() - Initializes the mixer hw driver object. - * should be called once before accessing every mixer. - * @dev: Corresponding device for devres management - * @cfg: mixer catalog entry for which driver object is required - * @addr: mapped register io address of MDP - */ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, const struct dpu_lm_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index a2eff36a2224..f8806a4d317b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -293,7 +293,6 @@ enum dpu_3d_blend_mode { /** * struct dpu_hw_fmt_layout - format information of the source pixel data - * @format: pixel format parameters * @num_planes: number of planes (including meta data planes) * @width: image width * @height: image height @@ -303,7 +302,6 @@ enum dpu_3d_blend_mode { * @plane_pitch: pitch of each plane */ struct dpu_hw_fmt_layout { - const struct msm_format *format; uint32_t num_planes; uint32_t width; uint32_t height; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c index ddfa40a959cb..0b3325f9c870 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c @@ -39,6 +39,14 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c, c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode; }; +/** + * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed + * merge3d catalog entry. 
+ * @dev: Corresponding device for devres management + * @cfg: merge_3d catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: Error code or allocated dpu_hw_merge_3d context + */ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, const struct dpu_merge_3d_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h index c192f02ec1ab..6833c0207523 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h @@ -45,14 +45,6 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_merge_3d, base); } -/** - * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed - * merge3d catalog entry. - * @dev: Corresponding device for devres management - * @cfg: Pingpong catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: Error code or allocated dpu_hw_merge_3d context - */ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, const struct dpu_merge_3d_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c index 2db4c6fba37a..36c0ec775b92 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -283,6 +283,15 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp) return 0; } +/** + * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed + * pingpong catalog entry. + * @dev: Corresponding device for devres management + * @cfg: Pingpong catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + * Return: Error code or allocated dpu_hw_pingpong context + */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, const struct dpu_pingpong_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index a48b69fd79a3..dd99e1c21a1e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -118,15 +118,6 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_pingpong, base); } -/** - * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed - * pingpong catalog entry. - * @dev: Corresponding device for devres management - * @cfg: Pingpong catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - * Return: Error code or allocated dpu_hw_pingpong context - */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, const struct dpu_pingpong_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 2c720f1fc1b2..32c7c8084553 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -672,6 +672,15 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms, } #endif +/** + * dpu_hw_sspp_init() - Initializes the sspp hw driver object. + * Should be called once before accessing every pipe.
+ * @dev: Corresponding device for devres management + * @cfg: Pipe catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * @mdss_data: UBWC / MDSS configuration data + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, const struct dpu_sspp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 4a910b808687..56a0edf2a57c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -12,6 +12,8 @@ struct dpu_hw_sspp; +#define DPU_SSPP_MAX_PITCH_SIZE 0xffff + /** * Flags */ @@ -142,10 +144,12 @@ struct dpu_hw_pixel_ext { * @src_rect: src ROI, caller takes into account the different operations * such as decimation, flip etc to program this field * @dest_rect: destination ROI. + * @rotation: simplified drm rotation hint */ struct dpu_sw_pipe_cfg { struct drm_rect src_rect; struct drm_rect dst_rect; + unsigned int rotation; }; /** @@ -315,15 +319,7 @@ struct dpu_hw_sspp { }; struct dpu_kms; -/** - * dpu_hw_sspp_init() - Initializes the sspp hw driver object. - * Should be called once before accessing every pipe. - * @dev: Corresponding device for devres management - * @cfg: Pipe catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * @mdss_data: UBWC / MDSS configuration data - * @mdss_rev: dpu core's major and minor versions - */ + struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, const struct dpu_sspp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index 0f40eea7f5e2..ad19330de61a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -284,6 +284,13 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, ops->intf_audio_select = dpu_hw_intf_audio_select; } +/** + * dpu_hw_mdptop_init - initializes the top driver for the passed config + * @dev: Corresponding device for devres management + * @cfg: MDP TOP configuration from catalog + * @addr: Mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h index f1ab9fd106e5..04efdcd21ceb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h @@ -157,18 +157,9 @@ struct dpu_hw_mdp { struct dpu_hw_mdp_ops ops; }; -/** - * dpu_hw_mdptop_init - initializes the top driver for the passed config - * @dev: Corresponding device for devres management - * @cfg: MDP TOP configuration from catalog - * @addr: Mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, const struct dpu_mdss_version *mdss_rev); -void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp); - #endif /*_DPU_HW_TOP_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index 98e34afde2d2..af76ad8a8103 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -213,6 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops, 
ops->set_write_gather_en = dpu_hw_set_write_gather_en; } +/** + * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed + * VBIF catalog entry. + * @dev: Corresponding device for devres management + * @cfg: VBIF catalog entry for which driver object is required + * @addr: Mapped register io address of MDSS + */ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, const struct dpu_vbif_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h index e2b4307500e4..285121ec804c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h @@ -105,13 +105,6 @@ struct dpu_hw_vbif { struct dpu_hw_vbif_ops ops; }; -/** - * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed - * VBIF catalog entry. - * @dev: Corresponding device for devres management - * @cfg: VBIF catalog entry for which driver object is required - * @addr: Mapped register io address of MDSS - */ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, const struct dpu_vbif_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c index 93ff01c889b5..fb9f90957762 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -64,10 +64,10 @@ static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx, } static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx, - struct dpu_hw_wb_cfg *data) + struct dpu_hw_wb_cfg *data, + const struct msm_format *fmt) { struct dpu_hw_blk_reg_map *c = &ctx->hw; - const struct msm_format *fmt = data->dest.format; u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp; u32 write_config = 0; u32 opmode = 0; @@ -213,6 +213,14 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops, ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl; } +/** + * dpu_hw_wb_init() - Initializes the writeback hw driver object. + * @dev: Corresponding device for devres management + * @cfg: wb_path catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + * Return: Error code or allocated dpu_hw_wb context + */ struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, const struct dpu_wb_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h index 37497473e16c..ee5e5ab786e1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h @@ -37,7 +37,8 @@ struct dpu_hw_wb_ops { struct dpu_hw_wb_cfg *wb); void (*setup_outformat)(struct dpu_hw_wb *ctx, - struct dpu_hw_wb_cfg *wb); + struct dpu_hw_wb_cfg *wb, + const struct msm_format *fmt); void (*setup_roi)(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb); @@ -74,14 +75,6 @@ struct dpu_hw_wb { struct dpu_hw_wb_ops ops; }; -/** - * dpu_hw_wb_init() - Initializes the writeback hw driver object. 
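Since dpu_hw_fmt_layout no longer carries a format pointer (see the dpu_hw_mdss.h hunk above), the setup_outformat op now takes the resolved msm_format explicitly, as the dpu_hw_wb.h change shows. A minimal sketch of a caller under the new signature, with the hw_wb, wb_cfg, and framebuffer arguments assumed from surrounding context:

static void example_set_outformat(struct dpu_hw_wb *hw_wb,
				  struct dpu_hw_wb_cfg *wb_cfg,
				  struct drm_framebuffer *fb)
{
	/* resolve the format once from the fb and hand it to the op */
	const struct msm_format *fmt = msm_framebuffer_format(fb);

	if (hw_wb->ops.setup_outformat)
		hw_wb->ops.setup_outformat(hw_wb, wb_cfg, fmt);
}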
- * @dev: Corresponding device for devres management - * @cfg: wb_path catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - * Return: Error code or allocated dpu_hw_wb context - */ struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, const struct dpu_wb_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 9bcae53c4f45..ca4847b2b738 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -230,6 +230,21 @@ static int dpu_regset32_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(dpu_regset32); +/** + * dpu_debugfs_create_regset32 - Create register read back file for debugfs + * + * This function is almost identical to the standard debugfs_create_regset32() + * function, with the main difference being that a list of register + * names/offsets do not need to be provided. The 'read' function simply outputs + * sequential register values over a specified range. + * + * @name: File name within debugfs + * @mode: File mode within debugfs + * @parent: Parent directory entry within debugfs, can be NULL + * @offset: sub-block offset + * @length: sub-block length, in bytes + * @dpu_kms: pointer to dpu kms structure + */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms) @@ -1025,7 +1040,6 @@ static const struct msm_kms_funcs kms_funcs = { .complete_commit = dpu_kms_complete_commit, .enable_vblank = dpu_kms_enable_vblank, .disable_vblank = dpu_kms_disable_vblank, - .check_modified_format = dpu_format_check_modified_format, .destroy = dpu_kms_destroy, .snapshot = dpu_kms_mdp_snapshot, #ifdef CONFIG_DEBUG_FS @@ -1061,6 +1075,13 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return 0; } +/** + * dpu_kms_get_clk_rate() - get the clock rate + * @dpu_kms: pointer to dpu_kms structure + * @clock_name: clock name to get the rate + * + * Return: current clock rate + */ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name) { struct clk *clk; @@ -1202,13 +1223,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - /* - * max crtc width is equal to the max mixer width * 2 and max height is - * is 4K - */ - dev->mode_config.max_width = - dpu_kms->catalog->caps->max_mixer_width * 2; - dev->mode_config.max_height = 4096; + dev->mode_config.max_width = DPU_MAX_IMG_WIDTH; + dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT; dev->max_vblank_count = 0xffffffff; /* Disable vblank irqs aggressively for power-saving */ @@ -1445,8 +1461,13 @@ static const struct dev_pm_ops dpu_pm_ops = { }; static const struct of_device_id dpu_dt_match[] = { + { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, }, + { .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, }, + { .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, }, + { .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, }, { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, }, { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, }, + { .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, }, { .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, }, { .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, }, { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, }, diff --git 
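
Usage of the debugfs helper documented a few hunks back is straightforward; a hypothetical registration of one raw register window, where the file name, offset and length are made-up values rather than catalog data, and root is assumed to be the DPU debugfs root dentry:

	/* expose 0x200 bytes of MDP registers starting at an assumed
	 * sub-block offset 0x4000 as a read-only sequential dump */
	dpu_debugfs_create_regset32("sspp0_regs", 0444, root,
				    0x4000, 0x200, dpu_kms);
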
a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 935ff6fd172c..88d64d43ea1a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -145,38 +145,11 @@ struct dpu_global_state * @dpu_debugfs_create_regset32: Create 32-bit register dump file */ -/** - * dpu_debugfs_create_regset32 - Create register read back file for debugfs - * - * This function is almost identical to the standard debugfs_create_regset32() - * function, with the main difference being that a list of register - * names/offsets do not need to be provided. The 'read' function simply outputs - * sequential register values over a specified range. - * - * @name: File name within debugfs - * @mode: File mode within debugfs - * @parent: Parent directory entry within debugfs, can be NULL - * @offset: sub-block offset - * @length: sub-block length, in bytes - * @dpu_kms: pointer to dpu kms structure - */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms); /** - * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs - * - * The return value should be passed as the 'parent' argument to subsequent - * debugfs create calls. - * - * @dpu_kms: Pointer to DPU's KMS structure - * - * Return: dentry pointer for DPU's debugfs location - */ -void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); - -/** * DPU info management functions * These functions/definitions allow for building up a 'dpu_info' structure * containing one or more "key=value\n" entries. @@ -189,13 +162,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -/** - * dpu_kms_get_clk_rate() - get the clock rate - * @dpu_kms: pointer to dpu_kms structure - * @clock_name: clock name to get the rate - * - * Return: current clock rate - */ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name); #endif /* __dpu_kms_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 29298e066163..3ffac24333a2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -528,8 +528,7 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe, static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe, const struct msm_format *fmt, bool color_fill, - struct dpu_sw_pipe_cfg *pipe_cfg, - unsigned int rotation) + struct dpu_sw_pipe_cfg *pipe_cfg) { struct dpu_hw_sspp *pipe_hw = pipe->sspp; const struct drm_format_info *info = drm_format_info(fmt->pixel_format); @@ -552,7 +551,7 @@ static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe, dst_height, &scaler3_cfg, fmt, info->hsub, info->vsub, - rotation); + pipe_cfg->rotation); /* configure pixel extension based on scalar config */ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext, @@ -604,7 +603,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate, if (pipe->sspp->ops.setup_rects) pipe->sspp->ops.setup_rects(pipe, &pipe_cfg); - _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation); + _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg); } /** @@ -648,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, struct drm_framebuffer *fb = new_state->fb; struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = 
to_dpu_plane_state(new_state); - struct dpu_hw_fmt_layout layout; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); int ret; @@ -676,17 +674,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, } } - /* validate framebuffer layout before commit */ - ret = dpu_format_populate_layout(pstate->aspace, - new_state->fb, &layout); - if (ret) { - DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); - if (pstate->aspace) - msm_framebuffer_cleanup(new_state->fb, pstate->aspace, - pstate->needs_dirtyfb); - return ret; - } - return 0; } @@ -708,12 +695,17 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane, } static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, - const struct dpu_sspp_sub_blks *sblk, - struct drm_rect src, const struct msm_format *fmt) + struct dpu_sw_pipe *pipe, + struct drm_rect src, + const struct msm_format *fmt) { + const struct dpu_sspp_sub_blks *sblk = pipe->sspp->cap->sblk; size_t num_formats; const u32 *supported_formats; + if (!test_bit(DPU_SSPP_INLINE_ROTATION, &pipe->sspp->cap->features)) + return -EINVAL; + if (!sblk->rotation_cfg) { DPU_ERROR("invalid rotation cfg\n"); return -EINVAL; @@ -743,6 +735,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, { uint32_t min_src_size; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); + int ret; min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1; @@ -780,6 +773,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return -EINVAL; } + if (pipe_cfg->rotation & DRM_MODE_ROTATE_90) { + ret = dpu_plane_check_inline_rotation(pdpu, pipe, pipe_cfg->src_rect, fmt); + if (ret) + return ret; + } + /* max clk check */ if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) { DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n"); @@ -789,37 +788,29 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return 0; } -static int dpu_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +#define MAX_UPSCALE_RATIO 20 +#define MAX_DOWNSCALE_RATIO 4 + +static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, + struct drm_plane_state *new_plane_state, + const struct drm_crtc_state *crtc_state) { - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - int ret = 0, min_scale; + int i, ret = 0, min_scale, max_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe *pipe = &pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - const struct drm_crtc_state *crtc_state = NULL; - const struct msm_format *fmt; struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; struct drm_rect fb_rect = { 0 }; uint32_t max_linewidth; - unsigned int rotation; - uint32_t supported_rotations; - const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap; - const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk; - if (new_plane_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, - new_plane_state->crtc); + min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO); + max_scale = MAX_DOWNSCALE_RATIO << 16; - min_scale = FRAC_16_16(1, sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min_scale, - sblk->maxdwnscale << 16, + max_scale, true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, 
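
The limits handed to drm_atomic_helper_check_plane_state() are src/dst ratios in 16.16 fixed point, so the MAX_UPSCALE_RATIO/MAX_DOWNSCALE_RATIO defines above translate as follows (a worked check under assumed numbers):

	int min_scale = 0x10000 / 20;	/* FRAC_16_16(1, 20): up to 20x upscale */
	int max_scale = 4 << 16;	/* up to 4x downscale */

	/* e.g. a 1000-wide source scaled onto a 250-wide destination has
	 * ratio (1000 << 16) / 250 == 0x40000, which equals max_scale and
	 * therefore still passes the helper's range check */
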
"Check plane state failed (%d)\n", ret); @@ -828,12 +819,6 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, if (!new_plane_state->visible) return 0; - pipe->multirect_index = DPU_SSPP_RECT_SOLO; - pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - r_pipe->sspp = NULL; - pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos; if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) { DPU_ERROR("> %d plane stages assigned\n", @@ -841,13 +826,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - pipe_cfg->src_rect = new_plane_state->src; - /* state->src is 16.16, src_rect is not */ - pipe_cfg->src_rect.x1 >>= 16; - pipe_cfg->src_rect.x2 >>= 16; - pipe_cfg->src_rect.y1 >>= 16; - pipe_cfg->src_rect.y2 >>= 16; + drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); pipe_cfg->dst_rect = new_plane_state->dst; @@ -855,14 +835,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, fb_rect.y2 = new_plane_state->fb->height; /* Ensure fb size is supported */ - if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH || - drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) { + if (drm_rect_width(&fb_rect) > DPU_MAX_IMG_WIDTH || + drm_rect_height(&fb_rect) > DPU_MAX_IMG_HEIGHT) { DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " DRM_RECT_FMT "\n", DRM_RECT_ARG(&fb_rect)); return -E2BIG; } - fmt = msm_framebuffer_format(new_plane_state->fb); + ret = dpu_format_populate_plane_sizes(new_plane_state->fb, &pstate->layout); + if (ret) { + DPU_ERROR_PLANE(pdpu, "failed to get format plane sizes, %d\n", ret); + return ret; + } + + for (i = 0; i < pstate->layout.num_planes; i++) + if (pstate->layout.plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE) + return -E2BIG; max_linewidth = pdpu->catalog->caps->max_linewidth; @@ -872,6 +860,86 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { + if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); + return -E2BIG; + } + + *r_pipe_cfg = *pipe_cfg; + pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; + pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; + r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; + r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; + } else { + memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg)); + } + + drm_rect_rotate_inv(&pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + if (r_pipe_cfg->src_rect.x1 != 0) + drm_rect_rotate_inv(&r_pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + + return 0; +} + +static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, + struct drm_atomic_state *state, + const struct drm_crtc_state *crtc_state) +{ + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + const struct msm_format *fmt; + struct dpu_sw_pipe_cfg *pipe_cfg = 
&pstate->pipe_cfg; + struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + uint32_t max_linewidth; + uint32_t supported_rotations; + const struct dpu_sspp_cfg *pipe_hw_caps; + const struct dpu_sspp_sub_blks *sblk; + int ret = 0; + + pipe_hw_caps = pipe->sspp->cap; + sblk = pipe->sspp->cap->sblk; + + /* + * We already have verified scaling against platform limitations. + * Now check if the SSPP supports scaling at all. + */ + if (!sblk->scaler_blk.len && + ((drm_rect_width(&new_plane_state->src) >> 16 != + drm_rect_width(&new_plane_state->dst)) || + (drm_rect_height(&new_plane_state->src) >> 16 != + drm_rect_height(&new_plane_state->dst)))) + return -ERANGE; + + fmt = msm_framebuffer_format(new_plane_state->fb); + + max_linewidth = pdpu->catalog->caps->max_linewidth; + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + + if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_90; + + pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, + supported_rotations); + r_pipe_cfg->rotation = pipe_cfg->rotation; + + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, + &crtc_state->adjusted_mode); + if (ret) + return ret; + + if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) { /* * In parallel multirect case only the half of the usual width * is supported for tiled formats. If we are here, we know that @@ -885,16 +953,11 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -E2BIG; } - if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { - DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", - DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); - return -E2BIG; - } - if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) || drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) || (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) && !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) || + pipe_cfg->rotation & DRM_MODE_ROTATE_90 || MSM_FORMAT_IS_YUV(fmt)) { DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n", DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); @@ -912,51 +975,48 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, r_pipe->multirect_index = DPU_SSPP_RECT_1; r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL; - *r_pipe_cfg = *pipe_cfg; - pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; - pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; - r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; - r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; - } - - drm_rect_rotate_inv(&pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - if (r_pipe->sspp) - drm_rect_rotate_inv(&r_pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode); - if (ret) - return ret; - - if (r_pipe->sspp) { ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt, &crtc_state->adjusted_mode); if (ret) return ret; } - supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + return 0; +} - if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) - supported_rotations |= DRM_MODE_ROTATE_90; +static int dpu_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state 
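
drm_rotation_simplify() folds an unsupported angle into the mask built above by composing it from a supported rotation plus reflections; for example, on a pipe that can only rotate by 90 degrees:

	unsigned int supported = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 |
				 DRM_MODE_ROTATE_90;

	/* ROTATE_270 == ROTATE_90 + REFLECT_X + REFLECT_Y, so the helper
	 * returns that composition instead of rejecting the request */
	unsigned int r = drm_rotation_simplify(DRM_MODE_ROTATE_270, supported);
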
*new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + int ret = 0; + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + const struct drm_crtc_state *crtc_state = NULL; - rotation = drm_rotation_simplify(new_plane_state->rotation, - supported_rotations); + if (new_plane_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); - if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) && - (rotation & DRM_MODE_ROTATE_90)) { - ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt); - if (ret) - return ret; - } + pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); + r_pipe->sspp = NULL; - pstate->rotation = rotation; - pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state); + if (ret) + return ret; - return 0; + if (!new_plane_state->visible) + return 0; + + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + return dpu_plane_atomic_check_sspp(plane, state, crtc_state); } static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe) @@ -981,6 +1041,10 @@ static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe } +/** + * dpu_plane_flush - final plane operations before commit flush + * @plane: Pointer to drm plane structure + */ void dpu_plane_flush(struct drm_plane *plane) { struct dpu_plane *pdpu; @@ -1060,14 +1124,14 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane, pipe_cfg); } - _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation); + _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg); if (pipe->sspp->ops.setup_multirect) pipe->sspp->ops.setup_multirect( pipe); if (pipe->sspp->ops.setup_format) { - unsigned int rotation = pstate->rotation; + unsigned int rotation = pipe_cfg->rotation; src_flags = 0x0; @@ -1101,7 +1165,8 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane, _dpu_plane_set_qos_remap(plane, pipe); } -static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) +static void dpu_plane_sspp_atomic_update(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct dpu_plane *pdpu = to_dpu_plane(plane); struct drm_plane_state *state = plane->state; @@ -1115,17 +1180,6 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) msm_framebuffer_format(fb); struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; - struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); - struct msm_gem_address_space *aspace = kms->base.aspace; - struct dpu_hw_fmt_layout layout; - bool layout_valid = false; - int ret; - - ret = dpu_format_populate_layout(aspace, fb, &layout); - if (ret) - DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); - else - layout_valid = true; pstate->pending = true; @@ -1133,6 +1187,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe); pdpu->is_rt_pipe = is_rt_pipe; + dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout); + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u 
" DRM_RECT_FMT ", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), crtc->base.id, DRM_RECT_ARG(&state->dst), @@ -1140,12 +1196,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt, drm_mode_vrefresh(&crtc->mode), - layout_valid ? &layout : NULL); + &pstate->layout); if (r_pipe->sspp) { dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt, drm_mode_vrefresh(&crtc->mode), - layout_valid ? &layout : NULL); + &pstate->layout); } if (pstate->needs_qos_remap) @@ -1197,7 +1253,7 @@ static void dpu_plane_atomic_update(struct drm_plane *plane, if (!new_state->visible) { _dpu_plane_atomic_disable(plane); } else { - dpu_plane_sspp_atomic_update(plane); + dpu_plane_sspp_atomic_update(plane, new_state); } } @@ -1301,7 +1357,6 @@ static void dpu_plane_reset(struct drm_plane *plane) { struct dpu_plane *pdpu; struct dpu_plane_state *pstate; - struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); if (!plane) { DPU_ERROR("invalid plane\n"); @@ -1323,16 +1378,6 @@ static void dpu_plane_reset(struct drm_plane *plane) return; } - /* - * Set the SSPP here until we have proper virtualized DPU planes. - * This is the place where the state is allocated, so fill it fully. - */ - pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); - pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO; - pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE; - - pstate->r_pipe.sspp = NULL; - __drm_atomic_helper_plane_reset(plane, &pstate->base); } @@ -1388,7 +1433,15 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { .atomic_update = dpu_plane_atomic_update, }; -/* initialize plane */ +/** + * dpu_plane_init - create new dpu plane for the given pipe + * @dev: Pointer to DRM device + * @pipe: dpu hardware pipe identifier + * @type: Plane type - PRIMARY/OVERLAY/CURSOR + * @possible_crtcs: bitmask of crtc that can be attached to the given pipe + * + * Initialize the plane. 
+ */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, unsigned long possible_crtcs) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index abd6b21a049b..97090ca7842b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -30,7 +30,7 @@ * @plane_fetch_bw: calculated BW per plane * @plane_clk: calculated clk per plane * @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed - * @rotation: simplified drm rotation hint + * @layout: framebuffer memory layout */ struct dpu_plane_state { struct drm_plane_state base; @@ -47,46 +47,21 @@ struct dpu_plane_state { u64 plane_clk; bool needs_dirtyfb; - unsigned int rotation; + + struct dpu_hw_fmt_layout layout; }; #define to_dpu_plane_state(x) \ container_of(x, struct dpu_plane_state, base) -/** - * dpu_plane_flush - final plane operations before commit flush - * @plane: Pointer to drm plane structure - */ void dpu_plane_flush(struct drm_plane *plane); -/** - * dpu_plane_set_error: enable/disable error condition - * @plane: pointer to drm_plane structure - */ void dpu_plane_set_error(struct drm_plane *plane, bool error); -/** - * dpu_plane_init - create new dpu plane for the given pipe - * @dev: Pointer to DRM device - * @pipe: dpu hardware pipe identifier - * @type: Plane type - PRIMARY/OVERLAY/CURSOR - * @possible_crtcs: bitmask of crtc that can be attached to the given pipe - * - */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, unsigned long possible_crtcs); -/** - * dpu_plane_color_fill - enables color fill on plane - * @plane: Pointer to DRM plane object - * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red - * @alpha: 8-bit fill alpha value, 255 selects 100% alpha - * Returns: 0 on success - */ -int dpu_plane_color_fill(struct drm_plane *plane, - uint32_t color, uint32_t alpha); - #ifdef CONFIG_DEBUG_FS void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable); #else diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 44938ba7a2b7..c247af03dc8e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -34,6 +34,16 @@ struct dpu_rm_requirements { struct msm_display_topology topology; }; +/** + * dpu_rm_init - Read hardware catalog and create reservation tracking objects + * for all HW blocks. + * @dev: Corresponding device for devres management + * @rm: DPU Resource Manager handle + * @cat: Pointer to hardware catalog + * @mdss_data: Pointer to MDSS / UBWC configuration + * @mmio: mapped register io address of MDP + * @return: 0 on Success otherwise -ERROR + */ int dpu_rm_init(struct drm_device *dev, struct dpu_rm *rm, const struct dpu_mdss_cfg *cat, @@ -641,6 +651,13 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, } } +/** + * dpu_rm_release - Given the encoder for the display chain, release any + * HW blocks previously reserved for that use case. 
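
With the layout now cached in the plane state, framebuffer handling becomes a two-step affair: pitches are computed and bounds-checked once at atomic_check time, and only the DMA addresses are resolved at commit. A condensed sketch of the sequence, using the calls from the hunks above with the surrounding context assumed:

	/* atomic_check: sizes/pitches only, no address lookups yet */
	ret = dpu_format_populate_plane_sizes(new_plane_state->fb, &pstate->layout);
	if (ret)
		return ret;

	/* atomic_update: fill in the per-plane addresses for the same layout */
	dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);
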
+ * @global_state: resources shared across multiple kms objects + * @enc: DRM Encoder handle + * @return: 0 on Success otherwise -ERROR + */ void dpu_rm_release(struct dpu_global_state *global_state, struct drm_encoder *enc) { @@ -657,6 +674,20 @@ void dpu_rm_release(struct dpu_global_state *global_state, _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id); } +/** + * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze + * the use connections and user requirements, specified through related + * topology control properties, and reserve hardware blocks to that + * display chain. + * HW blocks can then be accessed through dpu_rm_get_* functions. + * HW Reservations should be released via dpu_rm_release_hw. + * @rm: DPU Resource Manager handle + * @global_state: resources shared across multiple kms objects + * @enc: DRM Encoder handle + * @crtc_state: Proposed Atomic DRM CRTC State handle + * @topology: Pointer to topology info for the display + * @return: 0 on Success otherwise -ERROR + */ int dpu_rm_reserve( struct dpu_rm *rm, struct dpu_global_state *global_state, @@ -694,6 +725,16 @@ int dpu_rm_reserve( return ret; } +/** + * dpu_rm_get_assigned_resources - Get hw resources of the given type that are + * assigned to this encoder + * @rm: DPU Resource Manager handle + * @global_state: resources shared across multiple kms objects + * @enc_id: encoder id requesting for allocation + * @type: resource type to return data for + * @blks: pointer to the array to be filled by HW resources + * @blks_size: size of the @blks array + */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) @@ -772,6 +813,11 @@ static void dpu_rm_print_state_helper(struct drm_printer *p, } +/** + * dpu_rm_print_state - output the RM private state + * @p: DRM printer + * @global_state: global state + */ void dpu_rm_print_state(struct drm_printer *p, const struct dpu_global_state *global_state) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index e63db8ace6b9..ea0e49cb7b0d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -38,62 +38,40 @@ struct dpu_rm { }; /** - * dpu_rm_init - Read hardware catalog and create reservation tracking objects - * for all HW blocks. 
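
Taken together, the kernel-doc added above describes a reserve/lookup/release lifecycle; a compressed sketch with error handling trimmed, all variables assumed in scope, and DPU_HW_BLK_LM used only as an example block type:

	ret = dpu_rm_reserve(rm, global_state, enc, crtc_state, topology);
	if (ret)
		return ret;

	/* look up the blocks the reservation picked for this encoder */
	num = dpu_rm_get_assigned_resources(rm, global_state, enc->base.id,
					    DPU_HW_BLK_LM, blks, ARRAY_SIZE(blks));

	/* on teardown, hand everything back */
	dpu_rm_release(global_state, enc);
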
- * @dev: Corresponding device for devres management - * @rm: DPU Resource Manager handle - * @cat: Pointer to hardware catalog - * @mdss_data: Pointer to MDSS / UBWC configuration - * @mmio: mapped register io address of MDP - * @Return: 0 on Success otherwise -ERROR + * struct msm_display_topology - defines a display topology pipeline + * @num_lm: number of layer mixers used + * @num_intf: number of interfaces the panel is mounted on + * @num_dspp: number of dspp blocks used + * @num_dsc: number of Display Stream Compression (DSC) blocks used + * @needs_cdm: indicates whether cdm block is needed for this display topology */ +struct msm_display_topology { + u32 num_lm; + u32 num_intf; + u32 num_dspp; + u32 num_dsc; + bool needs_cdm; +}; + int dpu_rm_init(struct drm_device *dev, struct dpu_rm *rm, const struct dpu_mdss_cfg *cat, const struct msm_mdss_data *mdss_data, void __iomem *mmio); -/** - * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze - * the use connections and user requirements, specified through related - * topology control properties, and reserve hardware blocks to that - * display chain. - * HW blocks can then be accessed through dpu_rm_get_* functions. - * HW Reservations should be released via dpu_rm_release_hw. - * @rm: DPU Resource Manager handle - * @drm_enc: DRM Encoder handle - * @crtc_state: Proposed Atomic DRM CRTC State handle - * @topology: Pointer to topology info for the display - * @Return: 0 on Success otherwise -ERROR - */ int dpu_rm_reserve(struct dpu_rm *rm, struct dpu_global_state *global_state, struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, struct msm_display_topology topology); -/** - * dpu_rm_reserve - Given the encoder for the display chain, release any - * HW blocks previously reserved for that use case. - * @rm: DPU Resource Manager handle - * @enc: DRM Encoder handle - * @Return: 0 on Success otherwise -ERROR - */ void dpu_rm_release(struct dpu_global_state *global_state, struct drm_encoder *enc); -/** - * Get hw resources of the given type that are assigned to this encoder. 
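
A filled-in example of the topology struct relocated above, for a hypothetical single-mixer panel (all values illustrative):

	struct msm_display_topology topo = {
		.num_lm = 1,		/* one layer mixer */
		.num_intf = 1,		/* panel on a single interface */
		.num_dspp = 0,		/* no color processing block */
		.num_dsc = 0,		/* DSC unused */
		.needs_cdm = false,	/* no chroma-down block needed */
	};
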
- */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); -/** - * dpu_rm_print_state - output the RM private state - * @p: DRM printer - * @global_state: global state - */ void dpu_rm_print_state(struct drm_printer *p, const struct dpu_global_state *global_state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 47c02b98eac3..2a551e455aa3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -204,6 +204,11 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, vbif->ops.set_halt_ctrl(vbif, params->xin_id, false); } +/** + * dpu_vbif_set_qos_remap - set QoS priority level remap + * @dpu_kms: DPU handler + * @params: Pointer to QoS configuration parameters + */ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params) { @@ -245,6 +250,10 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } } +/** + * dpu_vbif_clear_errors - clear any vbif errors + * @dpu_kms: DPU handler + */ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; @@ -262,6 +271,10 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) } } +/** + * dpu_vbif_init_memtypes - initialize xin memory types for vbif + * @dpu_kms: DPU handler + */ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h index e1b1f7f4e4be..62e47ae1e3ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h @@ -38,32 +38,14 @@ struct dpu_vbif_set_qos_params { bool is_rt; }; -/** - * dpu_vbif_set_ot_limit - set OT limit for vbif client - * @dpu_kms: DPU handler - * @params: Pointer to OT configuration parameters - */ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, struct dpu_vbif_set_ot_params *params); -/** - * dpu_vbif_set_qos_remap - set QoS priority level remap - * @dpu_kms: DPU handler - * @params: Pointer to QoS configuration parameters - */ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params); -/** - * dpu_vbif_clear_errors - clear any vbif errors - * @dpu_kms: DPU handler - */ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms); -/** - * dpu_vbif_init_memtypes - initialize xin memory types for vbif - * @dpu_kms: DPU handler - */ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms); void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root); diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index 4d55e3cf570f..07a2c1e87219 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -25,24 +25,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b addr = base_addr; end_addr = base_addr + aligned_len; - if (!(*reg)) - *reg = kvzalloc(len_padded, GFP_KERNEL); - - if (*reg) - dump_addr = *reg; + *reg = kvzalloc(len_padded, GFP_KERNEL); + if (!*reg) + return; + dump_addr = *reg; for (i = 0; i < num_rows; i++) { x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0; x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0; x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0; xc = (addr + 0xc < end_addr) ? 
readl_relaxed(addr + 0xc) : 0; - if (dump_addr) { - dump_addr[i * 4] = x0; - dump_addr[i * 4 + 1] = x4; - dump_addr[i * 4 + 2] = x8; - dump_addr[i * 4 + 3] = xc; - } + dump_addr[i * 4] = x0; + dump_addr[i * 4 + 1] = x4; + dump_addr[i * 4 + 2] = x8; + dump_addr[i * 4 + 3] = xc; addr += REG_DUMP_ALIGN; } diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index a599fc5d63c5..74e01a5dd419 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -17,281 +17,281 @@ #include "dp_display.h" #include "dp_utils.h" -struct dp_audio_private { +struct msm_dp_audio_private { struct platform_device *audio_pdev; struct platform_device *pdev; struct drm_device *drm_dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; u32 channels; - struct dp_audio dp_audio; + struct msm_dp_audio msm_dp_audio; }; -static u32 dp_audio_get_header(struct dp_catalog *catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) +static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - return dp_catalog_audio_get_header(catalog, sdp, header); + return msm_dp_catalog_audio_get_header(catalog, sdp, header); } -static void dp_audio_set_header(struct dp_catalog *catalog, +static void msm_dp_audio_set_header(struct msm_dp_catalog *catalog, u32 data, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - dp_catalog_audio_set_header(catalog, sdp, header, data); + msm_dp_catalog_audio_set_header(catalog, sdp, header, data); } -static void dp_audio_stream_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); new_value = 0x02; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); new_value = value; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); new_value = audio->channels - 1; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << 
HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); new_value = 0x1; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); new_value = 0x17; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); new_value = (0x0 | (0x11 << 2)); - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); new_value = 0x84; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); new_value = 0x1b; - parity_byte = dp_utils_calculate_parity(new_value); + 
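
Every SDP in this file is programmed with the same three-step pattern, distilled here once; identifiers are the renamed ones from this patch, and 0x02 is the stream-SDP header byte shown in the first of these hunks:

	u32 value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM,
					    DP_AUDIO_SDP_HEADER_1);
	u32 new_value = 0x02;				/* header byte payload */
	u8 parity_byte = msm_dp_utils_calculate_parity(new_value);

	/* pack payload and its parity into their byte lanes and write back */
	value |= (new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT);
	msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM,
				DP_AUDIO_SDP_HEADER_1);
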
parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); new_value = (0x0 | (0x11 << 2)); - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", new_value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); new_value = 0x05; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); new_value = 0x0F; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); new_value = 0x0; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, 
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); new_value = 0x06; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); new_value = 0x0F; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); } -static void dp_audio_setup_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio) { - dp_catalog_audio_config_sdp(audio->catalog); + msm_dp_catalog_audio_config_sdp(audio->catalog); - dp_audio_stream_sdp(audio); - dp_audio_timestamp_sdp(audio); - dp_audio_infoframe_sdp(audio); - dp_audio_copy_management_sdp(audio); - dp_audio_isrc_sdp(audio); + msm_dp_audio_stream_sdp(audio); + msm_dp_audio_timestamp_sdp(audio); + msm_dp_audio_infoframe_sdp(audio); + msm_dp_audio_copy_management_sdp(audio); + msm_dp_audio_isrc_sdp(audio); } -static void dp_audio_setup_acr(struct dp_audio_private *audio) +static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio) { u32 select = 0; - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; - switch (audio->dp_audio.bw_code) { + switch (audio->msm_dp_audio.bw_code) { case DP_LINK_BW_1_62: select = 0; break; @@ -310,15 +310,15 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio) break; } - dp_catalog_audio_config_acr(catalog, select); + msm_dp_catalog_audio_config_acr(catalog, select); } -static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) +static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 safe_to_exit_level = 0; - switch (audio->dp_audio.lane_count) { + switch (audio->msm_dp_audio.lane_count) { case 1: safe_to_exit_level = 14; break; @@ -336,49 +336,49 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) break; } - dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); + msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); } -static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; - dp_catalog_audio_enable(catalog, enable); + msm_dp_catalog_audio_enable(catalog, enable); } -static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev) { - struct dp_audio *dp_audio; - struct msm_dp *dp_display; + struct msm_dp_audio *msm_dp_audio; + struct msm_dp *msm_dp_display; if (!pdev) { DRM_ERROR("invalid 
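
The two switch statements above translate link-session parameters into register fields; a session config matching the branches visible in the hunks:

	struct msm_dp_audio cfg = {
		.lane_count = 1,		/* -> safe_to_exit_level = 14 */
		.bw_code = DP_LINK_BW_1_62,	/* -> ACR select = 0 */
	};
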
input\n"); return ERR_PTR(-ENODEV); } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { DRM_ERROR("invalid input\n"); return ERR_PTR(-ENODEV); } - dp_audio = dp_display->dp_audio; + msm_dp_audio = msm_dp_display->msm_dp_audio; - if (!dp_audio) { - DRM_ERROR("invalid dp_audio data\n"); + if (!msm_dp_audio) { + DRM_ERROR("invalid msm_dp_audio data\n"); return ERR_PTR(-EINVAL); } - return container_of(dp_audio, struct dp_audio_private, dp_audio); + return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); } -static int dp_audio_hook_plugged_cb(struct device *dev, void *data, +static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data, hdmi_codec_plugged_cb fn, struct device *codec_dev) { struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); if (!pdev) { @@ -386,20 +386,20 @@ static int dp_audio_hook_plugged_cb(struct device *dev, void *data, return -ENODEV; } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { pr_err("invalid input\n"); return -ENODEV; } - return dp_display_set_plugged_cb(dp_display, fn, codec_dev); + return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev); } -static int dp_audio_get_eld(struct device *dev, +static int msm_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); @@ -408,30 +408,30 @@ static int dp_audio_get_eld(struct device *dev, return -ENODEV; } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { DRM_ERROR("invalid input\n"); return -ENODEV; } - memcpy(buf, dp_display->connector->eld, - min(sizeof(dp_display->connector->eld), len)); + memcpy(buf, msm_dp_display->connector->eld, + min(sizeof(msm_dp_display->connector->eld), len)); return 0; } -int dp_audio_hw_params(struct device *dev, +int msm_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { int rc = 0; - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); - dp_display = platform_get_drvdata(pdev); + msm_dp_display = platform_get_drvdata(pdev); /* * there could be cases where sound card can be opened even @@ -441,12 +441,12 @@ int dp_audio_hw_params(struct device *dev, * such cases check for connection status and bail out if not * connected. 
*/ - if (!dp_display->power_on) { + if (!msm_dp_display->power_on) { rc = -EINVAL; goto end; } - audio = dp_audio_get_data(pdev); + audio = msm_dp_audio_get_data(pdev); if (IS_ERR(audio)) { rc = PTR_ERR(audio); goto end; @@ -454,26 +454,26 @@ int dp_audio_hw_params(struct device *dev, audio->channels = params->channels; - dp_audio_setup_sdp(audio); - dp_audio_setup_acr(audio); - dp_audio_safe_to_exit_level(audio); - dp_audio_enable(audio, true); - dp_display_signal_audio_start(dp_display); - dp_display->audio_enabled = true; + msm_dp_audio_setup_sdp(audio); + msm_dp_audio_setup_acr(audio); + msm_dp_audio_safe_to_exit_level(audio); + msm_dp_audio_enable(audio, true); + msm_dp_display_signal_audio_start(msm_dp_display); + msm_dp_display->audio_enabled = true; end: return rc; } -static void dp_audio_shutdown(struct device *dev, void *data) +static void msm_dp_audio_shutdown(struct device *dev, void *data) { - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); - dp_display = platform_get_drvdata(pdev); - audio = dp_audio_get_data(pdev); + msm_dp_display = platform_get_drvdata(pdev); + audio = msm_dp_audio_get_data(pdev); if (IS_ERR(audio)) { DRM_ERROR("failed to get audio data\n"); return; @@ -487,32 +487,32 @@ static void dp_audio_shutdown(struct device *dev, void *data) * connected. is_connected cannot be used here as its set * to false earlier than this call */ - if (!dp_display->audio_enabled) + if (!msm_dp_display->audio_enabled) return; - dp_audio_enable(audio, false); + msm_dp_audio_enable(audio, false); /* signal the dp display to safely shutdown clocks */ - dp_display_signal_audio_complete(dp_display); + msm_dp_display_signal_audio_complete(msm_dp_display); } -static const struct hdmi_codec_ops dp_audio_codec_ops = { - .hw_params = dp_audio_hw_params, - .audio_shutdown = dp_audio_shutdown, - .get_eld = dp_audio_get_eld, - .hook_plugged_cb = dp_audio_hook_plugged_cb, +static const struct hdmi_codec_ops msm_dp_audio_codec_ops = { + .hw_params = msm_dp_audio_hw_params, + .audio_shutdown = msm_dp_audio_shutdown, + .get_eld = msm_dp_audio_get_eld, + .hook_plugged_cb = msm_dp_audio_hook_plugged_cb, }; static struct hdmi_codec_pdata codec_data = { - .ops = &dp_audio_codec_ops, + .ops = &msm_dp_audio_codec_ops, .max_i2s_channels = 8, .i2s = 1, }; -void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) +void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio_priv; + struct msm_dp_audio_private *audio_priv; - audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio); + audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); if (audio_priv->audio_pdev) { platform_device_unregister(audio_priv->audio_pdev); @@ -520,13 +520,13 @@ void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) } } -int dp_register_audio_driver(struct device *dev, - struct dp_audio *dp_audio) +int msm_dp_register_audio_driver(struct device *dev, + struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio_priv; + struct msm_dp_audio_private *audio_priv; - audio_priv = container_of(dp_audio, - struct dp_audio_private, dp_audio); + audio_priv = container_of(msm_dp_audio, + struct msm_dp_audio_private, msm_dp_audio); audio_priv->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, @@ -536,13 +536,13 @@ int 
dp_register_audio_driver(struct device *dev, return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); } -struct dp_audio *dp_audio_get(struct platform_device *pdev, - struct dp_panel *panel, - struct dp_catalog *catalog) +struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, + struct msm_dp_panel *panel, + struct msm_dp_catalog *catalog) { int rc = 0; - struct dp_audio_private *audio; - struct dp_audio *dp_audio; + struct msm_dp_audio_private *audio; + struct msm_dp_audio *msm_dp_audio; if (!pdev || !panel || !catalog) { DRM_ERROR("invalid input\n"); @@ -559,23 +559,23 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev, audio->pdev = pdev; audio->catalog = catalog; - dp_audio = &audio->dp_audio; + msm_dp_audio = &audio->msm_dp_audio; - dp_catalog_audio_init(catalog); + msm_dp_catalog_audio_init(catalog); - return dp_audio; + return msm_dp_audio; error: return ERR_PTR(rc); } -void dp_audio_put(struct dp_audio *dp_audio) +void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; - if (!dp_audio) + if (!msm_dp_audio) return; - audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + audio = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); devm_kfree(&audio->pdev->dev, audio); } diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index 4ab78880af82..1c9efaaa40e5 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -13,58 +13,58 @@ #include <sound/hdmi-codec.h> /** - * struct dp_audio + * struct msm_dp_audio * @lane_count: number of lanes configured in current session * @bw_code: link rate's bandwidth code for current session */ -struct dp_audio { +struct msm_dp_audio { u32 lane_count; u32 bw_code; }; /** - * dp_audio_get() + * msm_dp_audio_get() * * Creates and instance of dp audio. * * @pdev: caller's platform device instance. - * @panel: an instance of dp_panel module. - * @catalog: an instance of dp_catalog module. + * @panel: an instance of msm_dp_panel module. + * @catalog: an instance of msm_dp_catalog module. * * Returns the error code in case of failure, otherwize - * an instance of newly created dp_module. + * an instance of newly created msm_dp_module. */ -struct dp_audio *dp_audio_get(struct platform_device *pdev, - struct dp_panel *panel, - struct dp_catalog *catalog); +struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, + struct msm_dp_panel *panel, + struct msm_dp_catalog *catalog); /** - * dp_register_audio_driver() + * msm_dp_register_audio_driver() * * Registers DP device with hdmi_codec interface. * * @dev: DP device instance. - * @dp_audio: an instance of dp_audio module. + * @msm_dp_audio: an instance of msm_dp_audio module. * * * Returns the error code in case of failure, otherwise * zero on success. */ -int dp_register_audio_driver(struct device *dev, - struct dp_audio *dp_audio); +int msm_dp_register_audio_driver(struct device *dev, + struct msm_dp_audio *msm_dp_audio); -void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio); +void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio); /** - * dp_audio_put() + * msm_dp_audio_put() * - * Cleans the dp_audio instance. + * Cleans the msm_dp_audio instance. * - * @dp_audio: an instance of dp_audio. + * @msm_dp_audio: an instance of msm_dp_audio. 
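
The registration call is cut off mid-argument in the hunk above; the usual hdmi-codec pattern it follows looks like this, where the PLATFORM_DEVID_AUTO id and the sizeof argument are assumptions, not visible in the diff:

	/* id and size arguments assumed; the hunk cuts off after the name */
	audio_priv->audio_pdev =
		platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
					      PLATFORM_DEVID_AUTO,
					      &codec_data, sizeof(codec_data));
	return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
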
*/ -void dp_audio_put(struct dp_audio *dp_audio); +void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio); -int dp_audio_hw_params(struct device *dev, +int msm_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 00dfafbebe0e..bc8d46abfc61 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -20,9 +20,9 @@ enum msm_dp_aux_err { DP_AUX_ERR_PHY, }; -struct dp_aux_private { +struct msm_dp_aux_private { struct device *dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; struct phy *phy; @@ -42,12 +42,12 @@ struct dp_aux_private { u32 offset; u32 segment; - struct drm_dp_aux dp_aux; + struct drm_dp_aux msm_dp_aux; }; #define MAX_AUX_RETRIES 5 -static ssize_t dp_aux_write(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { u8 data[4]; @@ -88,11 +88,11 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, /* index = 0, write */ if (i == 0) reg |= DP_AUX_DATA_INDEX_WRITE; - dp_catalog_aux_write_data(aux->catalog, reg); + msm_dp_catalog_aux_write_data(aux->catalog, reg); } - dp_catalog_aux_clear_trans(aux->catalog, false); - dp_catalog_aux_clear_hw_interrupts(aux->catalog); + msm_dp_catalog_aux_clear_trans(aux->catalog, false); + msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog); reg = 0; /* Transaction number == 1 */ if (!aux->native) { /* i2c */ @@ -106,12 +106,12 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, } reg |= DP_AUX_TRANS_CTRL_GO; - dp_catalog_aux_write_trans(aux->catalog, reg); + msm_dp_catalog_aux_write_trans(aux->catalog, reg); return len; } -static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_cmd_fifo_tx(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { ssize_t ret; @@ -119,7 +119,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, reinit_completion(&aux->comp); - ret = dp_aux_write(aux, msg); + ret = msm_dp_aux_write(aux, msg); if (ret < 0) return ret; @@ -131,7 +131,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, return ret; } -static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { u32 data; @@ -139,20 +139,20 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, u32 i, actual_i; u32 len = msg->size; - dp_catalog_aux_clear_trans(aux->catalog, true); + msm_dp_catalog_aux_clear_trans(aux->catalog, true); data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ data |= DP_AUX_DATA_READ; /* read */ - dp_catalog_aux_write_data(aux->catalog, data); + msm_dp_catalog_aux_write_data(aux->catalog, data); dp = msg->buffer; /* discard first byte */ - data = dp_catalog_aux_read_data(aux->catalog); + data = msm_dp_catalog_aux_read_data(aux->catalog); for (i = 0; i < len; i++) { - data = dp_catalog_aux_read_data(aux->catalog); + data = msm_dp_catalog_aux_read_data(aux->catalog); *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff); actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF; @@ -163,7 +163,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, return i; } -static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, +static void msm_dp_aux_update_offset_and_segment(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *input_msg) { u32 edid_address = 0x50; @@ -185,7 +185,7 @@ static 
void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, } /** - * dp_aux_transfer_helper() - helper function for EDID read transactions + * msm_dp_aux_transfer_helper() - helper function for EDID read transactions * * @aux: DP AUX private structure * @input_msg: input message from DRM upstream APIs @@ -196,7 +196,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, * This helper function is used to fix EDID reads for non-compliant * sinks that do not handle the i2c middle-of-transaction flag correctly. */ -static void dp_aux_transfer_helper(struct dp_aux_private *aux, +static void msm_dp_aux_transfer_helper(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *input_msg, bool send_seg) { @@ -238,7 +238,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, helper_msg.address = segment_address; helper_msg.buffer = &aux->segment; helper_msg.size = 1; - dp_aux_cmd_fifo_tx(aux, &helper_msg); + msm_dp_aux_cmd_fifo_tx(aux, &helper_msg); } /* @@ -252,7 +252,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, helper_msg.address = input_msg->address; helper_msg.buffer = &aux->offset; helper_msg.size = 1; - dp_aux_cmd_fifo_tx(aux, &helper_msg); + msm_dp_aux_cmd_fifo_tx(aux, &helper_msg); end: aux->offset += message_size; @@ -265,15 +265,15 @@ end: * It will call aux_reset() function to reset the AUX channel, * if the waiting is timeout. */ -static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, +static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux, struct drm_dp_aux_msg *msg) { ssize_t ret; int const aux_cmd_native_max = 16; int const aux_cmd_i2c_max = 128; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); @@ -292,7 +292,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, return -EINVAL; } - ret = pm_runtime_resume_and_get(dp_aux->dev); + ret = pm_runtime_resume_and_get(msm_dp_aux->dev); if (ret) return ret; @@ -313,8 +313,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, goto exit; } - dp_aux_update_offset_and_segment(aux, msg); - dp_aux_transfer_helper(aux, msg, true); + msm_dp_aux_update_offset_and_segment(aux, msg); + msm_dp_aux_transfer_helper(aux, msg, true); aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); aux->cmd_busy = true; @@ -327,7 +327,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, aux->no_send_stop = true; } - ret = dp_aux_cmd_fifo_tx(aux, msg); + ret = msm_dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { if (aux->native) { aux->retry_cnt++; @@ -335,14 +335,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, phy_calibrate(aux->phy); } /* reset aux if link is in connected state */ - if (dp_catalog_link_is_connected(aux->catalog)) - dp_catalog_aux_reset(aux->catalog); + if (msm_dp_catalog_link_is_connected(aux->catalog)) + msm_dp_catalog_aux_reset(aux->catalog); } else { aux->retry_cnt = 0; switch (aux->aux_error_num) { case DP_AUX_ERR_NONE: if (aux->read) - ret = dp_aux_cmd_fifo_rx(aux, msg); + ret = msm_dp_aux_cmd_fifo_rx(aux, msg); msg->reply = aux->native ? 
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; break; case DP_AUX_ERR_DEFER: @@ -364,24 +364,24 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, exit: mutex_unlock(&aux->mutex); - pm_runtime_put_sync(dp_aux->dev); + pm_runtime_put_sync(msm_dp_aux->dev); return ret; } -irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) +irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux) { u32 isr; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return IRQ_NONE; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); - isr = dp_catalog_aux_get_irq(aux->catalog); + isr = msm_dp_catalog_aux_get_irq(aux->catalog); /* no interrupts pending, return immediately */ if (!isr) @@ -403,7 +403,7 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) if (isr & DP_INTR_AUX_ERROR) { aux->aux_error_num = DP_AUX_ERR_PHY; - dp_catalog_aux_clear_hw_interrupts(aux->catalog); + msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog); } else if (isr & DP_INTR_NACK_DEFER) { aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; } else if (isr & DP_INTR_WRONG_ADDR) { @@ -429,68 +429,68 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) return IRQ_HANDLED; } -void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled) +void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); aux->enable_xfers = enabled; } -void dp_aux_reconfig(struct drm_dp_aux *dp_aux) +void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); phy_calibrate(aux->phy); - dp_catalog_aux_reset(aux->catalog); + msm_dp_catalog_aux_reset(aux->catalog); } -void dp_aux_init(struct drm_dp_aux *dp_aux) +void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_lock(&aux->mutex); - dp_catalog_aux_enable(aux->catalog, true); + msm_dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; aux->initted = true; mutex_unlock(&aux->mutex); } -void dp_aux_deinit(struct drm_dp_aux *dp_aux) +void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_lock(&aux->mutex); aux->initted = false; - dp_catalog_aux_enable(aux->catalog, false); + msm_dp_catalog_aux_enable(aux->catalog, false); mutex_unlock(&aux->mutex); } -int dp_aux_register(struct drm_dp_aux *dp_aux) +int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux) { int ret; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return -EINVAL; } - ret = drm_dp_aux_register(dp_aux); + ret = drm_dp_aux_register(msm_dp_aux); if (ret) { DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, ret); @@ -500,34 +500,34 @@ int dp_aux_register(struct drm_dp_aux *dp_aux) 
return 0; } -void dp_aux_unregister(struct drm_dp_aux *dp_aux) +void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux) { - drm_dp_aux_unregister(dp_aux); + drm_dp_aux_unregister(msm_dp_aux); } -static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, +static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux, unsigned long wait_us) { int ret; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); ret = pm_runtime_resume_and_get(aux->dev); if (ret) return ret; - ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); + ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); pm_runtime_put_sync(aux->dev); return ret; } -struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, +struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog, struct phy *phy, bool is_edp) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; if (!catalog) { DRM_ERROR("invalid input\n"); @@ -553,23 +553,23 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, * before registering AUX with the DRM device so that * msm eDP panel can be detected by generic_dep_panel_probe(). */ - aux->dp_aux.name = "dpu_dp_aux"; - aux->dp_aux.dev = dev; - aux->dp_aux.transfer = dp_aux_transfer; - aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted; - drm_dp_aux_init(&aux->dp_aux); + aux->msm_dp_aux.name = "dpu_dp_aux"; + aux->msm_dp_aux.dev = dev; + aux->msm_dp_aux.transfer = msm_dp_aux_transfer; + aux->msm_dp_aux.wait_hpd_asserted = msm_dp_wait_hpd_asserted; + drm_dp_aux_init(&aux->msm_dp_aux); - return &aux->dp_aux; + return &aux->msm_dp_aux; } -void dp_aux_put(struct drm_dp_aux *dp_aux) +void msm_dp_aux_put(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) + if (!msm_dp_aux) return; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_destroy(&aux->mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h index 4f65e892a807..39c5b4c8596a 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.h +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -9,18 +9,18 @@ #include "dp_catalog.h" #include <drm/display/drm_dp_helper.h> -int dp_aux_register(struct drm_dp_aux *dp_aux); -void dp_aux_unregister(struct drm_dp_aux *dp_aux); -irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux); -void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled); -void dp_aux_init(struct drm_dp_aux *dp_aux); -void dp_aux_deinit(struct drm_dp_aux *dp_aux); -void dp_aux_reconfig(struct drm_dp_aux *dp_aux); +int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux); +irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled); +void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux); struct phy; -struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, +struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog, struct phy *phy, bool is_edp); -void dp_aux_put(struct drm_dp_aux *aux); +void msm_dp_aux_put(struct drm_dp_aux *aux); #endif /*__DP_AUX_H_*/ 
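
The rename above leaves the driver's core idiom untouched: the public handle (struct drm_dp_aux here, struct msm_dp_audio and struct msm_dp_catalog elsewhere) is embedded by value in a *_private wrapper, and every callback recovers the wrapper with container_of(). A minimal sketch of that idiom follows; the example_* names are hypothetical, and only drm_dp_aux, drm_dp_aux_msg and container_of() are the real kernel types and macros:

#include <linux/container_of.h>
#include <drm/display/drm_dp_helper.h>

/* hypothetical wrapper mirroring the shape of msm_dp_aux_private */
struct example_aux_private {
	int retry_cnt;
	struct drm_dp_aux base;		/* public handle, embedded by value */
};

static ssize_t example_transfer(struct drm_dp_aux *drm_aux,
				struct drm_dp_aux_msg *msg)
{
	/* recover the private state from the public pointer we were handed */
	struct example_aux_private *aux =
		container_of(drm_aux, struct example_aux_private, base);

	aux->retry_cnt = 0;
	return msg->size;
}

Because the wrapper owns the embedded handle, no back-pointer field is needed and msm_dp_aux_get() can hand back &aux->msm_dp_aux directly; it is also why renaming the embedded member has to touch every container_of() site in one patch.
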
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 6e55cbf69674..b4c8856fb25d 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -75,18 +75,18 @@ struct dss_io_data { struct dss_io_region p0; }; -struct dp_catalog_private { +struct msm_dp_catalog_private { struct device *dev; struct drm_device *drm_dev; struct dss_io_data io; u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; - struct dp_catalog dp_catalog; + struct msm_dp_catalog msm_dp_catalog; }; -void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state) +void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); struct dss_io_data *dss = &catalog->io; msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb"); @@ -95,12 +95,12 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0"); } -static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.aux.base + offset); } -static inline void dp_write_aux(struct dp_catalog_private *catalog, +static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -110,12 +110,12 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog, writel(data, catalog->io.aux.base + offset); } -static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.ahb.base + offset); } -static inline void dp_write_ahb(struct dp_catalog_private *catalog, +static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -125,7 +125,7 @@ static inline void dp_write_ahb(struct dp_catalog_private *catalog, writel(data, catalog->io.ahb.base + offset); } -static inline void dp_write_p0(struct dp_catalog_private *catalog, +static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -135,7 +135,7 @@ static inline void dp_write_p0(struct dp_catalog_private *catalog, writel(data, catalog->io.p0.base + offset); } -static inline u32 dp_read_p0(struct dp_catalog_private *catalog, +static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog, u32 offset) { /* @@ -145,12 +145,12 @@ static inline u32 dp_read_p0(struct dp_catalog_private *catalog, return readl_relaxed(catalog->io.p0.base + offset); } -static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.link.base + offset); } -static inline void dp_write_link(struct dp_catalog_private *catalog, +static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -161,64 +161,64 @@ static inline void dp_write_link(struct dp_catalog_private *catalog, } /* aux related catalog functions */ -u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) +u32 
msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_aux(catalog, REG_DP_AUX_DATA); + return msm_dp_read_aux(catalog, REG_DP_AUX_DATA); } -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data) +int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_DATA, data); + msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data); return 0; } -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data) +int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); return 0; } -int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read) +int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read) { u32 data; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (read) { - data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); + data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); data &= ~DP_AUX_TRANS_CTRL_GO; - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); } else { - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); } return 0; } -int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) +int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); + msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); return 0; } /** - * dp_catalog_aux_reset() - reset AUX controller + * msm_dp_catalog_aux_reset() - reset AUX controller * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * return: void * @@ -227,47 +227,47 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) * NOTE: reset AUX controller will also clear any pending HPD related interrupts * */ -void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) +void 
msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog) { u32 aux_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL); aux_ctrl |= DP_AUX_CTRL_RESET; - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); usleep_range(1000, 1100); /* h/w recommended delay */ aux_ctrl &= ~DP_AUX_CTRL_RESET; - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) +void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 aux_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL); if (enable) { - dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); - dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); + msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); + msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); aux_ctrl |= DP_AUX_CTRL_ENABLE; } else { aux_ctrl &= ~DP_AUX_CTRL_ENABLE; } - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, +int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog, unsigned long wait_us) { u32 state; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); /* poll for hpd connected status every 2ms and timeout after wait_us */ return readl_poll_timeout(catalog->io.aux.base + @@ -294,10 +294,10 @@ static void dump_regs(void __iomem *base, int len) } } -void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) +void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); struct dss_io_data *io = &catalog->io; pr_info("AHB regs\n"); @@ -313,17 +313,17 @@ void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) dump_regs(io->p0.base, io->p0.len); } -u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS); intr &= ~DP_INTERRUPT_STATUS1_MASK; intr_ack = (intr & DP_INTERRUPT_STATUS1) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack | + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 
intr_ack | DP_INTERRUPT_STATUS1_MASK); return intr; @@ -331,40 +331,40 @@ u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) } /* controller related catalog functions */ -void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, - u32 dp_tu, u32 valid_boundary, +void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog, + u32 msm_dp_tu, u32 valid_boundary, u32 valid_boundary2) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); - dp_write_link(catalog, REG_DP_TU, dp_tu); - dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); + msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); + msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu); + msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); } -void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state) +void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_link(catalog, REG_DP_STATE_CTRL, state); + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state); } -void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg) +void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg); - dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); + msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); } -void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ u32 ln_mapping; @@ -373,71 +373,71 @@ void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; - dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, + msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, ln_mapping); } -void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 val; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); if (enable) val |= DP_MAINLINK_CTRL_ENABLE; else val &= ~DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); + 
msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); } -void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 mainlink_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable); if (enable) { /* * To make sure link reg writes happens before other operation, - * dp_write_link() function uses writel() + * msm_dp_write_link() function uses writel() */ - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET | DP_MAINLINK_CTRL_ENABLE); - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl |= DP_MAINLINK_CTRL_RESET; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE | DP_MAINLINK_FB_BOUNDARY_SEL); - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } else { - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } } -void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 colorimetry_cfg, u32 test_bits_depth) { u32 misc_val; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0); + misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); /* clear bpp bits */ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT); @@ -447,27 +447,27 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, misc_val |= DP_MISC0_SYNCHRONOUS_CLK; drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); } -void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog) +void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog) { u32 mainlink_ctrl, hw_revision; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); - hw_revision = dp_catalog_hw_revision(dp_catalog); + hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog); if (hw_revision >= DP_HW_VERSION_1_2) mainlink_ctrl |= 
DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE; else mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } -void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate, u32 stream_rate_khz, bool is_ycbcr_420) { @@ -478,8 +478,8 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 const link_rate_hbr3 = 810000; unsigned long den, num; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (rate == link_rate_hbr3) pixel_div = 6; @@ -522,22 +522,22 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, nvid *= 3; drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid); - dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); - dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); - dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); + msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); + msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); + msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); } -int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, +int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 state_bit) { int bit, ret; u32 data; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); bit = BIT(state_bit - 1); drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit); - dp_catalog_ctrl_state_ctrl(dp_catalog, bit); + msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit); bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; @@ -554,25 +554,25 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, } /** - * dp_catalog_hw_revision() - retrieve DP hw revision + * msm_dp_catalog_hw_revision() - retrieve DP hw revision * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * Return: DP controller hw revision * */ -u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog) { - const struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_ahb(catalog, REG_DP_HW_VERSION); + return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION); } /** - * dp_catalog_ctrl_reset() - reset DP controller + * msm_dp_catalog_ctrl_reset() - reset DP controller * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * return: void * @@ -581,28 +581,28 @@ u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) * NOTE: reset DP controller will also clear any pending HPD related interrupts * */ -void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog) { u32 sw_reset; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + 
struct msm_dp_catalog_private, msm_dp_catalog); - sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET); + sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET); sw_reset |= DP_SW_RESET; - dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); usleep_range(1000, 1100); /* h/w recommended delay */ sw_reset &= ~DP_SW_RESET; - dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); } -bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) +bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog) { u32 data; int ret; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); /* Poll for mainlink ready status */ ret = readl_poll_timeout(catalog->io.link.base + @@ -617,96 +617,96 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) return true; } -void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (enable) { - dp_write_ahb(catalog, REG_DP_INTR_STATUS, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, DP_INTERRUPT_STATUS1_MASK); - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, DP_INTERRUPT_STATUS2_MASK); } else { - dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); } } -void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, +void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog, u32 intr_mask, bool en) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); config = (en ? 
config | intr_mask : config & ~intr_mask); drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n", intr_mask, config); - dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, + msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, config & DP_DP_HPD_INT_MASK); } -void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); /* Configure REFTIMER and enable it */ reftimer |= DP_DP_HPD_REFTIMER_ENABLE; - dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); /* Enable HPD */ - dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } -void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE; - dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); - dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); } -static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog) +static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog) { /* trigger sdp */ - dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); } -void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 config; /* enable PSR1 function */ - config = dp_read_link(catalog, REG_PSR_CONFIG); + config = msm_dp_read_link(catalog, REG_PSR_CONFIG); config |= PSR1_SUPPORTED; - dp_write_link(catalog, REG_PSR_CONFIG, config); + msm_dp_write_link(catalog, REG_PSR_CONFIG, config); - dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); - dp_catalog_enable_sdp(catalog); + msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); + msm_dp_catalog_enable_sdp(catalog); } -void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) +void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 cmd; - cmd = dp_read_link(catalog, 
REG_PSR_CMD); + cmd = msm_dp_read_link(catalog, REG_PSR_CMD); cmd &= ~(PSR_ENTER | PSR_EXIT); @@ -715,17 +715,17 @@ void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) else cmd |= PSR_EXIT; - dp_catalog_enable_sdp(catalog); - dp_write_link(catalog, REG_PSR_CMD, cmd); + msm_dp_catalog_enable_sdp(catalog); + msm_dp_write_link(catalog, REG_PSR_CMD, cmd); } -u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 status; - status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status); status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; @@ -733,16 +733,16 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) return status; } -u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); int isr, mask; - isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); - dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, + isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, (isr & DP_DP_HPD_INT_MASK)); - mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); /* * We only want to return interrupts that are unmasked to the caller. 
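
The hunk below closes this function with return isr & (mask | ~DP_DP_HPD_INT_MASK). A compact sketch of the same read/ack/filter sequence follows, assuming hypothetical register offsets and an EXAMPLE_HPD_INT_MASK; only the shape of the final expression is taken from the driver:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_HPD_INT_MASK	GENMASK(3, 0)	/* hypothetical maskable bits */

static u32 example_hpd_get_intr_status(void __iomem *aux_base)
{
	u32 isr, mask;

	isr = readl_relaxed(aux_base + 0x0);	/* latched status (offset assumed) */
	/* ack only the maskable bits that actually fired */
	writel(isr & EXAMPLE_HPD_INT_MASK, aux_base + 0x4);
	mask = readl_relaxed(aux_base + 0x8);	/* current interrupt mask */

	/*
	 * Report the sources the caller left unmasked, but always pass
	 * through the status bits that live outside the maskable range.
	 */
	return isr & (mask | ~EXAMPLE_HPD_INT_MASK);
}

In the real register layout the bits above DP_DP_HPD_INT_MASK carry HPD state rather than interrupt sources, which is presumably why they bypass the mask entirely.
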
@@ -754,115 +754,115 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) return isr & (mask | ~DP_DP_HPD_INT_MASK); } -u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4); intr_ack = (intr & DP_INTERRUPT_STATUS4) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); return intr; } -int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) +int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2); intr &= ~DP_INTERRUPT_STATUS2_MASK; intr_ack = (intr & DP_INTERRUPT_STATUS2) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, intr_ack | DP_INTERRUPT_STATUS2_MASK); return intr; } -void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_ahb(catalog, REG_DP_PHY_CTRL, + msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL); usleep_range(1000, 1100); /* h/w recommended delay */ - dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); + msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); } -void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog, u32 pattern) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 value = 0x0; /* Make sure to clear the current pattern before starting a new one */ - dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern); switch (pattern) { case DP_PHY_TEST_PATTERN_D10_2: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TRAINING_PATTERN1); break; case DP_PHY_TEST_PATTERN_ERROR_COUNT: value &= ~(1 << 16); - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= SCRAMBLER_RESET_COUNT_VALUE; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + 
msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); break; case DP_PHY_TEST_PATTERN_PRBS7: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_PRBS7); break; case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN); /* 00111110000011111000001111100000 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0); /* 00001111100000111110000011111000 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8); /* 1111100000111110 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E); break; case DP_PHY_TEST_PATTERN_CP2520: - value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); value = DP_HBR2_ERM_PATTERN; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= SCRAMBLER_RESET_COUNT_VALUE; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); - value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); value |= DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); break; case DP_PHY_TEST_PATTERN_SEL_MASK: - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, DP_MAINLINK_CTRL_ENABLE); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TRAINING_PATTERN4); break; default: @@ -872,94 +872,94 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, } } -u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_link(catalog, REG_DP_MAINLINK_READY); + return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY); } /* panel related catalog functions */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, - u32 sync_start, u32 width_blanking, u32 dp_active) +int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 msm_dp_active) { - struct dp_catalog_private *catalog = 
container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 reg; - dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); - dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); - dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); - dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active); + msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); + msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); + msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); + msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active); - reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); + reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); - if (dp_catalog->wide_bus_en) + if (msm_dp_catalog->wide_bus_en) reg |= DP_INTF_CONFIG_DATABUS_WIDEN; else reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN; - DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg); + DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg); - dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); + msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); return 0; } -static void dp_catalog_panel_send_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp) +static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 header[2]; u32 val; int i; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header); + msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header); - dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]); - dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]); for (i = 0; i < sizeof(vsc_sdp->db); i += 4) { val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) | (vsc_sdp->db[i + 3] << 24)); - dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val); } } -static void dp_catalog_panel_update_sdp(struct dp_catalog *dp_catalog) +static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 hw_revision; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - hw_revision = dp_catalog_hw_revision(dp_catalog); + hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog); if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) { - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01); - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00); } } -void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp) +void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 cfg, cfg2, misc; - catalog = container_of(dp_catalog, struct 
dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); - cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); - misc = dp_read_link(catalog, REG_DP_MISC1_MISC0); + cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); + cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); + misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); cfg |= GEN0_SDP_EN; - dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); cfg2 |= GENERIC0_SDPSIZE_VALID; - dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); - dp_catalog_panel_send_vsc_sdp(dp_catalog, vsc_sdp); + msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp); /* indicates presence of VSC (BIT(6) of MISC1) */ misc |= DP_MISC1_VSC_SDP; @@ -967,27 +967,27 @@ void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sd drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n"); pr_debug("misc settings = 0x%x\n", misc); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); - dp_catalog_panel_update_sdp(dp_catalog); + msm_dp_catalog_panel_update_sdp(msm_dp_catalog); } -void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog) +void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 cfg, cfg2, misc; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); - cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); - misc = dp_read_link(catalog, REG_DP_MISC1_MISC0); + cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); + cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); + misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); cfg &= ~GEN0_SDP_EN; - dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); cfg2 &= ~GENERIC0_SDPSIZE_VALID; - dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); /* switch back to MSA */ misc &= ~DP_MISC1_VSC_SDP; @@ -995,16 +995,16 @@ void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n"); pr_debug("misc settings = 0x%x\n", misc); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); - dp_catalog_panel_update_sdp(dp_catalog); + msm_dp_catalog_panel_update_sdp(msm_dp_catalog); } -void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, +void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog, struct drm_display_mode *drm_mode) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 hsync_period, vsync_period; u32 display_v_start, display_v_end; u32 hsync_start_x, hsync_end_x; @@ -1036,49 +1036,49 @@ void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, display_hctl = (hsync_end_x << 16) | hsync_start_x; - dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); - dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * + 
msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * hsync_period); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); - dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); - dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); - - dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); + msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); + msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); + + msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, DP_TPG_CHECKERED_RECT_PATTERN); - dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, + msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, DP_TPG_VIDEO_CONFIG_BPP_8BIT | DP_TPG_VIDEO_CONFIG_RGB); - dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, + msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, DP_BIST_ENABLE_DPBIST_EN); - dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, + msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, DP_TIMING_ENGINE_EN_EN); drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__); } -void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); - dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); - dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); } -static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len) +static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len) { struct resource *res; void __iomem *base; @@ -1090,21 +1090,21 @@ static void __iomem 
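msm_dp_ioremap() here maps one MMIO region per "reg" index of the platform device and reports its length back to the caller. A hedged sketch of that helper using the combined platform resource API; treat it as one way to write it, not the driver's exact body:

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

/* Illustrative only: map region 'idx' and return its size via *len. */
static void __iomem *map_region(struct platform_device *pdev, int idx,
				size_t *len)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
	if (IS_ERR(base))
		return base;   /* e.g. ERR_PTR(-EINVAL) when idx doesn't exist */

	*len = resource_size(res);
	return base;
}

As the comment in msm_dp_catalog_get_io() explains, when only a single "reg" is present the aux mapping fails with -EINVAL and the driver instead carves the aux/link/p0 sub-regions out of the first mapping, which keeps old single-region DTBs working.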
*dp_ioremap(struct platform_device *pdev, int idx, size_t *l return base; } -static int dp_catalog_get_io(struct dp_catalog_private *catalog) +static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog) { struct platform_device *pdev = to_platform_device(catalog->dev); struct dss_io_data *dss = &catalog->io; - dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len); + dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len); if (IS_ERR(dss->ahb.base)) return PTR_ERR(dss->ahb.base); - dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len); + dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len); if (IS_ERR(dss->aux.base)) { /* * The initial binding had a single reg, but in order to * support variation in the sub-region sizes this was split. - * dp_ioremap() will fail with -EINVAL here if only a single + * msm_dp_ioremap() will fail with -EINVAL here if only a single * reg is specified, so fill in the sub-region offsets and * lengths based on this single region. */ @@ -1126,13 +1126,13 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog) return PTR_ERR(dss->aux.base); } } else { - dss->link.base = dp_ioremap(pdev, 2, &dss->link.len); + dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len); if (IS_ERR(dss->link.base)) { DRM_ERROR("unable to remap link region: %pe\n", dss->link.base); return PTR_ERR(dss->link.base); } - dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len); + dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len); if (IS_ERR(dss->p0.base)) { DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base); return PTR_ERR(dss->p0.base); @@ -1142,9 +1142,9 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog) return 0; } -struct dp_catalog *dp_catalog_get(struct device *dev) +struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; int ret; catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); @@ -1153,78 +1153,78 @@ struct dp_catalog *dp_catalog_get(struct device *dev) catalog->dev = dev; - ret = dp_catalog_get_io(catalog); + ret = msm_dp_catalog_get_io(catalog); if (ret) return ERR_PTR(ret); - return &catalog->dp_catalog; + return &catalog->msm_dp_catalog; } -u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) +u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); sdp_map = catalog->audio_map; - return dp_read_link(catalog, sdp_map[sdp][header]); + return msm_dp_read_link(catalog, sdp_map[sdp][header]); } -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header, +void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header, u32 data) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + 
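msm_dp_catalog_audio_get_header()/set_header() above index a two-dimensional table, sdp_map[sdp_type][header], that maps an (SDP type, header slot) pair to a link-register offset. A compact illustration of the lookup, with placeholder offsets rather than the MSM register map:

#include <stdint.h>
#include <stdio.h>

enum sdp_type   { SDP_STREAM, SDP_TIMESTAMP, SDP_TYPE_MAX };
enum sdp_header { HDR_1, HDR_2, HDR_3, HDR_MAX };

/* Offsets below are made up for the example. */
static const uint32_t sdp_map[SDP_TYPE_MAX][HDR_MAX] = {
	[SDP_STREAM]    = { 0x280, 0x284, 0x288 },
	[SDP_TIMESTAMP] = { 0x290, 0x294, 0x298 },
};

static uint32_t header_reg(enum sdp_type t, enum sdp_header h)
{
	return sdp_map[t][h];  /* caller then reads or writes this register */
}

int main(void)
{
	printf("stream SDP header 2 lives at 0x%x\n",
	       header_reg(SDP_STREAM, HDR_2));
	return 0;
}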
catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); sdp_map = catalog->audio_map; - dp_write_link(catalog, sdp_map[sdp][header], data); + msm_dp_write_link(catalog, sdp_map[sdp][header], data); } -void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select) +void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 acr_ctrl; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n", select, acr_ctrl); - dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); + msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); } -void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) +void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 audio_ctrl; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); + audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG); if (enable) audio_ctrl |= BIT(0); @@ -1233,24 +1233,24 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl); - dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); + msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); /* make sure audio engine is disabled */ wmb(); } -void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) +void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 sdp_cfg = 0; u32 sdp_cfg2 = 0; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); + sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); /* AUDIO_TIMESTAMP_SDP_EN */ sdp_cfg |= BIT(1); /* AUDIO_STREAM_SDP_EN */ @@ -1264,9 +1264,9 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg); - dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); - sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); + sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); /* IFRM_REGSRC -> Do not use reg values */ sdp_cfg2 &= ~BIT(0); /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ @@ -1274,12 +1274,12 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2); - dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); } -void dp_catalog_audio_init(struct dp_catalog *dp_catalog) +void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; 
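The audio path composes register values from individual flag bits, as in "acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14)" and the BIT(0) enable toggle in msm_dp_catalog_audio_enable(). A userspace equivalent of the kernel's BIT() helper, with an illustrative select value:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint32_t select = 0x1;  /* illustrative ACR source select */
	uint32_t acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);

	printf("MMSS_DP_AUDIO_ACR_CTRL = %#x\n", acr_ctrl);

	/* enable/disable by toggling bit 0, as the audio enable path does */
	uint32_t audio_cfg = 0;
	audio_cfg |= BIT(0);    /* enable */
	audio_cfg &= ~BIT(0);   /* disable */
	printf("audio_cfg = %#x\n", audio_cfg);
	return 0;
}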
static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { { @@ -1309,27 +1309,27 @@ void dp_catalog_audio_init(struct dp_catalog *dp_catalog) }, }; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); catalog->audio_map = sdp_map; } -void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level) +void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 mainlink_levels; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); + mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); mainlink_levels &= 0xFE0; mainlink_levels |= safe_to_exit_level; @@ -1337,5 +1337,5 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", mainlink_levels, safe_to_exit_level); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); + msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 4679d50b8c73..e932b17eecbf 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -31,7 +31,7 @@ #define DP_HW_VERSION_1_0 0x10000000 #define DP_HW_VERSION_1_2 0x10020000 -enum dp_catalog_audio_sdp_type { +enum msm_dp_catalog_audio_sdp_type { DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_INFOFRAME, @@ -40,89 +40,89 @@ enum dp_catalog_audio_sdp_type { DP_AUDIO_SDP_MAX, }; -enum dp_catalog_audio_header_type { +enum msm_dp_catalog_audio_header_type { DP_AUDIO_SDP_HEADER_1, DP_AUDIO_SDP_HEADER_2, DP_AUDIO_SDP_HEADER_3, DP_AUDIO_SDP_HEADER_MAX, }; -struct dp_catalog { +struct msm_dp_catalog { bool wide_bus_en; }; /* Debug module */ -void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state); +void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state); /* AUX APIs */ -u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data); -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data); -int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); -int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); -void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); -void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, +u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog); +int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data); +int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data); +int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read); +int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_aux_enable(struct msm_dp_catalog 
*msm_dp_catalog, bool enable); +int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog, unsigned long wait_us); -u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); +u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog); /* DP Controller APIs */ -void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); -void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); -void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); -void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, +void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state); +void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config); +void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb); +void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate, u32 stream_rate_khz, bool is_ycbcr_420); -int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern); -u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); -bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, +int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern); +u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog); +bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog, u32 intr_mask, bool en); -void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter); -u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); -u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); -int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); -u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, - u32 dp_tu, u32 valid_boundary, +void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog 
*msm_dp_catalog); +void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter); +u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog); +u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog); +int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog); +u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog, + u32 msm_dp_tu, u32 valid_boundary, u32 valid_boundary2); -void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog, u32 pattern); -u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); +u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog); /* DP Panel APIs */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, - u32 sync_start, u32 width_blanking, u32 dp_active); -void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp); -void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog); -void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); -void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, +int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 msm_dp_active); +void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp); +void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog, struct drm_display_mode *drm_mode); -void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); +void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog); -struct dp_catalog *dp_catalog_get(struct device *dev); +struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev); /* DP Audio APIs */ -u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header); -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header, +u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header); +void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header, u32 data); -void dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select); -void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable); -void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); -void dp_catalog_audio_init(struct dp_catalog *catalog); -void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level); +void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select); +void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable); +void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog); +void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog); +void msm_dp_catalog_audio_sfe_level(struct 
msm_dp_catalog *catalog, u32 safe_to_exit_level); #endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index f342fc5ae41e..bc2ca8133b79 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -40,7 +40,7 @@ enum { DP_TRAINING_2, }; -struct dp_tu_calc_input { +struct msm_dp_tu_calc_input { u64 lclk; /* 162, 270, 540 and 810 */ u64 pclk_khz; /* in KHz */ u64 hactive; /* active h-width */ @@ -55,7 +55,7 @@ struct dp_tu_calc_input { int num_of_dsc_slices; /* number of slices per line */ }; -struct dp_vc_tu_mapping_table { +struct msm_dp_vc_tu_mapping_table { u32 vic; u8 lanes; u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ @@ -69,14 +69,14 @@ struct dp_vc_tu_mapping_table { u8 tu_size_minus1; }; -struct dp_ctrl_private { - struct dp_ctrl dp_ctrl; +struct msm_dp_ctrl_private { + struct msm_dp_ctrl msm_dp_ctrl; struct drm_device *drm_dev; struct device *dev; struct drm_dp_aux *aux; - struct dp_panel *panel; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_panel *panel; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; struct phy *phy; @@ -99,8 +99,8 @@ struct dp_ctrl_private { bool stream_clks_on; }; -static int dp_aux_link_configure(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_configure(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 values[2]; int err; @@ -118,14 +118,14 @@ static int dp_aux_link_configure(struct drm_dp_aux *aux, return 0; } -void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); reinit_completion(&ctrl->idle_comp); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); if (!wait_for_completion_timeout(&ctrl->idle_comp, IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) @@ -134,7 +134,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "mainlink off\n"); } -static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl) { u32 config = 0, tbd; const u8 *dpcd = ctrl->panel->dpcd; @@ -142,15 +142,15 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) /* Default-> LSCLK DIV: 1/4 LCLK */ config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); - if (ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) config |= DP_CONFIGURATION_CTRL_RGB_YUV; /* YUV420 */ /* Scrambler reset enable */ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) config |= DP_CONFIGURATION_CTRL_ASSR; - tbd = dp_link_get_test_bits_depth(ctrl->link, - ctrl->panel->dp_mode.bpp); + tbd = msm_dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->msm_dp_mode.bpp); config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; @@ -170,24 +170,24 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) if (ctrl->panel->psr_cap.version) config |= DP_CONFIGURATION_CTRL_SEND_VSC; - dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); + msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); } -static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl) { u32 cc, 
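The body of msm_dp_aux_link_configure() is elided in the hunk above; the usual shape of such a helper is the standard DisplayPort sequence of writing the link rate and lane count into DPCD. A sketch with the common drm_dp helpers, offered as an assumption about the elided code rather than a copy of it:

#include <drm/display/drm_dp_helper.h>

/* Illustrative only: DP_LINK_BW_SET and DP_LANE_COUNT_SET are adjacent
 * in DPCD, so both can go out in one two-byte write. */
static int link_configure(struct drm_dp_aux *aux,
			  const struct msm_dp_link_info *link)
{
	u8 values[2];
	int err;

	values[0] = drm_dp_link_rate_to_bw_code(link->rate);
	values[1] = link->num_lanes;

	if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
		values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
	return err < 0 ? err : 0;
}

struct msm_dp_link_info and its rate/num_lanes/capabilities fields are the ones visible elsewhere in this diff; everything else above is the generic drm_dp API.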
tb; - dp_catalog_ctrl_lane_mapping(ctrl->catalog); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); - dp_catalog_setup_peripheral_flush(ctrl->catalog); + msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + msm_dp_catalog_setup_peripheral_flush(ctrl->catalog); - dp_ctrl_config_ctrl(ctrl); + msm_dp_ctrl_config_ctrl(ctrl); - tb = dp_link_get_test_bits_depth(ctrl->link, - ctrl->panel->dp_mode.bpp); - cc = dp_link_get_colorimetry_config(ctrl->link); - dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); - dp_panel_timing_cfg(ctrl->panel); + tb = msm_dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->msm_dp_mode.bpp); + cc = msm_dp_link_get_colorimetry_config(ctrl->link); + msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); + msm_dp_panel_timing_cfg(ctrl->panel); } /* @@ -310,7 +310,7 @@ static int _tu_param_compare(s64 a, s64 b) } } -static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, +static void msm_dp_panel_update_tu_timings(struct msm_dp_tu_calc_input *in, struct tu_algo_data *tu) { int nlanes = in->nlanes; @@ -622,9 +622,9 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu) } } -static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, - struct dp_tu_calc_input *in, - struct dp_vc_tu_mapping_table *tu_table) +static void _dp_ctrl_calc_tu(struct msm_dp_ctrl_private *ctrl, + struct msm_dp_tu_calc_input *in, + struct msm_dp_vc_tu_mapping_table *tu_table) { struct tu_algo_data *tu; int compare_result_1, compare_result_2; @@ -645,7 +645,7 @@ static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, if (!tu) return; - dp_panel_update_tu_timings(in, tu); + msm_dp_panel_update_tu_timings(in, tu); tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ @@ -956,21 +956,21 @@ tu_size_calc: kfree(tu); } -static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, - struct dp_vc_tu_mapping_table *tu_table) +static void msm_dp_ctrl_calc_tu_parameters(struct msm_dp_ctrl_private *ctrl, + struct msm_dp_vc_tu_mapping_table *tu_table) { - struct dp_tu_calc_input in; + struct msm_dp_tu_calc_input in; struct drm_display_mode *drm_mode; - drm_mode = &ctrl->panel->dp_mode.drm_mode; + drm_mode = &ctrl->panel->msm_dp_mode.drm_mode; in.lclk = ctrl->link->link_params.rate / 1000; in.pclk_khz = drm_mode->clock; in.hactive = drm_mode->hdisplay; in.hporch = drm_mode->htotal - drm_mode->hdisplay; in.nlanes = ctrl->link->link_params.num_lanes; - in.bpp = ctrl->panel->dp_mode.bpp; - in.pixel_enc = ctrl->panel->dp_mode.out_fmt_is_yuv_420 ? 420 : 444; + in.bpp = ctrl->panel->msm_dp_mode.bpp; + in.pixel_enc = ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420 ? 
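The TU inputs gathered above (lclk, pclk_khz, bpp, nlanes) feed a fixed-point computation whose details are elided, but the underlying feasibility check is plain 8b/10b arithmetic: each lane's symbol clock carries 8 payload bits per 10-bit symbol. A worked example, assuming 2-lane HBR2 driving 1080p60 at 24 bpp (values chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lclk_khz = 540000;  /* HBR2 symbol clock, kHz */
	uint64_t nlanes   = 2;
	uint64_t pclk_khz = 148500;  /* 1080p60 pixel clock, kHz */
	uint64_t bpp      = 24;

	/* 8b/10b: one byte of payload per symbol per lane */
	uint64_t link_kbps   = lclk_khz * nlanes * 8;
	uint64_t stream_kbps = pclk_khz * bpp;

	printf("link %llu kbit/s, stream %llu kbit/s -> %s\n",
	       (unsigned long long)link_kbps,
	       (unsigned long long)stream_kbps,
	       stream_kbps <= link_kbps ? "fits" : "does not fit");
	return 0;
}

Here 8.64 Gbit/s of link capacity comfortably carries the 3.564 Gbit/s stream; the TU algorithm then decides how to slot the stream bytes into 64-symbol transfer units.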
420 : 444; in.dsc_en = 0; in.async_en = 0; in.fec_en = 0; @@ -980,16 +980,16 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, _dp_ctrl_calc_tu(ctrl, &in, tu_table); } -static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl) { - u32 dp_tu = 0x0; + u32 msm_dp_tu = 0x0; u32 valid_boundary = 0x0; u32 valid_boundary2 = 0x0; - struct dp_vc_tu_mapping_table tu_calc_table; + struct msm_dp_vc_tu_mapping_table tu_calc_table; - dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); + msm_dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); - dp_tu |= tu_calc_table.tu_size_minus1; + msm_dp_tu |= tu_calc_table.tu_size_minus1; valid_boundary |= tu_calc_table.valid_boundary_link; valid_boundary |= (tu_calc_table.delay_start_link << 16); @@ -1001,13 +1001,13 @@ static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) valid_boundary2 |= BIT(0); pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", - dp_tu, valid_boundary, valid_boundary2); + msm_dp_tu, valid_boundary, valid_boundary2); - dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, - dp_tu, valid_boundary, valid_boundary2); + msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, + msm_dp_tu, valid_boundary, valid_boundary2); } -static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl) { int ret = 0; @@ -1019,7 +1019,7 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl, u8 v_level, u8 p_level) { union phy_configure_opts *phy_opts = &ctrl->phy_opts; @@ -1034,9 +1034,9 @@ static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl, return 0; } -static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl) { - struct dp_link *link = ctrl->link; + struct msm_dp_link *link = ctrl->link; int ret = 0, lane, lane_cnt; u8 buf[4]; u32 max_level_reached = 0; @@ -1046,7 +1046,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "voltage level: %d emphasis level: %d\n", voltage_swing_level, pre_emphasis_level); - ret = dp_ctrl_set_vx_px(ctrl, + ret = msm_dp_ctrl_set_vx_px(ctrl, voltage_swing_level, pre_emphasis_level); if (ret) @@ -1083,7 +1083,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) return ret; } -static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, +static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, u8 pattern) { u8 buf; @@ -1100,7 +1100,7 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, return ret == 1; } -static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl, u8 *link_status) { int ret = 0, len; @@ -1114,24 +1114,24 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, return ret; } -static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, int *training_step) { int tries, old_v_level, ret = 0; u8 link_status[DP_LINK_STATUS_SIZE]; int const maximum_retries = 4; - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_1; - ret = 
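The clock-recovery phase that begins here (msm_dp_ctrl_link_train_1) is a bounded retry loop: transmit TPS1, wait the DPCD-specified delay, read lane status, and either finish, give up after too many tries at one voltage level, or adjust swing/pre-emphasis and retry. A skeleton with stubbed transport, just to make the control flow explicit; the stubs stand in for AUX reads and PHY programming:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 4

static bool clock_recovery_done(void) { static int n; return ++n >= 3; }
static void adjust_vx_px(int *v_level) { (*v_level)++; }

int main(void)
{
	int tries, v_level = 0, old_v_level = 0;

	for (tries = 0; tries < MAX_TRIES; tries++) {
		/* after the clock-recovery delay, poll lane status */
		if (clock_recovery_done()) {
			printf("CR done after %d tries\n", tries + 1);
			return 0;
		}

		if (v_level != old_v_level) {
			tries = 0;           /* level changed: restart counter */
			old_v_level = v_level;
		}

		adjust_vx_px(&v_level);      /* bump swing/pre-emphasis */
	}

	printf("CR timed out\n");
	return 1;
}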
dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); + ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE); - ret = dp_ctrl_update_vx_px(ctrl); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; @@ -1140,7 +1140,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, for (tries = 0; tries < maximum_retries; tries++) { drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); - ret = dp_ctrl_read_link_status(ctrl, link_status); + ret = msm_dp_ctrl_read_link_status(ctrl, link_status); if (ret) return ret; @@ -1160,8 +1160,8 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, old_v_level = ctrl->link->phy_params.v_level; } - dp_link_adjust_levels(ctrl->link, link_status); - ret = dp_ctrl_update_vx_px(ctrl); + msm_dp_link_adjust_levels(ctrl->link, link_status); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; } @@ -1170,7 +1170,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_rate_down_shift(struct msm_dp_ctrl_private *ctrl) { int ret = 0; @@ -1198,7 +1198,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl) { if (ctrl->link->link_params.num_lanes == 1) @@ -1213,13 +1213,13 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) return 0; } -static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl) { - dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); } -static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, int *training_step) { int tries = 0, ret = 0; @@ -1228,7 +1228,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, int const maximum_retries = 5; u8 link_status[DP_LINK_STATUS_SIZE]; - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_2; @@ -1243,16 +1243,16 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, state_ctrl_bit = 2; } - ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); + ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, pattern); + msm_dp_ctrl_train_pattern_set(ctrl, pattern); for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); - ret = dp_ctrl_read_link_status(ctrl, link_status); + ret = msm_dp_ctrl_read_link_status(ctrl, link_status); if (ret) return ret; @@ -1261,8 +1261,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return 0; } - dp_link_adjust_levels(ctrl->link, link_status); - ret = dp_ctrl_update_vx_px(ctrl); + msm_dp_link_adjust_levels(ctrl->link, link_status); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; @@ -1271,24 +1271,24 @@ static int 
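When training fails, msm_dp_ctrl_link_rate_down_shift() walks down the link-rate ladder before lane count is halved. Its body is elided above, so the mapping below is the conventional DP ladder stated as an assumption (the "already in RBR = 1.6G" comment later in this file marks the floor):

#include <stdio.h>

/* Link rates as symbol-clock kHz, the unit link_params.rate uses. */
static int rate_down_shift(int rate)
{
	switch (rate) {
	case 810000: return 540000;  /* HBR3 -> HBR2 */
	case 540000: return 270000;  /* HBR2 -> HBR  */
	case 270000: return 162000;  /* HBR  -> RBR  */
	default:     return -1;      /* already RBR: no further fallback */
	}
}

int main(void)
{
	int rate = 810000;

	while (rate > 0) {
		printf("trying %d kHz symbol clock (%.2f Gbps line rate)\n",
		       rate, rate * 10 / 1e6);
		rate = rate_down_shift(rate);
	}
	return 0;
}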
dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl, int *training_step) { int ret = 0; const u8 *dpcd = ctrl->panel->dpcd; u8 encoding[] = { 0, DP_SET_ANSI_8B10B }; u8 assr; - struct dp_link_info link_info = {0}; + struct msm_dp_link_info link_info = {0}; - dp_ctrl_config_ctrl(ctrl); + msm_dp_ctrl_config_ctrl(ctrl); link_info.num_lanes = ctrl->link->link_params.num_lanes; link_info.rate = ctrl->link->link_params.rate; link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; - dp_link_reset_phy_params_vx_px(ctrl->link); + msm_dp_link_reset_phy_params_vx_px(ctrl->link); - dp_aux_link_configure(ctrl->aux, &link_info); + msm_dp_aux_link_configure(ctrl->aux, &link_info); if (drm_dp_max_downspread(dpcd)) encoding[0] |= DP_SPREAD_AMP_0_5; @@ -1302,7 +1302,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, &assr, 1); } - ret = dp_ctrl_link_train_1(ctrl, training_step); + ret = msm_dp_ctrl_link_train_1(ctrl, training_step); if (ret) { DRM_ERROR("link training #1 failed. ret=%d\n", ret); goto end; @@ -1311,7 +1311,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, /* print success info as this is a result of user initiated action */ drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n"); - ret = dp_ctrl_link_train_2(ctrl, training_step); + ret = msm_dp_ctrl_link_train_2(ctrl, training_step); if (ret) { DRM_ERROR("link training #2 failed. ret=%d\n", ret); goto end; @@ -1321,17 +1321,17 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n"); end: - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); return ret; } -static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl, int *training_step) { int ret = 0; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) return ret; @@ -1342,17 +1342,17 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, * a link training pattern, we have to first do soft reset. */ - ret = dp_ctrl_link_train(ctrl, training_step); + ret = msm_dp_ctrl_link_train(ctrl, training_step); return ret; } -int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl) +int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret = 0; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->core_clks_on) { drm_dbg_dp(ctrl->drm_dev, "core clks already enabled\n"); @@ -1374,11 +1374,11 @@ int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl) return 0; } -void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); clk_bulk_disable_unprepare(ctrl->num_core_clks, ctrl->core_clks); @@ -1391,12 +1391,12 @@ void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl) ctrl->core_clks_on ? 
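The clock handling in msm_dp_ctrl_core_clk_enable()/_disable() above uses the clk_bulk API so the core and link clock groups come up and go down as units. A minimal consumer sketch; the clock names and probe wiring are illustrative, not lifted from the driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data core_clks[] = {
	{ .id = "core_iface" },
	{ .id = "core_aux" },
};

/* probe time: resolve all clocks in one call */
static int clocks_get(struct device *dev)
{
	return devm_clk_bulk_get(dev, ARRAY_SIZE(core_clks), core_clks);
}

static int clocks_on(void)
{
	return clk_bulk_prepare_enable(ARRAY_SIZE(core_clks), core_clks);
}

static void clocks_off(void)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(core_clks), core_clks);
}

The bulk form keeps the enable/disable paths symmetric and makes the "core clks before link clks" ordering visible at the call site rather than buried in per-clock boilerplate.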
"on" : "off"); } -static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) +static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret = 0; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->link_clks_on) { drm_dbg_dp(ctrl->drm_dev, "links clks already enabled\n"); @@ -1406,7 +1406,7 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) if (!ctrl->core_clks_on) { drm_dbg_dp(ctrl->drm_dev, "Enable core clks before link clks\n"); - dp_ctrl_core_clk_enable(dp_ctrl); + msm_dp_ctrl_core_clk_enable(msm_dp_ctrl); } ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks); @@ -1424,11 +1424,11 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) return 0; } -static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl) +static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); clk_bulk_disable_unprepare(ctrl->num_link_clks, ctrl->link_clks); @@ -1441,7 +1441,7 @@ static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl) ctrl->core_clks_on ? "on" : "off"); } -static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl) { int ret = 0; struct phy *phy = ctrl->phy; @@ -1455,7 +1455,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) phy_power_on(phy); dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000); - ret = dp_ctrl_link_clk_enable(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_link_clk_enable(&ctrl->msm_dp_ctrl); if (ret) DRM_ERROR("Unable to start link clocks. 
ret=%d\n", ret); @@ -1464,13 +1464,13 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) return ret; } -void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) +void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); /* * all dp controller programmable registers will not @@ -1478,28 +1478,28 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) * therefore interrupt mask bits have to be updated * to enable/disable interrupts */ - dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); + msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); } -void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl) { u8 cfg; - struct dp_ctrl_private *ctrl = container_of(dp_ctrl, - struct dp_ctrl_private, dp_ctrl); + struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl, + struct msm_dp_ctrl_private, msm_dp_ctrl); if (!ctrl->panel->psr_cap.version) return; - dp_catalog_ctrl_config_psr(ctrl->catalog); + msm_dp_catalog_ctrl_config_psr(ctrl->catalog); cfg = DP_PSR_ENABLE; drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1); } -void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) +void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter) { - struct dp_ctrl_private *ctrl = container_of(dp_ctrl, - struct dp_ctrl_private, dp_ctrl); + struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl, + struct msm_dp_ctrl_private, msm_dp_ctrl); if (!ctrl->panel->psr_cap.version) return; @@ -1516,64 +1516,64 @@ void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) */ if (enter) { reinit_completion(&ctrl->psr_op_comp); - dp_catalog_ctrl_set_psr(ctrl->catalog, true); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true); if (!wait_for_completion_timeout(&ctrl->psr_op_comp, PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) { DRM_ERROR("PSR_ENTRY timedout\n"); - dp_catalog_ctrl_set_psr(ctrl->catalog, false); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false); return; } - dp_ctrl_push_idle(dp_ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_ctrl_push_idle(msm_dp_ctrl); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); - dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); + msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); } else { - dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); + msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); - dp_catalog_ctrl_set_psr(ctrl->catalog, false); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - dp_ctrl_wait4video_ready(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_ctrl_wait4video_ready(ctrl); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); } } -void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_phy_reset(ctrl->catalog); + 
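The PSR entry/exit sequence in msm_dp_ctrl_set_psr() synchronizes with the hardware through a struct completion: re-arm it before kicking the transition, block with a bounded wait, and complete() from the ISR once the PSR interrupt fires. The pattern in isolation, with an illustrative timeout:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(psr_op_comp);

/* caller side: arm, trigger the hardware, then wait with a timeout */
static int psr_transition(void)
{
	reinit_completion(&psr_op_comp);
	/* ... write the PSR state to the catalog registers here ... */
	if (!wait_for_completion_timeout(&psr_op_comp,
					 msecs_to_jiffies(300)))
		return -ETIMEDOUT;
	return 0;
}

/* ISR side: signal the waiter once the PSR status bit is observed */
static void psr_irq_seen(void)
{
	complete(&psr_op_comp);
}

reinit_completion() before each use matters: a stale completion from an earlier transition would otherwise let the wait return immediately.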
msm_dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_init(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_phy_reset(ctrl->catalog); + msm_dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_exit(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) { struct phy *phy = ctrl->phy; int ret = 0; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes; phy_configure(phy, &ctrl->phy_opts); /* @@ -1583,13 +1583,13 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) */ dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); /* hw recommended delay before re-enabling clocks */ msleep(20); - ret = dp_ctrl_enable_mainlink_clocks(ctrl); + ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (ret) { DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret); return ret; @@ -1598,18 +1598,18 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) { struct phy *phy; phy = ctrl->phy; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); @@ -1622,30 +1622,30 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) return 0; } -static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl) { int ret = 0; int training_step = DP_TRAINING_NONE; - dp_ctrl_push_idle(&ctrl->dp_ctrl); + msm_dp_ctrl_push_idle(&ctrl->msm_dp_ctrl); ctrl->link->phy_params.p_level = 0; ctrl->link->phy_params.v_level = 0; - ret = dp_ctrl_setup_main_link(ctrl, &training_step); + ret = msm_dp_ctrl_setup_main_link(ctrl, &training_step); if (ret) goto end; - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - ret = dp_ctrl_wait4video_ready(ctrl); + ret = msm_dp_ctrl_wait4video_ready(ctrl); end: return ret; } -static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl) { bool success = false; u32 pattern_sent = 0x0; @@ -1653,17 +1653,17 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", 
pattern_requested); - if (dp_ctrl_set_vx_px(ctrl, + if (msm_dp_ctrl_set_vx_px(ctrl, ctrl->link->phy_params.v_level, ctrl->link->phy_params.p_level)) { DRM_ERROR("Failed to set v/p levels\n"); return false; } - dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); - dp_ctrl_update_vx_px(ctrl); - dp_link_send_test_response(ctrl->link); + msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); + msm_dp_ctrl_update_vx_px(ctrl); + msm_dp_link_send_test_response(ctrl->link); - pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); + pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); switch (pattern_sent) { case MR_LINK_TRAINING1: @@ -1697,7 +1697,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) return success; } -static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_process_phy_test_request(struct msm_dp_ctrl_private *ctrl) { int ret; unsigned long pixel_rate; @@ -1713,15 +1713,15 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) * running. Add the global reset just before disabling the * link clocks and core clocks. */ - dp_ctrl_off(&ctrl->dp_ctrl); + msm_dp_ctrl_off(&ctrl->msm_dp_ctrl); - ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_on_link(&ctrl->msm_dp_ctrl); if (ret) { DRM_ERROR("failed to enable DP link controller\n"); return ret; } - pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock; ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000); if (ret) { DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret); @@ -1739,49 +1739,49 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) ctrl->stream_clks_on = true; } - dp_ctrl_send_phy_test_pattern(ctrl); + msm_dp_ctrl_send_phy_test_pattern(ctrl); return 0; } -void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 sink_request = 0x0; - if (!dp_ctrl) { + if (!msm_dp_ctrl) { DRM_ERROR("invalid input\n"); return; } - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); sink_request = ctrl->link->sink_request; if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n"); - if (dp_ctrl_process_phy_test_request(ctrl)) { + if (msm_dp_ctrl_process_phy_test_request(ctrl)) { DRM_ERROR("process phy_test_req failed\n"); return; } } if (sink_request & DP_LINK_STATUS_UPDATED) { - if (dp_ctrl_link_maintenance(ctrl)) { + if (msm_dp_ctrl_link_maintenance(ctrl)) { DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); return; } } if (sink_request & DP_TEST_LINK_TRAINING) { - dp_link_send_test_response(ctrl->link); - if (dp_ctrl_link_maintenance(ctrl)) { + msm_dp_link_send_test_response(ctrl->link); + if (msm_dp_ctrl_link_maintenance(ctrl)) { DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); return; } } } -static bool dp_ctrl_clock_recovery_any_ok( +static bool msm_dp_ctrl_clock_recovery_any_ok( const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count) { @@ -1800,20 +1800,20 @@ static bool dp_ctrl_clock_recovery_any_ok( return drm_dp_clock_recovery_ok(link_status, reduced_cnt); } -static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl) +static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl) { u8 link_status[DP_LINK_STATUS_SIZE]; 
int num_lanes = ctrl->link->link_params.num_lanes; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); return drm_dp_channel_eq_ok(link_status, num_lanes); } -int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) +int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) { int rc = 0; - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 rate; int link_train_max_retries = 5; u32 const phy_cts_pixel_clk_khz = 148500; @@ -1821,15 +1821,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) unsigned int training_step; unsigned long pixel_rate; - if (!dp_ctrl) + if (!msm_dp_ctrl) return -EINVAL; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); rate = ctrl->panel->link_info.rate; - pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock; - dp_ctrl_core_clk_enable(&ctrl->dp_ctrl); + msm_dp_ctrl_core_clk_enable(&ctrl->msm_dp_ctrl); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, @@ -1840,7 +1840,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl->link->link_params.rate = rate; ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; - if (ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) pixel_rate >>= 1; } @@ -1848,32 +1848,32 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes, pixel_rate); - rc = dp_ctrl_enable_mainlink_clocks(ctrl); + rc = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (rc) return rc; while (--link_train_max_retries) { training_step = DP_TRAINING_NONE; - rc = dp_ctrl_setup_main_link(ctrl, &training_step); + rc = msm_dp_ctrl_setup_main_link(ctrl, &training_step); if (rc == 0) { /* training completed successfully */ break; } else if (training_step == DP_TRAINING_1) { /* link train_1 failed */ - if (!dp_catalog_link_is_connected(ctrl->catalog)) + if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); - rc = dp_ctrl_link_rate_down_shift(ctrl); + rc = msm_dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ - if (dp_ctrl_clock_recovery_any_ok(link_status, + if (msm_dp_ctrl_clock_recovery_any_ok(link_status, ctrl->link->link_params.num_lanes)) { /* * some lanes are ready, * reduce lane number */ - rc = dp_ctrl_link_lane_down_shift(ctrl); + rc = msm_dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* lane == 1 already */ /* end with failure */ break; @@ -1885,16 +1885,16 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } } else if (training_step == DP_TRAINING_2) { /* link train_2 failed */ - if (!dp_catalog_link_is_connected(ctrl->catalog)) + if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); if (!drm_dp_clock_recovery_ok(link_status, ctrl->link->link_params.num_lanes)) - rc = dp_ctrl_link_rate_down_shift(ctrl); + rc = msm_dp_ctrl_link_rate_down_shift(ctrl); else - rc = dp_ctrl_link_lane_down_shift(ctrl); + rc = msm_dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* end with failure */ @@ -1902,10 +1902,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } /* stop link training before start re training */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); } - rc = 
dp_ctrl_reinitialize_mainlink(ctrl); + rc = msm_dp_ctrl_reinitialize_mainlink(ctrl); if (rc) { DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc); break; @@ -1926,38 +1926,38 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) * link training failed * end txing train pattern here */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); - dp_ctrl_deinitialize_mainlink(ctrl); + msm_dp_ctrl_deinitialize_mainlink(ctrl); rc = -ECONNRESET; } return rc; } -static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl) { int training_step = DP_TRAINING_NONE; - return dp_ctrl_setup_main_link(ctrl, &training_step); + return msm_dp_ctrl_setup_main_link(ctrl, &training_step); } -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) +int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train) { int ret = 0; bool mainlink_ready = false; - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; unsigned long pixel_rate; unsigned long pixel_rate_orig; - if (!dp_ctrl) + if (!msm_dp_ctrl) return -EINVAL; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); - pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = pixel_rate_orig = ctrl->panel->msm_dp_mode.drm_mode.clock; - if (dp_ctrl->wide_bus_en || ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (msm_dp_ctrl->wide_bus_en || ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) pixel_rate >>= 1; drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", @@ -1969,7 +1969,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl->core_clks_on, ctrl->link_clks_on, ctrl->stream_clks_on); if (!ctrl->link_clks_on) { /* link clk is off */ - ret = dp_ctrl_enable_mainlink_clocks(ctrl); + ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (ret) { DRM_ERROR("Failed to start link clocks. ret=%d\n", ret); goto end; @@ -1993,11 +1993,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl->stream_clks_on = true; } - if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) - dp_ctrl_link_retrain(ctrl); + if (force_link_train || !msm_dp_ctrl_channel_eq_ok(ctrl)) + msm_dp_ctrl_link_retrain(ctrl); /* stop txing train pattern to end link training */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); /* * Set up transfer unit values and set controller state to send @@ -2005,22 +2005,22 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) */ reinit_completion(&ctrl->video_comp); - dp_ctrl_configure_source_params(ctrl); + msm_dp_ctrl_configure_source_params(ctrl); - dp_catalog_ctrl_config_msa(ctrl->catalog, + msm_dp_catalog_ctrl_config_msa(ctrl->catalog, ctrl->link->link_params.rate, pixel_rate_orig, - ctrl->panel->dp_mode.out_fmt_is_yuv_420); + ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420); - dp_ctrl_setup_tr_unit(ctrl); + msm_dp_ctrl_setup_tr_unit(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - ret = dp_ctrl_wait4video_ready(ctrl); + ret = msm_dp_ctrl_wait4video_ready(ctrl); if (ret) return ret; - mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog); + mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog); drm_dbg_dp(ctrl->drm_dev, "mainlink %s\n", mainlink_ready ? 
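msm_dp_ctrl_on_stream() above halves the pixel clock it programs when wide bus is enabled or the stream is YUV420, because the interface then moves two pixels (or half-rate chroma-subsampled data) per clock cycle. The arithmetic in isolation, with an illustrative 4k60 rate:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long pixel_rate = 594000;  /* 4k60 CEA mode, kHz */
	bool wide_bus_en = true;
	bool out_fmt_is_yuv_420 = false;

	if (wide_bus_en || out_fmt_is_yuv_420)
		pixel_rate >>= 1;           /* two pixels per clock cycle */

	printf("programmed pixel clock: %lu kHz\n", pixel_rate);
	return 0;
}

Note the original rate (pixel_rate_orig) is still what goes into the MSA programming via msm_dp_catalog_ctrl_config_msa(); only the clock fed to the PHY/pixel path is halved.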
"READY" : "NOT READY"); @@ -2028,20 +2028,20 @@ end: return ret; } -void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); + msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); /* set dongle to D3 (power off) mode */ - dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); + msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); if (ctrl->stream_clks_on) { clk_disable_unprepare(ctrl->pixel_clk); @@ -2049,7 +2049,7 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) } dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); @@ -2061,17 +2061,17 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n", phy, phy->init_count, phy->power_count); @@ -2082,19 +2082,19 @@ void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -void dp_ctrl_off(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); + msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); if (ctrl->stream_clks_on) { clk_disable_unprepare(ctrl->pixel_clk); @@ -2102,26 +2102,26 @@ void dp_ctrl_off(struct dp_ctrl *dp_ctrl) } dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 isr; irqreturn_t ret = IRQ_NONE; - if (!dp_ctrl) + if (!msm_dp_ctrl) return IRQ_NONE; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->panel->psr_cap.version) { - isr = 
dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); + isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); if (isr) complete(&ctrl->psr_op_comp); @@ -2136,7 +2136,7 @@ irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n"); } - isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); + isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog); if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { @@ -2164,13 +2164,13 @@ static const char *ctrl_clks[] = { "ctrl_link_iface", }; -static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl) +static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct device *dev; int i, rc; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); dev = ctrl->dev; ctrl->num_core_clks = ARRAY_SIZE(core_clks); @@ -2204,12 +2204,12 @@ static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl) return 0; } -struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, - struct dp_panel *panel, struct drm_dp_aux *aux, - struct dp_catalog *catalog, +struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link, + struct msm_dp_panel *panel, struct drm_dp_aux *aux, + struct msm_dp_catalog *catalog, struct phy *phy) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret; if (!dev || !panel || !aux || @@ -2228,7 +2228,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, if (ret) { dev_err(dev, "invalid DP OPP table in device tree\n"); /* caller do PTR_ERR(opp_table) */ - return (struct dp_ctrl *)ERR_PTR(ret); + return (struct msm_dp_ctrl *)ERR_PTR(ret); } /* OPP table is optional */ @@ -2248,11 +2248,11 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, ctrl->dev = dev; ctrl->phy = phy; - ret = dp_ctrl_clk_init(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl); if (ret) { dev_err(dev, "failed to init clocks\n"); return ERR_PTR(ret); } - return &ctrl->dp_ctrl; + return &ctrl->msm_dp_ctrl; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index ffcbd9a25748..b7abfedbf574 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -11,34 +11,34 @@ #include "dp_link.h" #include "dp_catalog.h" -struct dp_ctrl { +struct msm_dp_ctrl { bool wide_bus_en; }; struct phy; -int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train); -void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); -void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl); -void dp_ctrl_off(struct dp_ctrl *dp_ctrl); -void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); -irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl); -void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl); -struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, - struct dp_panel *panel, struct drm_dp_aux *aux, - struct dp_catalog *catalog, +int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl); +int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train); +void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl); +irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl); +void 
msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl); +struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link, + struct msm_dp_panel *panel, struct drm_dp_aux *aux, + struct msm_dp_catalog *catalog, struct phy *phy); -void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable); -void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl); -void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl); -void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl); +void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable); +void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl); -void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable); -void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl); +void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enable); +void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl); -int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl); -void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl); +int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl); #endif /* _DP_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index b8611f6d2296..22fd946ee201 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -17,15 +17,15 @@ #define DEBUG_NAME "msm_dp" -struct dp_debug_private { - struct dp_link *link; - struct dp_panel *panel; +struct msm_dp_debug_private { + struct msm_dp_link *link; + struct msm_dp_panel *panel; struct drm_connector *connector; }; -static int dp_debug_show(struct seq_file *seq, void *p) +static int msm_dp_debug_show(struct seq_file *seq, void *p) { - struct dp_debug_private *debug = seq->private; + struct msm_dp_debug_private *debug = seq->private; u64 lclk = 0; u32 link_params_rate; const struct drm_display_mode *drm_mode; @@ -33,7 +33,7 @@ static int dp_debug_show(struct seq_file *seq, void *p) if (!debug) return -ENODEV; - drm_mode = &debug->panel->dp_mode.drm_mode; + drm_mode = &debug->panel->msm_dp_mode.drm_mode; seq_printf(seq, "\tname = %s\n", DEBUG_NAME); seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n", @@ -55,8 +55,8 @@ static int dp_debug_show(struct seq_file *seq, void *p) drm_mode->hsync_end - drm_mode->hsync_start, drm_mode->vsync_end - drm_mode->vsync_start); seq_printf(seq, "\t\tactive_low = %dx%d\n", - debug->panel->dp_mode.h_active_low, - debug->panel->dp_mode.v_active_low); + debug->panel->msm_dp_mode.h_active_low, + debug->panel->msm_dp_mode.v_active_low); seq_printf(seq, "\t\th_skew = %d\n", drm_mode->hskew); seq_printf(seq, "\t\trefresh rate = %d\n", @@ -64,7 +64,7 @@ static int dp_debug_show(struct seq_file *seq, void *p) seq_printf(seq, "\t\tpixel clock khz = %d\n", drm_mode->clock); seq_printf(seq, "\t\tbpp = %d\n", - debug->panel->dp_mode.bpp); + debug->panel->msm_dp_mode.bpp); /* Link Information */ seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n", @@ -83,11 +83,11 @@ static int dp_debug_show(struct seq_file *seq, void *p) return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_debug); +DEFINE_SHOW_ATTRIBUTE(msm_dp_debug); -static int dp_test_data_show(struct seq_file *m, void *data) +static int msm_dp_test_data_show(struct seq_file *m, void *data) { - const struct dp_debug_private *debug = m->private; + const struct msm_dp_debug_private *debug = m->private; const struct drm_connector *connector = debug->connector; u32 
bpc; @@ -98,18 +98,18 @@ static int dp_test_data_show(struct seq_file *m, void *data) seq_printf(m, "vdisplay: %d\n", debug->link->test_video.test_v_height); seq_printf(m, "bpc: %u\n", - dp_link_bit_depth_to_bpp(bpc) / 3); + msm_dp_link_bit_depth_to_bpp(bpc) / 3); } else { seq_puts(m, "0"); } return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_test_data); +DEFINE_SHOW_ATTRIBUTE(msm_dp_test_data); -static int dp_test_type_show(struct seq_file *m, void *data) +static int msm_dp_test_type_show(struct seq_file *m, void *data) { - const struct dp_debug_private *debug = m->private; + const struct msm_dp_debug_private *debug = m->private; const struct drm_connector *connector = debug->connector; if (connector->status == connector_status_connected) @@ -119,15 +119,15 @@ static int dp_test_type_show(struct seq_file *m, void *data) return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_test_type); +DEFINE_SHOW_ATTRIBUTE(msm_dp_test_type); -static ssize_t dp_test_active_write(struct file *file, +static ssize_t msm_dp_test_active_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { char *input_buffer; int status = 0; - const struct dp_debug_private *debug; + const struct msm_dp_debug_private *debug; const struct drm_connector *connector; int val = 0; @@ -164,9 +164,9 @@ static ssize_t dp_test_active_write(struct file *file, return len; } -static int dp_test_active_show(struct seq_file *m, void *data) +static int msm_dp_test_active_show(struct seq_file *m, void *data) { - struct dp_debug_private *debug = m->private; + struct msm_dp_debug_private *debug = m->private; struct drm_connector *connector = debug->connector; if (connector->status == connector_status_connected) { @@ -181,28 +181,28 @@ static int dp_test_active_show(struct seq_file *m, void *data) return 0; } -static int dp_test_active_open(struct inode *inode, +static int msm_dp_test_active_open(struct inode *inode, struct file *file) { - return single_open(file, dp_test_active_show, + return single_open(file, msm_dp_test_active_show, inode->i_private); } static const struct file_operations test_active_fops = { .owner = THIS_MODULE, - .open = dp_test_active_open, + .open = msm_dp_test_active_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, - .write = dp_test_active_write + .write = msm_dp_test_active_write }; -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp) { - struct dp_debug_private *debug; + struct msm_dp_debug_private *debug; if (!dev || !panel || !link) { DRM_ERROR("invalid input\n"); @@ -217,20 +217,20 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel, debug->panel = panel; debugfs_create_file("dp_debug", 0444, root, - debug, &dp_debug_fops); + debug, &msm_dp_debug_fops); if (!is_edp) { - debugfs_create_file("msm_dp_test_active", 0444, + debugfs_create_file("dp_test_active", 0444, root, debug, &test_active_fops); - debugfs_create_file("msm_dp_test_data", 0444, + debugfs_create_file("dp_test_data", 0444, root, - debug, &dp_test_data_fops); + debug, &msm_dp_test_data_fops); - debugfs_create_file("msm_dp_test_type", 0444, + debugfs_create_file("dp_test_type", 0444, root, - debug, &dp_test_type_fops); + debug, &msm_dp_test_type_fops); } return 0; diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 7e1aa892fc09..6dc0ff4f0f65 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ 
b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -12,7 +12,7 @@ #if defined(CONFIG_DEBUG_FS) /** - * dp_debug_get() - configure and get the DisplayPlot debug module data + * msm_dp_debug_init() - configure and get the DisplayPort debug module data * * @dev: device instance of the caller * @panel: instance of panel module @@ -25,8 +25,8 @@ * This function sets up the debug module and provides a way * for debugfs input to be communicated with existing modules */ -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp); @@ -34,8 +34,8 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel, #else static inline -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp) diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index e1228fb093ee..aba925aab7ad 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -67,13 +67,13 @@ enum { #define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2) -struct dp_event { +struct msm_dp_event { u32 event_id; u32 data; u32 delay; }; -struct dp_display_private { +struct msm_dp_display_private { int irq; unsigned int id; @@ -85,14 +85,14 @@ struct dp_display_private { struct drm_device *drm_dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_panel *panel; - struct dp_ctrl *ctrl; + struct msm_dp_link *link; + struct msm_dp_panel *panel; + struct msm_dp_ctrl *ctrl; - struct dp_display_mode dp_mode; - struct msm_dp dp_display; + struct msm_dp_display_mode msm_dp_mode; + struct msm_dp msm_dp_display; /* wait for audio signaling */ struct completion audio_comp; @@ -104,12 +104,12 @@ struct dp_display_private { u32 event_pndx; u32 event_gndx; struct task_struct *ev_tsk; - struct dp_event event_list[DP_EVENT_Q_MAX]; + struct msm_dp_event event_list[DP_EVENT_Q_MAX]; spinlock_t event_lock; bool wide_bus_supported; - struct dp_audio *audio; + struct msm_dp_audio *audio; }; struct msm_dp_desc { @@ -118,25 +118,33 @@ struct msm_dp_desc { bool wide_bus_supported; }; -static const struct msm_dp_desc sc7180_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sa8775p[] = { + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x22154000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x2215c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, + {} +}; + +static const struct msm_dp_desc msm_dp_desc_sc7180[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc7280_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc7280[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8180x_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc8180x[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 
0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8280xp_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc8280xp[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, @@ -148,12 +156,12 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = { {} }; -static const struct msm_dp_desc sm8650_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sm8650[] = { { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc x1e80100_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_x1e80100[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, @@ -161,70 +169,71 @@ static const struct msm_dp_desc x1e80100_dp_descs[] = { {} }; -static const struct of_device_id dp_dt_match[] = { - { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs }, - { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs }, - { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs }, - { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs }, - { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs }, - { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs }, +static const struct of_device_id msm_dp_dt_match[] = { + { .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p }, + { .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 }, + { .compatible = "qcom,sc7280-edp", .data = &msm_dp_desc_sc7280 }, + { .compatible = "qcom,sc8180x-dp", .data = &msm_dp_desc_sc8180x }, + { .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x }, + { .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp }, + { .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp }, + { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 }, + { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 }, {} }; -static struct dp_display_private *dev_get_dp_display_private(struct device *dev) +static struct msm_dp_display_private *dev_get_dp_display_private(struct device *dev) { struct msm_dp *dp = dev_get_drvdata(dev); - return container_of(dp, struct dp_display_private, dp_display); + return container_of(dp, struct msm_dp_display_private, msm_dp_display); } -static int dp_add_event(struct dp_display_private *dp_priv, u32 event, +static int msm_dp_add_event(struct msm_dp_display_private *msm_dp_priv, u32 event, u32 data, u32 delay) { unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; int pndx; - 
spin_lock_irqsave(&dp_priv->event_lock, flag); - pndx = dp_priv->event_pndx + 1; + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + pndx = msm_dp_priv->event_pndx + 1; pndx %= DP_EVENT_Q_MAX; - if (pndx == dp_priv->event_gndx) { + if (pndx == msm_dp_priv->event_gndx) { pr_err("event_q is full: pndx=%d gndx=%d\n", - dp_priv->event_pndx, dp_priv->event_gndx); - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + msm_dp_priv->event_pndx, msm_dp_priv->event_gndx); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return -EPERM; } - todo = &dp_priv->event_list[dp_priv->event_pndx++]; - dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++]; + msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX; todo->event_id = event; todo->data = data; todo->delay = delay; - wake_up(&dp_priv->event_q); - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + wake_up(&msm_dp_priv->event_q); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return 0; } -static int dp_del_event(struct dp_display_private *dp_priv, u32 event) +static int msm_dp_del_event(struct msm_dp_display_private *msm_dp_priv, u32 event) { unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; u32 gndx; - spin_lock_irqsave(&dp_priv->event_lock, flag); - if (dp_priv->event_pndx == dp_priv->event_gndx) { - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) { + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return -ENOENT; } - gndx = dp_priv->event_gndx; - while (dp_priv->event_pndx != gndx) { - todo = &dp_priv->event_list[gndx]; + gndx = msm_dp_priv->event_gndx; + while (msm_dp_priv->event_pndx != gndx) { + todo = &msm_dp_priv->event_list[gndx]; if (todo->event_id == event) { todo->event_id = EV_NO_EVENT; /* deleted */ todo->delay = 0; @@ -232,60 +241,60 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event) gndx++; gndx %= DP_EVENT_Q_MAX; } - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return 0; } -void dp_display_signal_audio_start(struct msm_dp *dp_display) +void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); reinit_completion(&dp->audio_comp); } -void dp_display_signal_audio_complete(struct msm_dp *dp_display) +void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); complete_all(&dp->audio_comp); } -static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv); +static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv); -static int dp_display_bind(struct device *dev, struct device *master, +static int msm_dp_display_bind(struct device *dev, struct device *master, void *data) { int rc = 0; - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); struct drm_device *drm = priv->dev; - dp->dp_display.drm_dev = drm; - 
priv->dp[dp->id] = &dp->dp_display; + dp->msm_dp_display.drm_dev = drm; + priv->dp[dp->id] = &dp->msm_dp_display; dp->drm_dev = drm; dp->aux->drm_dev = drm; - rc = dp_aux_register(dp->aux); + rc = msm_dp_aux_register(dp->aux); if (rc) { DRM_ERROR("DRM DP AUX register failed\n"); goto end; } - rc = dp_register_audio_driver(dev, dp->audio); + rc = msm_dp_register_audio_driver(dev, dp->audio); if (rc) { DRM_ERROR("Audio registration Dp failed\n"); goto end; } - rc = dp_hpd_event_thread_start(dp); + rc = msm_dp_hpd_event_thread_start(dp); if (rc) { DRM_ERROR("Event thread create failed\n"); goto end; @@ -296,44 +305,44 @@ end: return rc; } -static void dp_display_unbind(struct device *dev, struct device *master, +static void msm_dp_display_unbind(struct device *dev, struct device *master, void *data) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); kthread_stop(dp->ev_tsk); of_dp_aux_depopulate_bus(dp->aux); - dp_unregister_audio_driver(dev, dp->audio); - dp_aux_unregister(dp->aux); + msm_dp_unregister_audio_driver(dev, dp->audio); + msm_dp_aux_unregister(dp->aux); dp->drm_dev = NULL; dp->aux->drm_dev = NULL; priv->dp[dp->id] = NULL; } -static const struct component_ops dp_display_comp_ops = { - .bind = dp_display_bind, - .unbind = dp_display_unbind, +static const struct component_ops msm_dp_display_comp_ops = { + .bind = msm_dp_display_bind, + .unbind = msm_dp_display_unbind, }; -static void dp_display_send_hpd_event(struct msm_dp *dp_display) +static void msm_dp_display_send_hpd_event(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; struct drm_connector *connector; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - connector = dp->dp_display.connector; + connector = dp->msm_dp_display.connector; drm_helper_hpd_irq_event(connector->dev); } -static int dp_display_send_hpd_notification(struct dp_display_private *dp, +static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *dp, bool hpd) { - if ((hpd && dp->dp_display.link_ready) || - (!hpd && !dp->dp_display.link_ready)) { + if ((hpd && dp->msm_dp_display.link_ready) || + (!hpd && !dp->msm_dp_display.link_ready)) { drm_dbg_dp(dp->drm_dev, "HPD already %s\n", (hpd ? 
"on" : "off")); return 0; @@ -342,139 +351,139 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, /* reset video pattern flag on disconnect */ if (!hpd) { dp->panel->video_test = false; - if (!dp->dp_display.is_edp) - drm_dp_set_subconnector_property(dp->dp_display.connector, + if (!dp->msm_dp_display.is_edp) + drm_dp_set_subconnector_property(dp->msm_dp_display.connector, connector_status_disconnected, dp->panel->dpcd, dp->panel->downstream_ports); } - dp->dp_display.link_ready = hpd; + dp->msm_dp_display.link_ready = hpd; drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", - dp->dp_display.connector_type, hpd); - dp_display_send_hpd_event(&dp->dp_display); + dp->msm_dp_display.connector_type, hpd); + msm_dp_display_send_hpd_event(&dp->msm_dp_display); return 0; } -static int dp_display_process_hpd_high(struct dp_display_private *dp) +static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp) { - struct drm_connector *connector = dp->dp_display.connector; + struct drm_connector *connector = dp->msm_dp_display.connector; const struct drm_display_info *info = &connector->display_info; int rc = 0; - rc = dp_panel_read_sink_caps(dp->panel, connector); + rc = msm_dp_panel_read_sink_caps(dp->panel, connector); if (rc) goto end; - dp_link_process_request(dp->link); + msm_dp_link_process_request(dp->link); - if (!dp->dp_display.is_edp) + if (!dp->msm_dp_display.is_edp) drm_dp_set_subconnector_property(connector, connector_status_connected, dp->panel->dpcd, dp->panel->downstream_ports); - dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; + dp->msm_dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; dp->audio_supported = info->has_audio; - dp_panel_handle_sink_request(dp->panel); + msm_dp_panel_handle_sink_request(dp->panel); /* * set sink to normal operation mode -- D0 * before dpcd read */ - dp_link_psm_config(dp->link, &dp->panel->link_info, false); + msm_dp_link_psm_config(dp->link, &dp->panel->link_info, false); - dp_link_reset_phy_params_vx_px(dp->link); - rc = dp_ctrl_on_link(dp->ctrl); + msm_dp_link_reset_phy_params_vx_px(dp->link); + rc = msm_dp_ctrl_on_link(dp->ctrl); if (rc) { DRM_ERROR("failed to complete DP link training\n"); goto end; } - dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); end: return rc; } -static void dp_display_host_phy_init(struct dp_display_private *dp) +static void msm_dp_display_host_phy_init(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); if (!dp->phy_initialized) { - dp_ctrl_phy_init(dp->ctrl); + msm_dp_ctrl_phy_init(dp->ctrl); dp->phy_initialized = true; } } -static void dp_display_host_phy_exit(struct dp_display_private *dp) +static void msm_dp_display_host_phy_exit(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); if (dp->phy_initialized) { - dp_ctrl_phy_exit(dp->ctrl); + msm_dp_ctrl_phy_exit(dp->ctrl); dp->phy_initialized = false; } } -static void dp_display_host_init(struct dp_display_private *dp) +static void msm_dp_display_host_init(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - 
dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); - dp_ctrl_core_clk_enable(dp->ctrl); - dp_ctrl_reset_irq_ctrl(dp->ctrl, true); - dp_aux_init(dp->aux); + msm_dp_ctrl_core_clk_enable(dp->ctrl); + msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true); + msm_dp_aux_init(dp->aux); dp->core_initialized = true; } -static void dp_display_host_deinit(struct dp_display_private *dp) +static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); - dp_ctrl_reset_irq_ctrl(dp->ctrl, false); - dp_aux_deinit(dp->aux); - dp_ctrl_core_clk_disable(dp->ctrl); + msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false); + msm_dp_aux_deinit(dp->aux); + msm_dp_ctrl_core_clk_disable(dp->ctrl); dp->core_initialized = false; } -static int dp_display_usbpd_configure_cb(struct device *dev) +static int msm_dp_display_usbpd_configure_cb(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); - dp_display_host_phy_init(dp); + msm_dp_display_host_phy_init(dp); - return dp_display_process_hpd_high(dp); + return msm_dp_display_process_hpd_high(dp); } -static int dp_display_notify_disconnect(struct device *dev) +static int msm_dp_display_notify_disconnect(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); return 0; } -static void dp_display_handle_video_request(struct dp_display_private *dp) +static void msm_dp_display_handle_video_request(struct msm_dp_display_private *dp) { if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { dp->panel->video_test = true; - dp_link_send_test_response(dp->link); + msm_dp_link_send_test_response(dp->link); } } -static int dp_display_handle_port_status_changed(struct dp_display_private *dp) +static int msm_dp_display_handle_port_status_changed(struct msm_dp_display_private *dp) { int rc = 0; @@ -482,12 +491,12 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp) drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n"); if (dp->hpd_state != ST_DISCONNECTED) { dp->hpd_state = ST_DISCONNECT_PENDING; - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); } } else { if (dp->hpd_state == ST_DISCONNECTED) { dp->hpd_state = ST_MAINLINK_READY; - rc = dp_display_process_hpd_high(dp); + rc = msm_dp_display_process_hpd_high(dp); if (rc) dp->hpd_state = ST_DISCONNECTED; } @@ -496,7 +505,7 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp) return rc; } -static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +static int msm_dp_display_handle_irq_hpd(struct msm_dp_display_private *dp) { u32 sink_request = dp->link->sink_request; @@ -510,48 +519,48 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp) } } - dp_ctrl_handle_sink_request(dp->ctrl); + msm_dp_ctrl_handle_sink_request(dp->ctrl); if (sink_request & DP_TEST_LINK_VIDEO_PATTERN) - dp_display_handle_video_request(dp); + msm_dp_display_handle_video_request(dp); return 0; } -static int 
dp_display_usbpd_attention_cb(struct device *dev) +static int msm_dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; u32 sink_request; - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); /* check for any test request issued by sink */ - rc = dp_link_process_request(dp->link); + rc = msm_dp_link_process_request(dp->link); if (!rc) { sink_request = dp->link->sink_request; drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request); if (sink_request & DS_PORT_STATUS_CHANGED) - rc = dp_display_handle_port_status_changed(dp); + rc = msm_dp_display_handle_port_status_changed(dp); else - rc = dp_display_handle_irq_hpd(dp); + rc = msm_dp_display_handle_irq_hpd(dp); } return rc; } -static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; int ret; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; - dp_aux_enable_xfers(dp->aux, true); + msm_dp_aux_enable_xfers(dp->aux, true); mutex_lock(&dp->event_mutex); state = dp->hpd_state; drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); @@ -565,7 +574,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) if (state == ST_DISCONNECT_PENDING) { /* wait until ST_DISCONNECTED */ - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ mutex_unlock(&dp->event_mutex); return 0; } @@ -577,7 +586,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) return ret; } - ret = dp_display_usbpd_configure_cb(&pdev->dev); + ret = msm_dp_display_usbpd_configure_cb(&pdev->dev); if (ret) { /* link train failed */ dp->hpd_state = ST_DISCONNECTED; pm_runtime_put_sync(&pdev->dev); @@ -586,60 +595,60 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) } drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); /* uevent will complete connection part */ return 0; }; -static void dp_display_handle_plugged_change(struct msm_dp *dp_display, +static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display, bool plugged) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, - struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, + struct msm_dp_display_private, msm_dp_display); /* notify audio subsystem only if sink supports audio */ - if (dp_display->plugged_cb && dp_display->codec_dev && + if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev && dp->audio_supported) - dp_display->plugged_cb(dp_display->codec_dev, plugged); + msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged); } -static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; - dp_aux_enable_xfers(dp->aux, false); + msm_dp_aux_enable_xfers(dp->aux, false); mutex_lock(&dp->event_mutex); state = dp->hpd_state; 
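
The container_of() pattern seen throughout these hunks is the backbone of the whole rename: the driver hands out a small public struct (msm_dp, msm_dp_ctrl, ...) embedded inside a larger private struct, and every entry point recovers the private state from the public pointer. A minimal standalone sketch of the idiom, with hypothetical handle/handle_private names and a simplified stand-in for the kernel's container_of() macro (the real one lives in <linux/container_of.h> and adds type checking):

#include <stdbool.h>
#include <stddef.h>

/* Public handle, the only type exposed to callers. */
struct handle {
	bool ready;
};

/* Private state; the public handle is embedded as a member. */
struct handle_private {
	int refcount;
	struct handle handle;
};

/* Simplified: subtract the member offset to get back to the container. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct handle_private *to_private(struct handle *h)
{
	return container_of(h, struct handle_private, handle);
}

Because the conversion is pure pointer arithmetic, a mechanical rename like this one cannot change behavior, provided the member name passed to container_of() is updated together with the struct, which is exactly what each hunk above does.
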
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); /* unplugged, no more irq_hpd handle */ - dp_del_event(dp, EV_IRQ_HPD_INT); + msm_dp_del_event(dp, EV_IRQ_HPD_INT); if (state == ST_DISCONNECTED) { /* triggered by irq_hdp with sink_count = 0 */ if (dp->link->sink_count == 0) { - dp_display_host_phy_exit(dp); + msm_dp_display_host_phy_exit(dp); } - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); mutex_unlock(&dp->event_mutex); return 0; } else if (state == ST_DISCONNECT_PENDING) { mutex_unlock(&dp->event_mutex); return 0; } else if (state == ST_MAINLINK_READY) { - dp_ctrl_off_link(dp->ctrl); - dp_display_host_phy_exit(dp); + msm_dp_ctrl_off_link(dp->ctrl); + msm_dp_display_host_phy_exit(dp); dp->hpd_state = ST_DISCONNECTED; - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); pm_runtime_put_sync(&pdev->dev); mutex_unlock(&dp->event_mutex); return 0; @@ -649,7 +658,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) * We don't need separate work for disconnect as * connect/attention interrupts are disabled */ - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); if (state == ST_DISPLAY_OFF) { dp->hpd_state = ST_DISCONNECTED; @@ -658,10 +667,10 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) } /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(&dp->dp_display, false); + msm_dp_display_handle_plugged_change(&dp->msm_dp_display, false); drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); /* uevent will complete disconnection part */ pm_runtime_put_sync(&pdev->dev); @@ -669,7 +678,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) return 0; } -static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_irq_hpd_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; @@ -678,7 +687,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) /* irq_hpd can happen at either connected or disconnected state */ state = dp->hpd_state; drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); @@ -687,33 +696,33 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) { /* wait until ST_CONNECTED */ - dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ + msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ mutex_unlock(&dp->event_mutex); return 0; } - dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev); + msm_dp_display_usbpd_attention_cb(&dp->msm_dp_display.pdev->dev); drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); return 0; } -static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +static void msm_dp_display_deinit_sub_modules(struct msm_dp_display_private *dp) { - dp_audio_put(dp->audio); - dp_panel_put(dp->panel); - dp_aux_put(dp->aux); 
+ msm_dp_audio_put(dp->audio); + msm_dp_panel_put(dp->panel); + msm_dp_aux_put(dp->aux); } -static int dp_init_sub_modules(struct dp_display_private *dp) +static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp) { int rc = 0; - struct device *dev = &dp->dp_display.pdev->dev; - struct dp_panel_in panel_in = { + struct device *dev = &dp->msm_dp_display.pdev->dev; + struct msm_dp_panel_in panel_in = { .dev = dev, }; struct phy *phy; @@ -723,14 +732,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp) return PTR_ERR(phy); rc = phy_set_mode_ext(phy, PHY_MODE_DP, - dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); + dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); if (rc) { DRM_ERROR("failed to set phy submode, rc = %d\n", rc); dp->catalog = NULL; goto error; } - dp->catalog = dp_catalog_get(dev); + dp->catalog = msm_dp_catalog_get(dev); if (IS_ERR(dp->catalog)) { rc = PTR_ERR(dp->catalog); DRM_ERROR("failed to initialize catalog, rc = %d\n", rc); @@ -738,9 +747,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->aux = dp_aux_get(dev, dp->catalog, + dp->aux = msm_dp_aux_get(dev, dp->catalog, phy, - dp->dp_display.is_edp); + dp->msm_dp_display.is_edp); if (IS_ERR(dp->aux)) { rc = PTR_ERR(dp->aux); DRM_ERROR("failed to initialize aux, rc = %d\n", rc); @@ -748,7 +757,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->link = dp_link_get(dev, dp->aux); + dp->link = msm_dp_link_get(dev, dp->aux); if (IS_ERR(dp->link)) { rc = PTR_ERR(dp->link); DRM_ERROR("failed to initialize link, rc = %d\n", rc); @@ -760,7 +769,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) panel_in.catalog = dp->catalog; panel_in.link = dp->link; - dp->panel = dp_panel_get(&panel_in); + dp->panel = msm_dp_panel_get(&panel_in); if (IS_ERR(dp->panel)) { rc = PTR_ERR(dp->panel); DRM_ERROR("failed to initialize panel, rc = %d\n", rc); @@ -768,7 +777,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_link; } - dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, + dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, dp->catalog, phy); if (IS_ERR(dp->ctrl)) { @@ -778,7 +787,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_ctrl; } - dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog); + dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog); if (IS_ERR(dp->audio)) { rc = PTR_ERR(dp->audio); pr_err("failed to initialize audio, rc = %d\n", rc); @@ -789,51 +798,51 @@ static int dp_init_sub_modules(struct dp_display_private *dp) return rc; error_ctrl: - dp_panel_put(dp->panel); + msm_dp_panel_put(dp->panel); error_link: - dp_aux_put(dp->aux); + msm_dp_aux_put(dp->aux); error: return rc; } -static int dp_display_set_mode(struct msm_dp *dp_display, - struct dp_display_mode *mode) +static int msm_dp_display_set_mode(struct msm_dp *msm_dp_display, + struct msm_dp_display_mode *mode) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); - dp->panel->dp_mode.bpp = mode->bpp; - dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; - dp_panel_init_panel_info(dp->panel); + drm_mode_copy(&dp->panel->msm_dp_mode.drm_mode, &mode->drm_mode); + 
dp->panel->msm_dp_mode.bpp = mode->bpp; + dp->panel->msm_dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; + msm_dp_panel_init_panel_info(dp->panel); return 0; } -static int dp_display_enable(struct dp_display_private *dp, bool force_link_train) +static int msm_dp_display_enable(struct msm_dp_display_private *dp, bool force_link_train) { int rc = 0; - struct msm_dp *dp_display = &dp->dp_display; + struct msm_dp *msm_dp_display = &dp->msm_dp_display; drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count); - if (dp_display->power_on) { + if (msm_dp_display->power_on) { drm_dbg_dp(dp->drm_dev, "Link already setup, return\n"); return 0; } - rc = dp_ctrl_on_stream(dp->ctrl, force_link_train); + rc = msm_dp_ctrl_on_stream(dp->ctrl, force_link_train); if (!rc) - dp_display->power_on = true; + msm_dp_display->power_on = true; return rc; } -static int dp_display_post_enable(struct msm_dp *dp_display) +static int msm_dp_display_post_enable(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; u32 rate; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); rate = dp->link->link_params.rate; @@ -843,85 +852,85 @@ static int dp_display_post_enable(struct msm_dp *dp_display) } /* signal the connect event late to synchronize video and display */ - dp_display_handle_plugged_change(dp_display, true); + msm_dp_display_handle_plugged_change(msm_dp_display, true); - if (dp_display->psr_supported) - dp_ctrl_config_psr(dp->ctrl); + if (msm_dp_display->psr_supported) + msm_dp_ctrl_config_psr(dp->ctrl); return 0; } -static int dp_display_disable(struct dp_display_private *dp) +static int msm_dp_display_disable(struct msm_dp_display_private *dp) { - struct msm_dp *dp_display = &dp->dp_display; + struct msm_dp *msm_dp_display = &dp->msm_dp_display; - if (!dp_display->power_on) + if (!msm_dp_display->power_on) return 0; /* wait only if audio was enabled */ - if (dp_display->audio_enabled) { + if (msm_dp_display->audio_enabled) { /* signal the disconnect event */ - dp_display_handle_plugged_change(dp_display, false); + msm_dp_display_handle_plugged_change(msm_dp_display, false); if (!wait_for_completion_timeout(&dp->audio_comp, HZ * 5)) DRM_ERROR("audio comp timeout\n"); } - dp_display->audio_enabled = false; + msm_dp_display->audio_enabled = false; if (dp->link->sink_count == 0) { /* * irq_hpd with sink_count = 0 * hdmi unplugged out of dongle */ - dp_ctrl_off_link_stream(dp->ctrl); + msm_dp_ctrl_off_link_stream(dp->ctrl); } else { /* * unplugged interrupt * dongle unplugged out of DUT */ - dp_ctrl_off(dp->ctrl); - dp_display_host_phy_exit(dp); + msm_dp_ctrl_off(dp->ctrl); + msm_dp_display_host_phy_exit(dp); } - dp_display->power_on = false; + msm_dp_display->power_on = false; drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count); return 0; } -int dp_display_set_plugged_cb(struct msm_dp *dp_display, +int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev) { bool plugged; - dp_display->plugged_cb = fn; - dp_display->codec_dev = codec_dev; - plugged = dp_display->link_ready; - dp_display_handle_plugged_change(dp_display, plugged); + msm_dp_display->plugged_cb = fn; + msm_dp_display->codec_dev = codec_dev; + plugged = msm_dp_display->link_ready; + msm_dp_display_handle_plugged_change(msm_dp_display, plugged); return 0; } /** - * dp_bridge_mode_valid - callback to determine if 
specified mode is valid + * msm_dp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure * @info: display info * @mode: Pointer to drm mode structure * Returns: Validity status for specified mode */ -enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, +enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { const u32 num_components = 3, default_bpp = 24; - struct dp_display_private *dp_display; - struct dp_link_info *link_info; + struct msm_dp_display_private *msm_dp_display; + struct msm_dp_link_info *link_info; u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; struct msm_dp *dp; int mode_pclk_khz = mode->clock; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; if (!dp || !mode_pclk_khz || !dp->connector) { DRM_ERROR("invalid params\n"); @@ -931,18 +940,18 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; - dp_display = container_of(dp, struct dp_display_private, dp_display); - link_info = &dp_display->panel->link_info; + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + link_info = &msm_dp_display->panel->link_info; if (drm_mode_is_420_only(&dp->connector->display_info, mode) && - dp_display->panel->vsc_sdp_supported) + msm_dp_display->panel->vsc_sdp_supported) mode_pclk_khz /= 2; mode_bpp = dp->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = default_bpp; - mode_bpp = dp_panel_get_mode_bpp(dp_display->panel, + mode_bpp = msm_dp_panel_get_mode_bpp(msm_dp_display->panel, mode_bpp, mode_pclk_khz); mode_rate_khz = mode_pclk_khz * mode_bpp; @@ -954,50 +963,50 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -int dp_display_get_modes(struct msm_dp *dp) +int msm_dp_display_get_modes(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; if (!dp) { DRM_ERROR("invalid params\n"); return 0; } - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_panel_get_modes(dp_display->panel, + return msm_dp_panel_get_modes(msm_dp_display->panel, dp->connector); } -bool dp_display_check_video_test(struct msm_dp *dp) +bool msm_dp_display_check_video_test(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_display->panel->video_test; + return msm_dp_display->panel->video_test; } -int dp_display_get_test_bpp(struct msm_dp *dp) +int msm_dp_display_get_test_bpp(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; if (!dp) { DRM_ERROR("invalid params\n"); return 0; } - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_link_bit_depth_to_bpp( - dp_display->link->test_video.test_bit_depth); + return msm_dp_link_bit_depth_to_bpp( + msm_dp_display->link->test_video.test_bit_depth); } void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) { - struct 
dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); /* * if we are reading registers we need the link clocks to be on @@ -1006,65 +1015,65 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) * power_on status before dumping DP registers to avoid crash due * to unclocked access */ - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); if (!dp->power_on) { - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - dp_catalog_snapshot(dp_display->catalog, disp_state); + msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_display_set_psr(struct msm_dp *dp_display, bool enter) +void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - if (!dp_display) { + if (!msm_dp_display) { DRM_ERROR("invalid params\n"); return; } - dp = container_of(dp_display, struct dp_display_private, dp_display); - dp_ctrl_set_psr(dp->ctrl, enter); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + msm_dp_ctrl_set_psr(dp->ctrl, enter); } static int hpd_event_thread(void *data) { - struct dp_display_private *dp_priv; + struct msm_dp_display_private *msm_dp_priv; unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; int timeout_mode = 0; - dp_priv = (struct dp_display_private *)data; + msm_dp_priv = (struct msm_dp_display_private *)data; while (1) { if (timeout_mode) { - wait_event_timeout(dp_priv->event_q, - (dp_priv->event_pndx == dp_priv->event_gndx) || + wait_event_timeout(msm_dp_priv->event_q, + (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) || kthread_should_stop(), EVENT_TIMEOUT); } else { - wait_event_interruptible(dp_priv->event_q, - (dp_priv->event_pndx != dp_priv->event_gndx) || + wait_event_interruptible(msm_dp_priv->event_q, + (msm_dp_priv->event_pndx != msm_dp_priv->event_gndx) || kthread_should_stop()); } if (kthread_should_stop()) break; - spin_lock_irqsave(&dp_priv->event_lock, flag); - todo = &dp_priv->event_list[dp_priv->event_gndx]; + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + todo = &msm_dp_priv->event_list[msm_dp_priv->event_gndx]; if (todo->delay) { - struct dp_event *todo_next; + struct msm_dp_event *todo_next; - dp_priv->event_gndx++; - dp_priv->event_gndx %= DP_EVENT_Q_MAX; + msm_dp_priv->event_gndx++; + msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX; /* re enter delay event into q */ - todo_next = &dp_priv->event_list[dp_priv->event_pndx++]; - dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo_next = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++]; + msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX; todo_next->event_id = todo->event_id; todo_next->data = todo->data; todo_next->delay = todo->delay - 1; @@ -1075,33 +1084,33 @@ static int hpd_event_thread(void *data) /* switch to timeout mode */ timeout_mode = 1; - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); continue; } /* timeout with no events in q */ - if (dp_priv->event_pndx == dp_priv->event_gndx) { - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) { + 
spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); continue; } - dp_priv->event_gndx++; - dp_priv->event_gndx %= DP_EVENT_Q_MAX; + msm_dp_priv->event_gndx++; + msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX; timeout_mode = 0; - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); switch (todo->event_id) { case EV_HPD_PLUG_INT: - dp_hpd_plug_handle(dp_priv, todo->data); + msm_dp_hpd_plug_handle(msm_dp_priv, todo->data); break; case EV_HPD_UNPLUG_INT: - dp_hpd_unplug_handle(dp_priv, todo->data); + msm_dp_hpd_unplug_handle(msm_dp_priv, todo->data); break; case EV_IRQ_HPD_INT: - dp_irq_hpd_handle(dp_priv, todo->data); + msm_dp_irq_hpd_handle(msm_dp_priv, todo->data); break; case EV_USER_NOTIFICATION: - dp_display_send_hpd_notification(dp_priv, + msm_dp_display_send_hpd_notification(msm_dp_priv, todo->data); break; default: @@ -1112,22 +1121,22 @@ static int hpd_event_thread(void *data) return 0; } -static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv) +static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv) { /* set event q to empty */ - dp_priv->event_gndx = 0; - dp_priv->event_pndx = 0; + msm_dp_priv->event_gndx = 0; + msm_dp_priv->event_pndx = 0; - dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); - if (IS_ERR(dp_priv->ev_tsk)) - return PTR_ERR(dp_priv->ev_tsk); + msm_dp_priv->ev_tsk = kthread_run(hpd_event_thread, msm_dp_priv, "dp_hpd_handler"); + if (IS_ERR(msm_dp_priv->ev_tsk)) + return PTR_ERR(msm_dp_priv->ev_tsk); return 0; } -static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) +static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id) { - struct dp_display_private *dp = dev_id; + struct msm_dp_display_private *dp = dev_id; irqreturn_t ret = IRQ_NONE; u32 hpd_isr_status; @@ -1136,43 +1145,43 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) return IRQ_NONE; } - hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); + hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog); if (hpd_isr_status & 0x0F) { drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n", - dp->dp_display.connector_type, hpd_isr_status); + dp->msm_dp_display.connector_type, hpd_isr_status); /* hpd related interrupts */ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK) - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { - dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); + msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); } if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); } if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); ret = IRQ_HANDLED; } /* DP controller isr */ - ret |= dp_ctrl_isr(dp->ctrl); + ret |= msm_dp_ctrl_isr(dp->ctrl); /* DP aux isr */ - ret |= dp_aux_isr(dp->aux); + ret |= msm_dp_aux_isr(dp->aux); return ret; } -static int dp_display_request_irq(struct dp_display_private *dp) +static int msm_dp_display_request_irq(struct msm_dp_display_private *dp) { int rc = 0; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; dp->irq = platform_get_irq(pdev, 0); if (dp->irq < 0) { @@ -1180,7 +1189,7 @@ static int dp_display_request_irq(struct 
dp_display_private *dp) return dp->irq; } - rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler, + rc = devm_request_irq(&pdev->dev, dp->irq, msm_dp_display_irq_handler, IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN, "dp_display_isr", dp); @@ -1193,7 +1202,7 @@ static int dp_display_request_irq(struct dp_display_private *dp) return 0; } -static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev) +static const struct msm_dp_desc *msm_dp_display_get_desc(struct platform_device *pdev) { const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev); struct resource *res; @@ -1212,7 +1221,7 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde return NULL; } -static int dp_display_probe_tail(struct device *dev) +static int msm_dp_display_probe_tail(struct device *dev) { struct msm_dp *dp = dev_get_drvdata(dev); int ret; @@ -1232,19 +1241,19 @@ static int dp_display_probe_tail(struct device *dev) return ret; } - ret = component_add(dev, &dp_display_comp_ops); + ret = component_add(dev, &msm_dp_display_comp_ops); if (ret) DRM_ERROR("component add failed, rc=%d\n", ret); return ret; } -static int dp_auxbus_done_probe(struct drm_dp_aux *aux) +static int msm_dp_auxbus_done_probe(struct drm_dp_aux *aux) { - return dp_display_probe_tail(aux->dev); + return msm_dp_display_probe_tail(aux->dev); } -static int dp_display_get_connector_type(struct platform_device *pdev, +static int msm_dp_display_get_connector_type(struct platform_device *pdev, const struct msm_dp_desc *desc) { struct device_node *node = pdev->dev.of_node; @@ -1263,10 +1272,10 @@ static int dp_display_get_connector_type(struct platform_device *pdev, return connector_type; } -static int dp_display_probe(struct platform_device *pdev) +static int msm_dp_display_probe(struct platform_device *pdev) { int rc = 0; - struct dp_display_private *dp; + struct msm_dp_display_private *dp; const struct msm_dp_desc *desc; if (!pdev || !pdev->dev.of_node) { @@ -1278,18 +1287,18 @@ static int dp_display_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; - desc = dp_display_get_desc(pdev); + desc = msm_dp_display_get_desc(pdev); if (!desc) return -EINVAL; - dp->dp_display.pdev = pdev; + dp->msm_dp_display.pdev = pdev; dp->id = desc->id; - dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc); + dp->msm_dp_display.connector_type = msm_dp_display_get_connector_type(pdev, desc); dp->wide_bus_supported = desc->wide_bus_supported; - dp->dp_display.is_edp = - (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); + dp->msm_dp_display.is_edp = + (dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); - rc = dp_init_sub_modules(dp); + rc = msm_dp_init_sub_modules(dp); if (rc) { DRM_ERROR("init sub module failed\n"); return -EPROBE_DEFER; @@ -1301,28 +1310,28 @@ static int dp_display_probe(struct platform_device *pdev) spin_lock_init(&dp->event_lock); /* Store DP audio handle inside DP display */ - dp->dp_display.dp_audio = dp->audio; + dp->msm_dp_display.msm_dp_audio = dp->audio; init_completion(&dp->audio_comp); - platform_set_drvdata(pdev, &dp->dp_display); + platform_set_drvdata(pdev, &dp->msm_dp_display); rc = devm_pm_runtime_enable(&pdev->dev); if (rc) goto err; - rc = dp_display_request_irq(dp); + rc = msm_dp_display_request_irq(dp); if (rc) goto err; - if (dp->dp_display.is_edp) { - rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe); + if (dp->msm_dp_display.is_edp) { + rc = devm_of_dp_aux_populate_bus(dp->aux, msm_dp_auxbus_done_probe); if 
(rc) { DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc); goto err; } } else { - rc = dp_display_probe_tail(&pdev->dev); + rc = msm_dp_display_probe_tail(&pdev->dev); if (rc) goto err; } @@ -1330,70 +1339,70 @@ static int dp_display_probe(struct platform_device *pdev) return rc; err: - dp_display_deinit_sub_modules(dp); + msm_dp_display_deinit_sub_modules(dp); return rc; } -static void dp_display_remove(struct platform_device *pdev) +static void msm_dp_display_remove(struct platform_device *pdev) { - struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); - component_del(&pdev->dev, &dp_display_comp_ops); - dp_display_deinit_sub_modules(dp); + component_del(&pdev->dev, &msm_dp_display_comp_ops); + msm_dp_display_deinit_sub_modules(dp); platform_set_drvdata(pdev, NULL); } -static int dp_pm_runtime_suspend(struct device *dev) +static int msm_dp_pm_runtime_suspend(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); disable_irq(dp->irq); - if (dp->dp_display.is_edp) { - dp_display_host_phy_exit(dp); - dp_catalog_ctrl_hpd_disable(dp->catalog); + if (dp->msm_dp_display.is_edp) { + msm_dp_display_host_phy_exit(dp); + msm_dp_catalog_ctrl_hpd_disable(dp->catalog); } - dp_display_host_deinit(dp); + msm_dp_display_host_deinit(dp); return 0; } -static int dp_pm_runtime_resume(struct device *dev) +static int msm_dp_pm_runtime_resume(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); /* * for eDP, host cotroller, HPD block and PHY are enabled here * but with HPD irq disabled * * for DP, only host controller is enabled here. 
- * HPD block is enabled at dp_bridge_hpd_enable() + * HPD block is enabled at msm_dp_bridge_hpd_enable() * PHY will be enabled at plugin handler later */ - dp_display_host_init(dp); - if (dp->dp_display.is_edp) { - dp_catalog_ctrl_hpd_enable(dp->catalog); - dp_display_host_phy_init(dp); + msm_dp_display_host_init(dp); + if (dp->msm_dp_display.is_edp) { + msm_dp_catalog_ctrl_hpd_enable(dp->catalog); + msm_dp_display_host_phy_init(dp); } enable_irq(dp->irq); return 0; } -static const struct dev_pm_ops dp_pm_ops = { - SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL) +static const struct dev_pm_ops msm_dp_pm_ops = { + SET_RUNTIME_PM_OPS(msm_dp_pm_runtime_suspend, msm_dp_pm_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; -static struct platform_driver dp_display_driver = { - .probe = dp_display_probe, - .remove_new = dp_display_remove, +static struct platform_driver msm_dp_display_driver = { + .probe = msm_dp_display_probe, + .remove_new = msm_dp_display_remove, .driver = { .name = "msm-dp-display", - .of_match_table = dp_dt_match, + .of_match_table = msm_dp_dt_match, .suppress_bind_attrs = true, - .pm = &dp_pm_ops, + .pm = &msm_dp_pm_ops, }, }; @@ -1401,7 +1410,7 @@ int __init msm_dp_register(void) { int ret; - ret = platform_driver_register(&dp_display_driver); + ret = platform_driver_register(&msm_dp_display_driver); if (ret) DRM_ERROR("Dp display driver register failed"); @@ -1410,294 +1419,294 @@ int __init msm_dp_register(void) void __exit msm_dp_unregister(void) { - platform_driver_unregister(&dp_display_driver); + platform_driver_unregister(&msm_dp_display_driver); } -bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display, +bool msm_dp_is_yuv_420_enabled(const struct msm_dp *msm_dp_display, const struct drm_display_mode *mode) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; const struct drm_display_info *info; - dp = container_of(dp_display, struct dp_display_private, dp_display); - info = &dp_display->connector->display_info; + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + info = &msm_dp_display->connector->display_info; return dp->panel->vsc_sdp_supported && drm_mode_is_420_only(info, mode); } -bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display, +bool msm_dp_needs_periph_flush(const struct msm_dp *msm_dp_display, const struct drm_display_mode *mode) { - return msm_dp_is_yuv_420_enabled(dp_display, mode); + return msm_dp_is_yuv_420_enabled(msm_dp_display, mode); } -bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) +bool msm_dp_wide_bus_available(const struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - if (dp->dp_mode.out_fmt_is_yuv_420) + if (dp->msm_dp_mode.out_fmt_is_yuv_420) return false; return dp->wide_bus_supported; } -void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp) +void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *root, bool is_edp) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; struct device *dev; int rc; - dp = container_of(dp_display, struct dp_display_private, dp_display); - dev = &dp->dp_display.pdev->dev; + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + dev = 
&dp->msm_dp_display.pdev->dev; - rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp); + rc = msm_dp_debug_init(dev, dp->panel, dp->link, dp->msm_dp_display.connector, root, is_edp); if (rc) DRM_ERROR("failed to initialize debug, rc = %d\n", rc); } -int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, +int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev, struct drm_encoder *encoder, bool yuv_supported) { - struct dp_display_private *dp_priv; + struct msm_dp_display_private *msm_dp_priv; int ret; - dp_display->drm_dev = dev; + msm_dp_display->drm_dev = dev; - dp_priv = container_of(dp_display, struct dp_display_private, dp_display); + msm_dp_priv = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - ret = dp_bridge_init(dp_display, dev, encoder); + ret = msm_dp_bridge_init(msm_dp_display, dev, encoder, yuv_supported); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to create dp bridge: %d\n", ret); return ret; } - dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported); - if (IS_ERR(dp_display->connector)) { - ret = PTR_ERR(dp_display->connector); + msm_dp_display->connector = msm_dp_drm_connector_init(msm_dp_display, encoder); + if (IS_ERR(msm_dp_display->connector)) { + ret = PTR_ERR(msm_dp_display->connector); DRM_DEV_ERROR(dev->dev, "failed to create dp connector: %d\n", ret); - dp_display->connector = NULL; + msm_dp_display->connector = NULL; return ret; } - dp_priv->panel->connector = dp_display->connector; + msm_dp_priv->panel->connector = msm_dp_display->connector; return 0; } -void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; int rc = 0; - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; u32 state; bool force_link_train = false; - dp_display = container_of(dp, struct dp_display_private, dp_display); - if (!dp_display->dp_mode.drm_mode.clock) { + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + if (!msm_dp_display->msm_dp_mode.drm_mode.clock) { DRM_ERROR("invalid params\n"); return; } if (dp->is_edp) - dp_hpd_plug_handle(dp_display, 0); + msm_dp_hpd_plug_handle(msm_dp_display, 0); - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); if (pm_runtime_resume_and_get(&dp->pdev->dev)) { DRM_ERROR("failed to pm_runtime_resume\n"); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - rc = dp_display_set_mode(dp, &dp_display->dp_mode); + rc = msm_dp_display_set_mode(dp, &msm_dp_display->msm_dp_mode); if (rc) { DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state == ST_DISPLAY_OFF) { - dp_display_host_phy_init(dp_display); + msm_dp_display_host_phy_init(msm_dp_display); 
force_link_train = true; } - dp_display_enable(dp_display, force_link_train); + msm_dp_display_enable(msm_dp_display, force_link_train); - rc = dp_display_post_enable(dp); + rc = msm_dp_display_post_enable(dp); if (rc) { DRM_ERROR("DP display post enable failed, rc=%d\n", rc); - dp_display_disable(dp_display); + msm_dp_display_disable(msm_dp_display); } /* completed connection */ - dp_display->hpd_state = ST_CONNECTED; + msm_dp_display->hpd_state = ST_CONNECTED; drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; - struct dp_display_private *dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - dp_ctrl_push_idle(dp_display->ctrl); + msm_dp_ctrl_push_idle(msm_dp_display->ctrl); } -void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; u32 state; - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); if (dp->is_edp) - dp_hpd_unplug_handle(dp_display, 0); + msm_dp_hpd_unplug_handle(msm_dp_display, 0); - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n", dp->connector_type, state); - dp_display_disable(dp_display); + msm_dp_display_disable(msm_dp_display); - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state == ST_DISCONNECT_PENDING) { /* completed disconnection */ - dp_display->hpd_state = ST_DISCONNECTED; + msm_dp_display->hpd_state = ST_DISCONNECTED; } else { - dp_display->hpd_state = ST_DISPLAY_OFF; + msm_dp_display->hpd_state = ST_DISPLAY_OFF; } drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); pm_runtime_put_sync(&dp->pdev->dev); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_bridge_mode_set(struct drm_bridge *drm_bridge, +void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; - struct dp_display_private *dp_display; - struct dp_panel *dp_panel; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private 
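The hpd_state tests in these bridge hooks form a small state machine. Summarized from the hunks above plus the plug/unplug handlers elsewhere in the driver (an editor's reading, not authoritative documentation):

/*
 *  ST_DISCONNECTED        --plug handler-->         ST_MAINLINK_READY
 *  ST_MAINLINK_READY,
 *  ST_DISPLAY_OFF         --atomic_enable-->        ST_CONNECTED
 *  ST_CONNECTED           --unplug handler-->       ST_DISCONNECT_PENDING
 *  ST_DISCONNECT_PENDING  --atomic_post_disable-->  ST_DISCONNECTED
 *  ST_CONNECTED           --atomic_post_disable-->  ST_DISPLAY_OFF
 *                           (display off, cable still attached)
 */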
*msm_dp_display; + struct msm_dp_panel *msm_dp_panel; - dp_display = container_of(dp, struct dp_display_private, dp_display); - dp_panel = dp_display->panel; + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + msm_dp_panel = msm_dp_display->panel; - memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode)); + memset(&msm_dp_display->msm_dp_mode, 0x0, sizeof(struct msm_dp_display_mode)); - if (dp_display_check_video_test(dp)) - dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp); + if (msm_dp_display_check_video_test(dp)) + msm_dp_display->msm_dp_mode.bpp = msm_dp_display_get_test_bpp(dp); else /* Default num_components per pixel = 3 */ - dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3; + msm_dp_display->msm_dp_mode.bpp = dp->connector->display_info.bpc * 3; - if (!dp_display->dp_mode.bpp) - dp_display->dp_mode.bpp = 24; /* Default bpp */ + if (!msm_dp_display->msm_dp_mode.bpp) + msm_dp_display->msm_dp_mode.bpp = 24; /* Default bpp */ - drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode); + drm_mode_copy(&msm_dp_display->msm_dp_mode.drm_mode, adjusted_mode); - dp_display->dp_mode.v_active_low = - !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); + msm_dp_display->msm_dp_mode.v_active_low = + !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); - dp_display->dp_mode.h_active_low = - !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); + msm_dp_display->msm_dp_mode.h_active_low = + !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); - dp_display->dp_mode.out_fmt_is_yuv_420 = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 = drm_mode_is_420_only(&dp->connector->display_info, adjusted_mode) && - dp_panel->vsc_sdp_supported; + msm_dp_panel->vsc_sdp_supported; /* populate wide_bus_support to different layers */ - dp_display->ctrl->wide_bus_en = - dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported; - dp_display->catalog->wide_bus_en = - dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported; + msm_dp_display->ctrl->wide_bus_en = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported; + msm_dp_display->catalog->wide_bus_en = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? 
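msm_dp_bridge_mode_set() above derives the working bpp before copying the adjusted mode. A condensed sketch of that derivation (demo_* name is hypothetical; the logic mirrors the hunk):

static u32 demo_mode_bpp(const struct drm_display_info *info, u32 test_bpp)
{
	u32 bpp = test_bpp;	/* a compliance video test may force a value */

	if (!bpp)
		bpp = info->bpc * 3;	/* 3 components per pixel */
	if (!bpp)
		bpp = 24;		/* bpc unknown, assume 8 bpc RGB */

	return bpp;
}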
false : msm_dp_display->wide_bus_supported; } -void dp_bridge_hpd_enable(struct drm_bridge *bridge) +void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* * this is for external DP with hpd irq enabled case, - * step-1: dp_pm_runtime_resume() enable dp host only + * step-1: msm_dp_pm_runtime_resume() enable dp host only * step-2: enable hpd block and have hpd irq enabled here * step-3: waiting for plugin irq while phy is not initialized * step-4: DP PHY is initialized at plugin handler before link training * */ mutex_lock(&dp->event_mutex); - if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) { + if (pm_runtime_resume_and_get(&msm_dp_display->pdev->dev)) { DRM_ERROR("failed to resume power\n"); mutex_unlock(&dp->event_mutex); return; } - dp_catalog_ctrl_hpd_enable(dp->catalog); + msm_dp_catalog_ctrl_hpd_enable(dp->catalog); /* enable HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); + msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); - dp_display->internal_hpd = true; + msm_dp_display->internal_hpd = true; mutex_unlock(&dp->event_mutex); } -void dp_bridge_hpd_disable(struct drm_bridge *bridge) +void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); mutex_lock(&dp->event_mutex); /* disable HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); - dp_catalog_ctrl_hpd_disable(dp->catalog); + msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + msm_dp_catalog_ctrl_hpd_disable(dp->catalog); - dp_display->internal_hpd = false; + msm_dp_display->internal_hpd = false; - pm_runtime_put_sync(&dp_display->pdev->dev); + pm_runtime_put_sync(&msm_dp_display->pdev->dev); mutex_unlock(&dp->event_mutex); } -void dp_bridge_hpd_notify(struct drm_bridge *bridge, +void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* Without next_bridge interrupts are handled by the DP core directly */ - if (dp_display->internal_hpd) + if (msm_dp_display->internal_hpd) return; - if (!dp_display->link_ready && status == connector_status_connected) - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); - else if (dp_display->link_ready && 
status == connector_status_disconnected) - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + if (!msm_dp_display->link_ready && status == connector_status_connected) + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + else if (msm_dp_display->link_ready && status == connector_status_disconnected) + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); } diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index ec7fa67e0569..ecbc2d92f546 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -27,18 +27,18 @@ struct msm_dp { hdmi_codec_plugged_cb plugged_cb; - struct dp_audio *dp_audio; + struct msm_dp_audio *msm_dp_audio; bool psr_supported; }; -int dp_display_set_plugged_cb(struct msm_dp *dp_display, +int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev); -int dp_display_get_modes(struct msm_dp *dp_display); -bool dp_display_check_video_test(struct msm_dp *dp_display); -int dp_display_get_test_bpp(struct msm_dp *dp_display); -void dp_display_signal_audio_start(struct msm_dp *dp_display); -void dp_display_signal_audio_complete(struct msm_dp *dp_display); -void dp_display_set_psr(struct msm_dp *dp, bool enter); -void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp); +int msm_dp_display_get_modes(struct msm_dp *msm_dp_display); +bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display); +int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display); +void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display); +void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display); +void msm_dp_display_set_psr(struct msm_dp *dp, bool enter); +void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *dentry, bool is_edp); #endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 1b9be5bd97f1..d3e241ea6941 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -14,15 +14,15 @@ #include "dp_drm.h" /** - * dp_bridge_detect - callback to determine if connector is connected + * msm_dp_bridge_detect - callback to determine if connector is connected * @bridge: Pointer to drm bridge structure * Returns: Bridge's 'is connected' status */ -static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge) { struct msm_dp *dp; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", (dp->link_ready) ? "true" : "false"); @@ -31,14 +31,14 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) connector_status_disconnected; } -static int dp_bridge_atomic_check(struct drm_bridge *bridge, +static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct msm_dp *dp; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", (dp->link_ready) ? 
"true" : "false"); @@ -62,12 +62,12 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge, /** - * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() + * msm_dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() * @bridge: Poiner to drm bridge * @connector: Pointer to drm connector structure * Returns: Number of modes added */ -static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) +static int msm_dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) { int rc = 0; struct msm_dp *dp; @@ -75,11 +75,11 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * if (!connector) return 0; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; /* pluggable case assumes EDID is read when HPD */ if (dp->link_ready) { - rc = dp_display_get_modes(dp); + rc = msm_dp_display_get_modes(dp); if (rc <= 0) { DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); return rc; @@ -90,37 +90,37 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * return rc; } -static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +static void msm_dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) { - struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display; - dp_display_debugfs_init(dp, root, false); + msm_dp_display_debugfs_init(dp, root, false); } -static const struct drm_bridge_funcs dp_bridge_ops = { +static const struct drm_bridge_funcs msm_dp_bridge_ops = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_enable = dp_bridge_atomic_enable, - .atomic_disable = dp_bridge_atomic_disable, - .atomic_post_disable = dp_bridge_atomic_post_disable, - .mode_set = dp_bridge_mode_set, - .mode_valid = dp_bridge_mode_valid, - .get_modes = dp_bridge_get_modes, - .detect = dp_bridge_detect, - .atomic_check = dp_bridge_atomic_check, - .hpd_enable = dp_bridge_hpd_enable, - .hpd_disable = dp_bridge_hpd_disable, - .hpd_notify = dp_bridge_hpd_notify, - .debugfs_init = dp_bridge_debugfs_init, + .atomic_enable = msm_dp_bridge_atomic_enable, + .atomic_disable = msm_dp_bridge_atomic_disable, + .atomic_post_disable = msm_dp_bridge_atomic_post_disable, + .mode_set = msm_dp_bridge_mode_set, + .mode_valid = msm_dp_bridge_mode_valid, + .get_modes = msm_dp_bridge_get_modes, + .detect = msm_dp_bridge_detect, + .atomic_check = msm_dp_bridge_atomic_check, + .hpd_enable = msm_dp_bridge_hpd_enable, + .hpd_disable = msm_dp_bridge_hpd_disable, + .hpd_notify = msm_dp_bridge_hpd_notify, + .debugfs_init = msm_dp_bridge_debugfs_init, }; -static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, +static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(drm_bridge)->msm_dp_display; if (WARN_ON(!conn_state)) return -ENODEV; @@ -136,18 +136,18 @@ static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, return 0; } -static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state 
*old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; /* * Check the old state of the crtc to determine if the panel - * was put into psr state previously by the edp_bridge_atomic_disable. + * was put into psr state previously by the msm_edp_bridge_atomic_disable. * If the panel is in psr, just exit psr state and skip the full * bridge enable sequence. */ @@ -159,21 +159,21 @@ static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); if (old_crtc_state && old_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, false); + msm_dp_display_set_psr(dp, false); return; } - dp_bridge_atomic_enable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state); } -static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL; - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, drm_bridge->encoder); @@ -194,24 +194,24 @@ static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, * If old crtc state is active, then this is a display disable * call while the sink is in psr state. So, exit psr here. * The eDP controller will be disabled in the - * edp_bridge_atomic_post_disable function. + * msm_edp_bridge_atomic_post_disable function. * * We observed sink is stuck in self refresh if psr exit is skipped * when display disable occurs while the sink is in psr state. */ if (new_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, true); + msm_dp_display_set_psr(dp, true); return; } else if (old_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, false); + msm_dp_display_set_psr(dp, false); return; } out: - dp_bridge_atomic_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state); } -static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; @@ -228,29 +228,29 @@ static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, return; /* - * Self refresh mode is already set in edp_bridge_atomic_disable. + * Self refresh mode is already set in msm_edp_bridge_atomic_disable. 
*/ if (new_crtc_state->self_refresh_active) return; - dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); } /** - * edp_bridge_mode_valid - callback to determine if specified mode is valid + * msm_edp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure * @info: display info * @mode: Pointer to drm mode structure * Returns: Validity status for specified mode */ -static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, +static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct msm_dp *dp; int mode_pclk_khz = mode->clock; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; if (!dp || !mode_pclk_khz || !dp->connector) { DRM_ERROR("invalid params\n"); @@ -268,42 +268,43 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +static void msm_edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) { - struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display; - dp_display_debugfs_init(dp, root, true); + msm_dp_display_debugfs_init(dp, root, true); } -static const struct drm_bridge_funcs edp_bridge_ops = { - .atomic_enable = edp_bridge_atomic_enable, - .atomic_disable = edp_bridge_atomic_disable, - .atomic_post_disable = edp_bridge_atomic_post_disable, - .mode_set = dp_bridge_mode_set, - .mode_valid = edp_bridge_mode_valid, +static const struct drm_bridge_funcs msm_edp_bridge_ops = { + .atomic_enable = msm_edp_bridge_atomic_enable, + .atomic_disable = msm_edp_bridge_atomic_disable, + .atomic_post_disable = msm_edp_bridge_atomic_post_disable, + .mode_set = msm_dp_bridge_mode_set, + .mode_valid = msm_edp_bridge_mode_valid, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_check = edp_bridge_atomic_check, - .debugfs_init = edp_bridge_debugfs_init, + .atomic_check = msm_edp_bridge_atomic_check, + .debugfs_init = msm_edp_bridge_debugfs_init, }; -int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, - struct drm_encoder *encoder) +int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, + struct drm_encoder *encoder, bool yuv_supported) { int rc; - struct msm_dp_bridge *dp_bridge; + struct msm_dp_bridge *msm_dp_bridge; struct drm_bridge *bridge; - dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL); - if (!dp_bridge) + msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL); + if (!msm_dp_bridge) return -ENOMEM; - dp_bridge->dp_display = dp_display; + msm_dp_bridge->msm_dp_display = msm_dp_display; - bridge = &dp_bridge->bridge; - bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops; - bridge->type = dp_display->connector_type; + bridge = &msm_dp_bridge->bridge; + bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops; + bridge->type = msm_dp_display->connector_type; + bridge->ycbcr_420_allowed = yuv_supported; /* * Many ops only make sense for DP. Why? 
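All three msm_edp_bridge_* hooks above pivot on crtc_state->self_refresh_active, which the DRM self-refresh helpers set while the panel is in (or entering) PSR. Collapsed into one place, the decision is (editor's condensation of the hunks above, not new driver logic):

/* display going dark while the panel can self-refresh: enter PSR */
if (new_crtc_state->self_refresh_active)
	msm_dp_display_set_psr(dp, true);
/* waking from self-refresh: exit PSR and skip the full enable path */
else if (old_crtc_state->self_refresh_active)
	msm_dp_display_set_psr(dp, false);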
@@ -316,7 +317,7 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, * allows the panel driver to properly power itself on to read the * modes. */ - if (!dp_display->is_edp) { + if (!msm_dp_display->is_edp) { bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_HPD | @@ -337,9 +338,9 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, return rc; } - if (dp_display->next_bridge) { + if (msm_dp_display->next_bridge) { rc = drm_bridge_attach(encoder, - dp_display->next_bridge, bridge, + msm_dp_display->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (rc < 0) { DRM_ERROR("failed to attach panel bridge: %d\n", rc); @@ -351,21 +352,18 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, } /* connector initialization */ -struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder, - bool yuv_supported) +struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display, + struct drm_encoder *encoder) { struct drm_connector *connector = NULL; - connector = drm_bridge_connector_init(dp_display->drm_dev, encoder); + connector = drm_bridge_connector_init(msm_dp_display->drm_dev, encoder); if (IS_ERR(connector)) return connector; - if (!dp_display->is_edp) + if (!msm_dp_display->is_edp) drm_connector_attach_dp_subconnector_property(connector); - if (yuv_supported) - connector->ycbcr_420_allowed = true; - drm_connector_attach_encoder(connector, encoder); return connector; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h index 45e57ac25a4d..8eae2f74839f 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.h +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -14,31 +14,32 @@ struct msm_dp_bridge { struct drm_bridge bridge; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; }; #define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge) -struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder, - bool yuv_supported); -int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, - struct drm_encoder *encoder); +struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display, + struct drm_encoder *encoder); +int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, + struct drm_encoder *encoder, + bool yuv_supported); -void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, +enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode); -void dp_bridge_mode_set(struct drm_bridge *drm_bridge, +void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode); -void dp_bridge_hpd_enable(struct drm_bridge *bridge); -void dp_bridge_hpd_disable(struct drm_bridge *bridge); -void dp_bridge_hpd_notify(struct drm_bridge *bridge, +void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge); +void 
msm_dp_bridge_hpd_disable(struct drm_bridge *bridge); +void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status); #endif /* _DP_DRM_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index d8967615d84d..1a1fbb2d7d4f 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -28,25 +28,25 @@ enum audio_pattern_type { AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, }; -struct dp_link_request { +struct msm_dp_link_request { u32 test_requested; u32 test_link_rate; u32 test_lane_count; }; -struct dp_link_private { +struct msm_dp_link_private { u32 prev_sink_count; struct drm_device *drm_dev; struct drm_dp_aux *aux; - struct dp_link dp_link; + struct msm_dp_link msm_dp_link; - struct dp_link_request request; + struct msm_dp_link_request request; struct mutex psm_mutex; u8 link_status[DP_LINK_STATUS_SIZE]; }; -static int dp_aux_link_power_up(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_power_up(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 value; ssize_t len; @@ -73,8 +73,8 @@ static int dp_aux_link_power_up(struct drm_dp_aux *aux, return 0; } -static int dp_aux_link_power_down(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_power_down(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 value; int err; @@ -96,7 +96,7 @@ static int dp_aux_link_power_down(struct drm_dp_aux *aux, return 0; } -static int dp_link_get_period(struct dp_link_private *link, int const addr) +static int msm_dp_link_get_period(struct msm_dp_link_private *link, int const addr) { int ret = 0; u8 data; @@ -122,19 +122,19 @@ exit: return ret; } -static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +static int msm_dp_link_parse_audio_channel_period(struct msm_dp_link_private *link) { int ret = 0; - struct dp_link_test_audio *req = &link->dp_link.test_audio; + struct msm_dp_link_test_audio *req = &link->msm_dp_link.test_audio; - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_1 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); if (ret == -EINVAL) goto exit; @@ -142,42 +142,42 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link) drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret); /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_3 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_4 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_5 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); if (ret == -EINVAL) 
goto exit; req->test_audio_period_ch_6 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_7 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); if (ret == -EINVAL) goto exit; @@ -187,7 +187,7 @@ exit: return ret; } -static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +static int msm_dp_link_parse_audio_pattern_type(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -208,13 +208,13 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) goto exit; } - link->dp_link.test_audio.test_audio_pattern_type = data; + link->msm_dp_link.test_audio.test_audio_pattern_type = data; drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data); exit: return ret; } -static int dp_link_parse_audio_mode(struct dp_link_private *link) +static int msm_dp_link_parse_audio_mode(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -248,8 +248,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link) goto exit; } - link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; - link->dp_link.test_audio.test_audio_channel_count = channel_count; + link->msm_dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->msm_dp_link.test_audio.test_audio_channel_count = channel_count; drm_dbg_dp(link->drm_dev, "sampling_rate = 0x%x, channel_count = 0x%x\n", sampling_rate, channel_count); @@ -257,25 +257,25 @@ exit: return ret; } -static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +static int msm_dp_link_parse_audio_pattern_params(struct msm_dp_link_private *link) { int ret = 0; - ret = dp_link_parse_audio_mode(link); + ret = msm_dp_link_parse_audio_mode(link); if (ret) goto exit; - ret = dp_link_parse_audio_pattern_type(link); + ret = msm_dp_link_parse_audio_pattern_type(link); if (ret) goto exit; - ret = dp_link_parse_audio_channel_period(link); + ret = msm_dp_link_parse_audio_channel_period(link); exit: return ret; } -static bool dp_link_is_video_pattern_valid(u32 pattern) +static bool msm_dp_link_is_video_pattern_valid(u32 pattern) { switch (pattern) { case DP_NO_TEST_PATTERN: @@ -289,12 +289,12 @@ static bool dp_link_is_video_pattern_valid(u32 pattern) } /** - * dp_link_is_bit_depth_valid() - validates the bit depth requested + * msm_dp_link_is_bit_depth_valid() - validates the bit depth requested * @tbd: bit depth requested by the sink * * Returns true if the requested bit depth is supported. 
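The body of msm_dp_link_get_period() is elided by the hunk context above; it reads one DPCD byte per TEST_AUDIO_PERIOD_CHx register and validates it. A sketch under that assumption (demo_* name hypothetical; the period sits in the low nibble, with larger encodings reserved):

static int demo_read_audio_period(struct drm_dp_aux *aux, unsigned int addr)
{
	u8 data;

	if (drm_dp_dpcd_readb(aux, addr, &data) < 0)
		return -EINVAL;

	data &= 0xF;		/* period field, bits 3:0 */
	if (data > 0xA)		/* reserved encodings */
		return -EINVAL;

	return data;
}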
*/ -static bool dp_link_is_bit_depth_valid(u32 tbd) +static bool msm_dp_link_is_bit_depth_valid(u32 tbd) { /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ switch (tbd) { @@ -307,7 +307,7 @@ static bool dp_link_is_bit_depth_valid(u32 tbd) } } -static int dp_link_parse_timing_params1(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params1(struct msm_dp_link_private *link, int addr, int len, u32 *val) { u8 bp[2]; @@ -328,7 +328,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link, return 0; } -static int dp_link_parse_timing_params2(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params2(struct msm_dp_link_private *link, int addr, int len, u32 *val1, u32 *val2) { @@ -351,7 +351,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link, return 0; } -static int dp_link_parse_timing_params3(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params3(struct msm_dp_link_private *link, int addr, u32 *val) { u8 bp; @@ -369,13 +369,13 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link, } /** - * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD + * msm_dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD * @link: Display Port Driver data * * Returns 0 if it successfully parses the video link pattern and the link * bit depth requested by the sink, and if the values parsed are valid. */ -static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +static int msm_dp_link_parse_video_pattern_params(struct msm_dp_link_private *link) { int ret = 0; ssize_t rlen; @@ -388,13 +388,13 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) return rlen; } - if (!dp_link_is_video_pattern_valid(bp)) { + if (!msm_dp_link_is_video_pattern_valid(bp)) { DRM_ERROR("invalid link video pattern = 0x%x\n", bp); ret = -EINVAL; return ret; } - link->dp_link.test_video.test_video_pattern = bp; + link->msm_dp_link.test_video.test_video_pattern = bp; /* Read the requested color bit depth and dynamic range (Byte 0x232) */ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp); @@ -404,88 +404,88 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) } /* Dynamic Range */ - link->dp_link.test_video.test_dyn_range = + link->msm_dp_link.test_video.test_dyn_range = (bp & DP_TEST_DYNAMIC_RANGE_CEA); /* Color bit depth */ bp &= DP_TEST_BIT_DEPTH_MASK; - if (!dp_link_is_bit_depth_valid(bp)) { + if (!msm_dp_link_is_bit_depth_valid(bp)) { DRM_ERROR("invalid link bit depth = 0x%x\n", bp); ret = -EINVAL; return ret; } - link->dp_link.test_video.test_bit_depth = bp; + link->msm_dp_link.test_video.test_bit_depth = bp; /* resolution timing params */ - ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, - &link->dp_link.test_video.test_h_total); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->msm_dp_link.test_video.test_h_total); if (ret) { DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, - &link->dp_link.test_video.test_v_total); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->msm_dp_link.test_video.test_v_total); if (ret) { DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, - &link->dp_link.test_video.test_h_start); + ret = 
msm_dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->msm_dp_link.test_video.test_h_start); if (ret) { DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, - &link->dp_link.test_video.test_v_start); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->msm_dp_link.test_video.test_v_start); if (ret) { DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n"); return ret; } - ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, - &link->dp_link.test_video.test_hsync_pol, - &link->dp_link.test_video.test_hsync_width); + ret = msm_dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->msm_dp_link.test_video.test_hsync_pol, + &link->msm_dp_link.test_video.test_hsync_width); if (ret) { DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n"); return ret; } - ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, - &link->dp_link.test_video.test_vsync_pol, - &link->dp_link.test_video.test_vsync_width); + ret = msm_dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->msm_dp_link.test_video.test_vsync_pol, + &link->msm_dp_link.test_video.test_vsync_width); if (ret) { DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, - &link->dp_link.test_video.test_h_width); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->msm_dp_link.test_video.test_h_width); if (ret) { DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, - &link->dp_link.test_video.test_v_height); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->msm_dp_link.test_video.test_v_height); if (ret) { DRM_ERROR("failed to parse test_v_height\n"); return ret; } - ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, - &link->dp_link.test_video.test_rr_d); - link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + ret = msm_dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->msm_dp_link.test_video.test_rr_d); + link->msm_dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; if (ret) { DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); return ret; } - ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, - &link->dp_link.test_video.test_rr_n); + ret = msm_dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->msm_dp_link.test_video.test_rr_n); if (ret) { DRM_ERROR("failed to parse test_rr_n\n"); return ret; @@ -505,34 +505,34 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) "TEST_V_HEIGHT = %d\n" "TEST_REFRESH_DENOMINATOR = %d\n" "TEST_REFRESH_NUMERATOR = %d\n", - link->dp_link.test_video.test_video_pattern, - link->dp_link.test_video.test_dyn_range, - link->dp_link.test_video.test_bit_depth, - link->dp_link.test_video.test_h_total, - link->dp_link.test_video.test_v_total, - link->dp_link.test_video.test_h_start, - link->dp_link.test_video.test_v_start, - link->dp_link.test_video.test_hsync_pol, - link->dp_link.test_video.test_hsync_width, - link->dp_link.test_video.test_vsync_pol, - link->dp_link.test_video.test_vsync_width, - link->dp_link.test_video.test_h_width, - link->dp_link.test_video.test_v_height, - link->dp_link.test_video.test_rr_d, - link->dp_link.test_video.test_rr_n); + link->msm_dp_link.test_video.test_video_pattern, + 
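The msm_dp_link_parse_timing_params1() body is likewise elided above; each TEST_*_HI register is the high byte of a big-endian 16-bit pair, so the helper reads two DPCD bytes starting at the *_HI address and folds them together. A sketch of that shape (demo_* name hypothetical):

static int demo_read_test_be16(struct drm_dp_aux *aux, unsigned int hi_addr, u32 *val)
{
	u8 bp[2];
	ssize_t rlen;

	rlen = drm_dp_dpcd_read(aux, hi_addr, bp, sizeof(bp));
	if (rlen < (ssize_t)sizeof(bp))
		return -EINVAL;

	*val = (bp[0] << 8) | bp[1];	/* HI byte first */
	return 0;
}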
link->msm_dp_link.test_video.test_dyn_range, + link->msm_dp_link.test_video.test_bit_depth, + link->msm_dp_link.test_video.test_h_total, + link->msm_dp_link.test_video.test_v_total, + link->msm_dp_link.test_video.test_h_start, + link->msm_dp_link.test_video.test_v_start, + link->msm_dp_link.test_video.test_hsync_pol, + link->msm_dp_link.test_video.test_hsync_width, + link->msm_dp_link.test_video.test_vsync_pol, + link->msm_dp_link.test_video.test_vsync_width, + link->msm_dp_link.test_video.test_h_width, + link->msm_dp_link.test_video.test_v_height, + link->msm_dp_link.test_video.test_rr_d, + link->msm_dp_link.test_video.test_rr_n); return ret; } /** - * dp_link_parse_link_training_params() - parses link training parameters from + * msm_dp_link_parse_link_training_params() - parses link training parameters from * DPCD * @link: Display Port Driver data * * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane * count (Byte 0x220), and if these parsed values are valid. */ -static int dp_link_parse_link_training_params(struct dp_link_private *link) +static int msm_dp_link_parse_link_training_params(struct msm_dp_link_private *link) { u8 bp; ssize_t rlen; @@ -571,13 +571,13 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link) } /** - * dp_link_parse_phy_test_params() - parses the phy link parameters + * msm_dp_link_parse_phy_test_params() - parses the phy link parameters * @link: Display Port Driver data * * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being * requested. */ -static int dp_link_parse_phy_test_params(struct dp_link_private *link) +static int msm_dp_link_parse_phy_test_params(struct msm_dp_link_private *link) { u8 data; ssize_t rlen; @@ -589,7 +589,7 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link) return rlen; } - link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07; + link->msm_dp_link.phy_params.phy_test_pattern_sel = data & 0x07; drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data); @@ -608,12 +608,12 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link) } /** - * dp_link_is_video_audio_test_requested() - checks for audio/video link request + * msm_dp_link_is_video_audio_test_requested() - checks for audio/video link request * @link: link requested by the sink * * Returns true if the requested link is a permitted audio/video link. */ -static bool dp_link_is_video_audio_test_requested(u32 link) +static bool msm_dp_link_is_video_audio_test_requested(u32 link) { u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN | DP_TEST_LINK_AUDIO_PATTERN | @@ -624,13 +624,13 @@ static bool dp_link_is_video_audio_test_requested(u32 link) } /** - * dp_link_parse_request() - parses link request parameters from sink + * msm_dp_link_parse_request() - parses link request parameters from sink * @link: Display Port Driver data * * Parses the DPCD to check if an automated link is requested (Byte 0x201), * and what type of link automation is being requested (Byte 0x218). 
*/ -static int dp_link_parse_request(struct dp_link_private *link) +static int msm_dp_link_parse_request(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -672,27 +672,27 @@ static int dp_link_parse_request(struct dp_link_private *link) drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data); link->request.test_requested = data; if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { - ret = dp_link_parse_phy_test_params(link); + ret = msm_dp_link_parse_phy_test_params(link); if (ret) goto end; - ret = dp_link_parse_link_training_params(link); + ret = msm_dp_link_parse_link_training_params(link); if (ret) goto end; } if (link->request.test_requested == DP_TEST_LINK_TRAINING) { - ret = dp_link_parse_link_training_params(link); + ret = msm_dp_link_parse_link_training_params(link); if (ret) goto end; } - if (dp_link_is_video_audio_test_requested( + if (msm_dp_link_is_video_audio_test_requested( link->request.test_requested)) { - ret = dp_link_parse_video_pattern_params(link); + ret = msm_dp_link_parse_video_pattern_params(link); if (ret) goto end; - ret = dp_link_parse_audio_pattern_params(link); + ret = msm_dp_link_parse_audio_pattern_params(link); } end: /* @@ -700,29 +700,29 @@ end: * a DP_TEST_NAK. */ if (ret) { - link->dp_link.test_response = DP_TEST_NAK; + link->msm_dp_link.test_response = DP_TEST_NAK; } else { if (link->request.test_requested != DP_TEST_LINK_EDID_READ) - link->dp_link.test_response = DP_TEST_ACK; + link->msm_dp_link.test_response = DP_TEST_ACK; else - link->dp_link.test_response = + link->msm_dp_link.test_response = DP_TEST_EDID_CHECKSUM_WRITE; } return ret; } -static int dp_link_parse_sink_status_field(struct dp_link_private *link) +static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link) { int len; - link->prev_sink_count = link->dp_link.sink_count; + link->prev_sink_count = link->msm_dp_link.sink_count; len = drm_dp_read_sink_count(link->aux); if (len < 0) { DRM_ERROR("DP parse sink count failed\n"); return len; } - link->dp_link.sink_count = len; + link->msm_dp_link.sink_count = len; len = drm_dp_dpcd_read_link_status(link->aux, link->link_status); @@ -731,11 +731,11 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link) return len; } - return dp_link_parse_request(link); + return msm_dp_link_parse_request(link); } /** - * dp_link_process_link_training_request() - processes new training requests + * msm_dp_link_process_link_training_request() - processes new training requests * @link: Display Port link data * * This function will handle new link training requests that are initiated by @@ -745,7 +745,7 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link) * The function will return 0 if a link training request has been processed, * otherwise it will return -EINVAL. 
*/ -static int dp_link_process_link_training_request(struct dp_link_private *link) +static int msm_dp_link_process_link_training_request(struct msm_dp_link_private *link) { if (link->request.test_requested != DP_TEST_LINK_TRAINING) return -EINVAL; @@ -756,49 +756,49 @@ static int dp_link_process_link_training_request(struct dp_link_private *link) link->request.test_link_rate, link->request.test_lane_count); - link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count; + link->msm_dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); return 0; } -bool dp_link_send_test_response(struct dp_link *dp_link) +bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return false; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE, - dp_link->test_response); + msm_dp_link->test_response); return ret == 1; } -int dp_link_psm_config(struct dp_link *dp_link, - struct dp_link_info *link_info, bool enable) +int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link, + struct msm_dp_link_info *link_info, bool enable) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid params\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); mutex_lock(&link->psm_mutex); if (enable) - ret = dp_aux_link_power_down(link->aux, link_info); + ret = msm_dp_aux_link_power_down(link->aux, link_info); else - ret = dp_aux_link_power_up(link->aux, link_info); + ret = msm_dp_aux_link_power_up(link->aux, link_info); if (ret) DRM_ERROR("Failed to %s low power mode\n", enable ? 
@@ -808,24 +808,24 @@ int dp_link_psm_config(struct dp_link *dp_link, return ret; } -bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return false; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM, checksum); return ret == 1; } -static void dp_link_parse_vx_px(struct dp_link_private *link) +static void msm_dp_link_parse_vx_px(struct msm_dp_link_private *link) { drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n", drm_dp_get_adjust_request_voltage(link->link_status, 0), @@ -845,31 +845,31 @@ static void dp_link_parse_vx_px(struct dp_link_private *link) */ drm_dbg_dp(link->drm_dev, "Current: v_level = 0x%x, p_level = 0x%x\n", - link->dp_link.phy_params.v_level, - link->dp_link.phy_params.p_level); - link->dp_link.phy_params.v_level = + link->msm_dp_link.phy_params.v_level, + link->msm_dp_link.phy_params.p_level); + link->msm_dp_link.phy_params.v_level = drm_dp_get_adjust_request_voltage(link->link_status, 0); - link->dp_link.phy_params.p_level = + link->msm_dp_link.phy_params.p_level = drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); - link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + link->msm_dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; drm_dbg_dp(link->drm_dev, "Requested: v_level = 0x%x, p_level = 0x%x\n", - link->dp_link.phy_params.v_level, - link->dp_link.phy_params.p_level); + link->msm_dp_link.phy_params.v_level, + link->msm_dp_link.phy_params.p_level); } /** - * dp_link_process_phy_test_pattern_request() - process new phy link requests + * msm_dp_link_process_phy_test_pattern_request() - process new phy link requests * @link: Display Port Driver data * * This function will handle new phy link pattern requests that are initiated * by the sink. The function will return 0 if a phy link pattern has been * processed, otherwise it will return -EINVAL. 
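msm_dp_link_parse_vx_px() uses the DRM core decoders for the per-lane ADJUST_REQUEST registers. One subtlety visible in the hunk above: drm_dp_get_adjust_request_pre_emphasis() returns the field still shifted into its register position, so the driver shifts it back down by DP_TRAIN_PRE_EMPHASIS_SHIFT to get a plain 0-3 level. A reduced sketch of the same decode for lane 0:

/* Sketch: decode the lane-0 voltage-swing/pre-emphasis requests. */
static void example_decode_lane0(const u8 link_status[DP_LINK_STATUS_SIZE],
				 u8 *v_level, u8 *p_level)
{
	*v_level = drm_dp_get_adjust_request_voltage(link_status, 0);
	/* Normalize pre-emphasis from register position to a 0-3 level. */
	*p_level = drm_dp_get_adjust_request_pre_emphasis(link_status, 0) >>
		   DP_TRAIN_PRE_EMPHASIS_SHIFT;
}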
*/ -static int dp_link_process_phy_test_pattern_request( - struct dp_link_private *link) +static int msm_dp_link_process_phy_test_pattern_request( + struct msm_dp_link_private *link) { if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { drm_dbg_dp(link->drm_dev, "no phy test\n"); @@ -886,24 +886,24 @@ static int dp_link_process_phy_test_pattern_request( drm_dbg_dp(link->drm_dev, "Current: rate = 0x%x, lane count = 0x%x\n", - link->dp_link.link_params.rate, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.rate, + link->msm_dp_link.link_params.num_lanes); drm_dbg_dp(link->drm_dev, "Requested: rate = 0x%x, lane count = 0x%x\n", link->request.test_link_rate, link->request.test_lane_count); - link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count; + link->msm_dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); - dp_link_parse_vx_px(link); + msm_dp_link_parse_vx_px(link); return 0; } -static bool dp_link_read_psr_error_status(struct dp_link_private *link) +static bool msm_dp_link_read_psr_error_status(struct msm_dp_link_private *link) { u8 status; @@ -921,7 +921,7 @@ static bool dp_link_read_psr_error_status(struct dp_link_private *link) return true; } -static bool dp_link_psr_capability_changed(struct dp_link_private *link) +static bool msm_dp_link_psr_capability_changed(struct msm_dp_link_private *link) { u8 status; @@ -941,7 +941,7 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) } /** - * dp_link_process_link_status_update() - processes link status updates + * msm_dp_link_process_link_status_update() - processes link status updates * @link: Display Port link module data * * This function will check for changes in the link status, e.g. clock @@ -951,13 +951,13 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) * The function will return 0 if the a link status update has been processed, * otherwise it will return -EINVAL. */ -static int dp_link_process_link_status_update(struct dp_link_private *link) +static int msm_dp_link_process_link_status_update(struct msm_dp_link_private *link) { bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.num_lanes); bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.num_lanes); drm_dbg_dp(link->drm_dev, "channel_eq_done = %d, clock_recovery_done = %d\n", @@ -970,7 +970,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) } /** - * dp_link_process_ds_port_status_change() - process port status changes + * msm_dp_link_process_ds_port_status_change() - process port status changes * @link: Display Port Driver data * * This function will handle downstream port updates that are initiated by @@ -980,122 +980,122 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) * The function will return 0 if a downstream port update has been * processed, otherwise it will return -EINVAL. 
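The two predicates at the top of msm_dp_link_process_link_status_update() come straight from DRM core: drm_dp_clock_recovery_ok() and drm_dp_channel_eq_ok() walk the per-lane status bits in the cached DPCD link status for every active lane. Folded into one helper, the health check reduces to:

/* Sketch: the link is good only if CR and EQ both pass on all lanes. */
static bool example_link_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
			    int num_lanes)
{
	return drm_dp_clock_recovery_ok(link_status, num_lanes) &&
	       drm_dp_channel_eq_ok(link_status, num_lanes);
}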
*/ -static int dp_link_process_ds_port_status_change(struct dp_link_private *link) +static int msm_dp_link_process_ds_port_status_change(struct msm_dp_link_private *link) { if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & DP_DOWNSTREAM_PORT_STATUS_CHANGED) goto reset; - if (link->prev_sink_count == link->dp_link.sink_count) + if (link->prev_sink_count == link->msm_dp_link.sink_count) return -EINVAL; reset: /* reset prev_sink_count */ - link->prev_sink_count = link->dp_link.sink_count; + link->prev_sink_count = link->msm_dp_link.sink_count; return 0; } -static bool dp_link_is_video_pattern_requested(struct dp_link_private *link) +static bool msm_dp_link_is_video_pattern_requested(struct msm_dp_link_private *link) { return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN) && !(link->request.test_requested & DP_TEST_LINK_AUDIO_DISABLED_VIDEO); } -static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link) +static bool msm_dp_link_is_audio_pattern_requested(struct msm_dp_link_private *link) { return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN); } -static void dp_link_reset_data(struct dp_link_private *link) +static void msm_dp_link_reset_data(struct msm_dp_link_private *link) { - link->request = (const struct dp_link_request){ 0 }; - link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; - link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; - link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; - link->dp_link.phy_params.phy_test_pattern_sel = 0; - link->dp_link.sink_request = 0; - link->dp_link.test_response = 0; + link->request = (const struct msm_dp_link_request){ 0 }; + link->msm_dp_link.test_video = (const struct msm_dp_link_test_video){ 0 }; + link->msm_dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->msm_dp_link.test_audio = (const struct msm_dp_link_test_audio){ 0 }; + link->msm_dp_link.phy_params.phy_test_pattern_sel = 0; + link->msm_dp_link.sink_request = 0; + link->msm_dp_link.test_response = 0; } /** - * dp_link_process_request() - handle HPD IRQ transition to HIGH - * @dp_link: pointer to link module data + * msm_dp_link_process_request() - handle HPD IRQ transition to HIGH + * @msm_dp_link: pointer to link module data * * This function will handle the HPD IRQ state transitions from LOW to HIGH * (including cases when there are back to back HPD IRQ HIGH) indicating * the start of a new link training request or sink status update. 
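msm_dp_link_reset_data() clears its sub-structs with compound literals instead of memset(), which keeps each reset type-checked and reads naturally when one sentinel field (test_bit_depth) must be re-seeded immediately after. A freestanding demo of the idiom, with hypothetical names:

#include <stdio.h>

struct test_video { unsigned test_bit_depth; unsigned test_h_total; };
#define BIT_DEPTH_UNKNOWN 0xFFFFFFFFu

int main(void)
{
	struct test_video tv = { .test_bit_depth = 8, .test_h_total = 2200 };

	/* Compound-literal reset: every member back to zero, type-checked. */
	tv = (const struct test_video){ 0 };
	/* Then re-seed the one sentinel field, as the driver does. */
	tv.test_bit_depth = BIT_DEPTH_UNKNOWN;

	printf("%u %u\n", tv.test_bit_depth, tv.test_h_total);
	return 0;
}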
*/ -int dp_link_process_request(struct dp_link *dp_link) +int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link) { int ret = 0; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); - dp_link_reset_data(link); + msm_dp_link_reset_data(link); - ret = dp_link_parse_sink_status_field(link); + ret = msm_dp_link_parse_sink_status_field(link); if (ret) return ret; if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { - dp_link->sink_request |= DP_TEST_LINK_EDID_READ; - } else if (!dp_link_process_ds_port_status_change(link)) { - dp_link->sink_request |= DS_PORT_STATUS_CHANGED; - } else if (!dp_link_process_link_training_request(link)) { - dp_link->sink_request |= DP_TEST_LINK_TRAINING; - } else if (!dp_link_process_phy_test_pattern_request(link)) { - dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; - } else if (dp_link_read_psr_error_status(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + } else if (!msm_dp_link_process_ds_port_status_change(link)) { + msm_dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + } else if (!msm_dp_link_process_link_training_request(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_TRAINING; + } else if (!msm_dp_link_process_phy_test_pattern_request(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + } else if (msm_dp_link_read_psr_error_status(link)) { DRM_ERROR("PSR IRQ_HPD received\n"); - } else if (dp_link_psr_capability_changed(link)) { + } else if (msm_dp_link_psr_capability_changed(link)) { drm_dbg_dp(link->drm_dev, "PSR Capability changed\n"); } else { - ret = dp_link_process_link_status_update(link); + ret = msm_dp_link_process_link_status_update(link); if (!ret) { - dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + msm_dp_link->sink_request |= DP_LINK_STATUS_UPDATED; } else { - if (dp_link_is_video_pattern_requested(link)) { + if (msm_dp_link_is_video_pattern_requested(link)) { ret = 0; - dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + msm_dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; } - if (dp_link_is_audio_pattern_requested(link)) { - dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + if (msm_dp_link_is_audio_pattern_requested(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; ret = -EINVAL; } } } drm_dbg_dp(link->drm_dev, "sink request=%#x\n", - dp_link->sink_request); + msm_dp_link->sink_request); return ret; } -int dp_link_get_colorimetry_config(struct dp_link *dp_link) +int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link) { u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* * Unless a video pattern CTS test is ongoing, use RGB_VESA * Only RGB_VESA and RGB_CEA supported for now */ - if (dp_link_is_video_pattern_requested(link)) { - if (link->dp_link.test_video.test_dyn_range & + if (msm_dp_link_is_video_pattern_requested(link)) { + if (link->msm_dp_link.test_video.test_dyn_range & DP_TEST_DYNAMIC_RANGE_CEA) cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB; } @@ -1103,22 +1103,22 @@ int 
dp_link_get_colorimetry_config(struct dp_link *dp_link) return cc; } -int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status) { int i; u8 max_p_level; int v_max = 0, p_max = 0; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* use the max level across lanes */ - for (i = 0; i < dp_link->link_params.num_lanes; i++) { + for (i = 0; i < msm_dp_link->link_params.num_lanes; i++) { u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i); u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status, i); @@ -1131,56 +1131,56 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) p_max = data_p; } - dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; - dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + msm_dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + msm_dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; /** * Adjust the voltage swing and pre-emphasis level combination to within * the allowable range. */ - if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { + if (msm_dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested vSwingLevel=%d, change to %d\n", - dp_link->phy_params.v_level, + msm_dp_link->phy_params.v_level, DP_TRAIN_LEVEL_MAX); - dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; + msm_dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; } - if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { + if (msm_dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", - dp_link->phy_params.p_level, + msm_dp_link->phy_params.p_level, DP_TRAIN_LEVEL_MAX); - dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; + msm_dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; } - max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level; - if (dp_link->phy_params.p_level > max_p_level) { + max_p_level = DP_TRAIN_LEVEL_MAX - msm_dp_link->phy_params.v_level; + if (msm_dp_link->phy_params.p_level > max_p_level) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", - dp_link->phy_params.p_level, + msm_dp_link->phy_params.p_level, max_p_level); - dp_link->phy_params.p_level = max_p_level; + msm_dp_link->phy_params.p_level = max_p_level; } drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n", - dp_link->phy_params.v_level, dp_link->phy_params.p_level); + msm_dp_link->phy_params.v_level, msm_dp_link->phy_params.p_level); return 0; } -void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) +void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link) { - dp_link->phy_params.v_level = 0; - dp_link->phy_params.p_level = 0; + msm_dp_link->phy_params.v_level = 0; + msm_dp_link->phy_params.p_level = 0; } -u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp) { u32 tbd; - struct dp_link_private *link; + struct msm_dp_link_private *link; - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* * Few simplistic rules and assumptions made here: @@ -1209,10 
+1209,10 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) return tbd; } -struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) +struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux) { - struct dp_link_private *link; - struct dp_link *dp_link; + struct msm_dp_link_private *link; + struct msm_dp_link *msm_dp_link; if (!dev || !aux) { DRM_ERROR("invalid input\n"); @@ -1226,7 +1226,7 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) link->aux = aux; mutex_init(&link->psm_mutex); - dp_link = &link->dp_link; + msm_dp_link = &link->msm_dp_link; - return dp_link; + return msm_dp_link; } diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 5846337bb56f..8db5d5698a97 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -12,7 +12,7 @@ #define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF #define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) -struct dp_link_info { +struct msm_dp_link_info { unsigned char revision; unsigned int rate; unsigned int num_lanes; @@ -21,7 +21,7 @@ struct dp_link_info { #define DP_TRAIN_LEVEL_MAX 3 -struct dp_link_test_video { +struct msm_dp_link_test_video { u32 test_video_pattern; u32 test_bit_depth; u32 test_dyn_range; @@ -39,7 +39,7 @@ struct dp_link_test_video { u32 test_rr_n; }; -struct dp_link_test_audio { +struct msm_dp_link_test_audio { u32 test_audio_sampling_rate; u32 test_audio_channel_count; u32 test_audio_pattern_type; @@ -53,21 +53,21 @@ struct dp_link_test_audio { u32 test_audio_period_ch_8; }; -struct dp_link_phy_params { +struct msm_dp_link_phy_params { u32 phy_test_pattern_sel; u8 v_level; u8 p_level; }; -struct dp_link { +struct msm_dp_link { u32 sink_request; u32 test_response; u8 sink_count; - struct dp_link_test_video test_video; - struct dp_link_test_audio test_audio; - struct dp_link_phy_params phy_params; - struct dp_link_info link_params; + struct msm_dp_link_test_video test_video; + struct msm_dp_link_test_audio test_audio; + struct msm_dp_link_phy_params phy_params; + struct msm_dp_link_info link_params; }; /** @@ -78,7 +78,7 @@ struct dp_link { * git bit depth value. This function assumes that bit depth has * already been validated. 
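Circling back to msm_dp_link_adjust_levels() a few hunks up: the clamping enforces two DisplayPort constraints. Each level is individually capped at DP_TRAIN_LEVEL_MAX (3), and pre-emphasis is further capped so that v_level + p_level never exceeds that maximum. The arithmetic in isolation, runnable stand-alone:

#include <stdio.h>

#define TRAIN_LEVEL_MAX 3	/* DP_TRAIN_LEVEL_MAX in the driver */

static void clamp_levels(int *v_level, int *p_level)
{
	if (*v_level > TRAIN_LEVEL_MAX)
		*v_level = TRAIN_LEVEL_MAX;
	if (*p_level > TRAIN_LEVEL_MAX)
		*p_level = TRAIN_LEVEL_MAX;
	/* Combined cap: pre-emphasis headroom shrinks as swing grows. */
	if (*p_level > TRAIN_LEVEL_MAX - *v_level)
		*p_level = TRAIN_LEVEL_MAX - *v_level;
}

int main(void)
{
	int v = 2, p = 3;

	clamp_levels(&v, &p);
	printf("v=%d p=%d\n", v, p);	/* prints v=2 p=1 */
	return 0;
}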
*/ -static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) +static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd) { /* * Few simplistic rules and assumptions made here: @@ -99,22 +99,22 @@ static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) } } -void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link); -u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp); -int dp_link_process_request(struct dp_link *dp_link); -int dp_link_get_colorimetry_config(struct dp_link *dp_link); -int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status); -bool dp_link_send_test_response(struct dp_link *dp_link); -int dp_link_psm_config(struct dp_link *dp_link, - struct dp_link_info *link_info, bool enable); -bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum); +void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link); +u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp); +int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link); +int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link); +int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status); +bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link); +int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link, + struct msm_dp_link_info *link_info, bool enable); +bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum); /** - * dp_link_get() - get the functionalities of dp test module + * msm_dp_link_get() - get the functionalities of dp test module * * - * return: a pointer to dp_link struct + * return: a pointer to msm_dp_link struct */ -struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux); +struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux); #endif /* _DP_LINK_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 6ff6c9ef351f..5d7eaa31bf31 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -14,52 +14,52 @@ #define DP_MAX_NUM_DP_LANES 4 #define DP_LINK_RATE_HBR2 540000 /* kbytes */ -struct dp_panel_private { +struct msm_dp_panel_private { struct device *dev; struct drm_device *drm_dev; - struct dp_panel dp_panel; + struct msm_dp_panel msm_dp_panel; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; bool panel_on; }; -static void dp_panel_read_psr_cap(struct dp_panel_private *panel) +static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel) { ssize_t rlen; - struct dp_panel *dp_panel; + struct msm_dp_panel *msm_dp_panel; - dp_panel = &panel->dp_panel; + msm_dp_panel = &panel->msm_dp_panel; /* edp sink */ - if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { + if (msm_dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT, - &dp_panel->psr_cap, sizeof(dp_panel->psr_cap)); - if (rlen == sizeof(dp_panel->psr_cap)) { + &msm_dp_panel->psr_cap, sizeof(msm_dp_panel->psr_cap)); + if (rlen == sizeof(msm_dp_panel->psr_cap)) { drm_dbg_dp(panel->drm_dev, "psr version: 0x%x, psr_cap: 0x%x\n", - dp_panel->psr_cap.version, - dp_panel->psr_cap.capabilities); + msm_dp_panel->psr_cap.version, + msm_dp_panel->psr_cap.capabilities); } else DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen); } } -static int dp_panel_read_dpcd(struct dp_panel *dp_panel) +static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) { int rc; - struct 
dp_panel_private *panel; - struct dp_link_info *link_info; + struct msm_dp_panel_private *panel; + struct msm_dp_link_info *link_info; u8 *dpcd, major, minor; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); - dpcd = dp_panel->dpcd; + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); + dpcd = msm_dp_panel->dpcd; rc = drm_dp_read_dpcd_caps(panel->aux, dpcd); if (rc) return rc; - dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd); - link_info = &dp_panel->link_info; + msm_dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd); + link_info = &msm_dp_panel->link_info; link_info->revision = dpcd[DP_DPCD_REV]; major = (link_info->revision >> 4) & 0x0f; minor = link_info->revision & 0x0f; @@ -68,12 +68,12 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) link_info->num_lanes = drm_dp_max_lane_count(dpcd); /* Limit data lanes from data-lanes of endpoint property of dtsi */ - if (link_info->num_lanes > dp_panel->max_dp_lanes) - link_info->num_lanes = dp_panel->max_dp_lanes; + if (link_info->num_lanes > msm_dp_panel->max_dp_lanes) + link_info->num_lanes = msm_dp_panel->max_dp_lanes; /* Limit link rate from link-frequencies of endpoint property of dtsi */ - if (link_info->rate > dp_panel->max_dp_link_rate) - link_info->rate = dp_panel->max_dp_link_rate; + if (link_info->rate > msm_dp_panel->max_dp_link_rate) + link_info->rate = msm_dp_panel->max_dp_link_rate; drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor); drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate); @@ -82,21 +82,21 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) if (drm_dp_enhanced_frame_cap(dpcd)) link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; - dp_panel_read_psr_cap(panel); + msm_dp_panel_read_psr_cap(panel); return rc; } -static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, +static u32 msm_dp_panel_get_supported_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { - const struct dp_link_info *link_info; + const struct msm_dp_link_info *link_info; const u32 max_supported_bpp = 30, min_supported_bpp = 18; u32 bpp, data_rate_khz; bpp = min(mode_edid_bpp, max_supported_bpp); - link_info = &dp_panel->link_info; + link_info = &msm_dp_panel->link_info; data_rate_khz = link_info->num_lanes * link_info->rate * 8; do { @@ -108,39 +108,39 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, return min_supported_bpp; } -int dp_panel_read_sink_caps(struct dp_panel *dp_panel, +int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector) { int rc, bw_code; int count; - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - if (!dp_panel || !connector) { + if (!msm_dp_panel || !connector) { DRM_ERROR("invalid input\n"); return -EINVAL; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n", - dp_panel->max_dp_lanes, dp_panel->max_dp_link_rate); + msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate); - rc = dp_panel_read_dpcd(dp_panel); + rc = msm_dp_panel_read_dpcd(msm_dp_panel); if (rc) { DRM_ERROR("read dpcd failed %d\n", rc); return rc; } - bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + bw_code = drm_dp_link_rate_to_bw_code(msm_dp_panel->link_info.rate); if (!is_link_rate_valid(bw_code) || - 
!is_lane_count_valid(dp_panel->link_info.num_lanes) || - (bw_code > dp_panel->max_bw_code)) { - DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate, - dp_panel->link_info.num_lanes); + !is_lane_count_valid(msm_dp_panel->link_info.num_lanes) || + (bw_code > msm_dp_panel->max_bw_code)) { + DRM_ERROR("Illegal link rate=%d lane=%d\n", msm_dp_panel->link_info.rate, + msm_dp_panel->link_info.num_lanes); return -EINVAL; } - if (drm_dp_is_branch(dp_panel->dpcd)) { + if (drm_dp_is_branch(msm_dp_panel->dpcd)) { count = drm_dp_read_sink_count(panel->aux); if (!count) { panel->link->sink_count = 0; @@ -148,21 +148,21 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, } } - rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd, - dp_panel->downstream_ports); + rc = drm_dp_read_downstream_info(panel->aux, msm_dp_panel->dpcd, + msm_dp_panel->downstream_ports); if (rc) return rc; - drm_edid_free(dp_panel->drm_edid); + drm_edid_free(msm_dp_panel->drm_edid); - dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc); + msm_dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc); - drm_edid_connector_update(connector, dp_panel->drm_edid); + drm_edid_connector_update(connector, msm_dp_panel->drm_edid); - if (!dp_panel->drm_edid) { + if (!msm_dp_panel->drm_edid) { DRM_ERROR("panel edid read failed\n"); /* check edid read fail is due to unplug */ - if (!dp_catalog_link_is_connected(panel->catalog)) { + if (!msm_dp_catalog_link_is_connected(panel->catalog)) { rc = -ETIMEDOUT; goto end; } @@ -172,87 +172,87 @@ end: return rc; } -u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, +u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; u32 bpp; - if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + if (!msm_dp_panel || !mode_edid_bpp || !mode_pclk_khz) { DRM_ERROR("invalid input\n"); return 0; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); - if (dp_panel->video_test) - bpp = dp_link_bit_depth_to_bpp( + if (msm_dp_panel->video_test) + bpp = msm_dp_link_bit_depth_to_bpp( panel->link->test_video.test_bit_depth); else - bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + bpp = msm_dp_panel_get_supported_bpp(msm_dp_panel, mode_edid_bpp, mode_pclk_khz); return bpp; } -int dp_panel_get_modes(struct dp_panel *dp_panel, +int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector) { - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return -EINVAL; } - if (dp_panel->drm_edid) + if (msm_dp_panel->drm_edid) return drm_edid_connector_add_modes(connector); return 0; } -static u8 dp_panel_get_edid_checksum(const struct edid *edid) +static u8 msm_dp_panel_get_edid_checksum(const struct edid *edid) { edid += edid->extensions; return edid->checksum; } -void dp_panel_handle_sink_request(struct dp_panel *dp_panel) +void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { /* FIXME: get rid of drm_edid_raw() */ - const 
struct edid *edid = drm_edid_raw(dp_panel->drm_edid); + const struct edid *edid = drm_edid_raw(msm_dp_panel->drm_edid); u8 checksum; if (edid) - checksum = dp_panel_get_edid_checksum(edid); + checksum = msm_dp_panel_get_edid_checksum(edid); else - checksum = dp_panel->connector->real_edid_checksum; + checksum = msm_dp_panel->connector->real_edid_checksum; - dp_link_send_edid_checksum(panel->link, checksum); - dp_link_send_test_response(panel->link); + msm_dp_link_send_edid_checksum(panel->link, checksum); + msm_dp_link_send_test_response(panel->link); } } -void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) +void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; if (!panel->panel_on) { @@ -262,31 +262,31 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) } if (!enable) { - dp_catalog_panel_tpg_disable(catalog); + msm_dp_catalog_panel_tpg_disable(catalog); return; } drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n"); - dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode); + msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode); } -static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) +static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; - struct dp_display_mode *dp_mode; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; + struct msm_dp_display_mode *msm_dp_mode; struct drm_dp_vsc_sdp vsc_sdp_data; struct dp_sdp vsc_sdp; ssize_t len; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return -EINVAL; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - dp_mode = &dp_panel->dp_mode; + msm_dp_mode = &msm_dp_panel->msm_dp_mode; memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data)); @@ -300,7 +300,7 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) vsc_sdp_data.colorimetry = DP_COLORIMETRY_DEFAULT; /* VSC SDP Payload for DB17 */ - vsc_sdp_data.bpc = dp_mode->bpp / 3; + vsc_sdp_data.bpc = msm_dp_mode->bpp / 3; vsc_sdp_data.dynamic_range = DP_DYNAMIC_RANGE_CTA; /* VSC SDP Payload for DB18 */ @@ -312,36 +312,36 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) return len; } - dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp); + msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp); return 0; } -void dp_panel_dump_regs(struct dp_panel *dp_panel) +void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - dp_catalog_dump_regs(catalog); + msm_dp_catalog_dump_regs(catalog); } -int dp_panel_timing_cfg(struct dp_panel *dp_panel) +int msm_dp_panel_timing_cfg(struct msm_dp_panel 
*msm_dp_panel) { u32 data, total_ver, total_hor; - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; struct drm_display_mode *drm_mode; u32 width_blanking; u32 sync_start; - u32 dp_active; + u32 msm_dp_active; u32 total; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - drm_mode = &panel->dp_panel.dp_mode.drm_mode; + drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode; drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n", drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end, @@ -371,9 +371,9 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data = drm_mode->vsync_end - drm_mode->vsync_start; data <<= 16; - data |= (panel->dp_panel.dp_mode.v_active_low << 31); + data |= (panel->msm_dp_panel.msm_dp_mode.v_active_low << 31); data |= drm_mode->hsync_end - drm_mode->hsync_start; - data |= (panel->dp_panel.dp_mode.h_active_low << 15); + data |= (panel->msm_dp_panel.msm_dp_mode.h_active_low << 15); width_blanking = data; @@ -381,26 +381,26 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data <<= 16; data |= drm_mode->hdisplay; - dp_active = data; + msm_dp_active = data; - dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active); + msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active); - if (dp_panel->dp_mode.out_fmt_is_yuv_420) - dp_panel_setup_vsc_sdp_yuv_420(dp_panel); + if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420) + msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel); panel->panel_on = true; return 0; } -int dp_panel_init_panel_info(struct dp_panel *dp_panel) +int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel) { struct drm_display_mode *drm_mode; - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - drm_mode = &dp_panel->dp_mode.drm_mode; + drm_mode = &msm_dp_panel->msm_dp_mode.drm_mode; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); /* * print resolution info as this is a result @@ -421,18 +421,18 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel) drm_mode->vsync_end - drm_mode->vsync_start); drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n", drm_mode->clock); - drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp); + drm_dbg_dp(panel->drm_dev, "bpp = %d\n", msm_dp_panel->msm_dp_mode.bpp); - dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp, - dp_panel->dp_mode.drm_mode.clock); + msm_dp_panel->msm_dp_mode.bpp = msm_dp_panel_get_mode_bpp(msm_dp_panel, msm_dp_panel->msm_dp_mode.bpp, + msm_dp_panel->msm_dp_mode.drm_mode.clock); drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n", - dp_panel->dp_mode.bpp); + msm_dp_panel->msm_dp_mode.bpp); return 0; } -static u32 dp_panel_link_frequencies(struct device_node *of_node) +static u32 msm_dp_panel_link_frequencies(struct device_node *of_node) { struct device_node *endpoint; u64 frequency = 0; @@ -456,17 +456,17 @@ static u32 dp_panel_link_frequencies(struct device_node *of_node) return frequency; } -static int dp_panel_parse_dt(struct dp_panel *dp_panel) +static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; struct device_node *of_node; int cnt; - panel = container_of(dp_panel, struct 
dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); of_node = panel->dev->of_node; /* - * data-lanes is the property of dp_out endpoint + * data-lanes is the property of msm_dp_out endpoint */ cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES); if (cnt < 0) { @@ -475,21 +475,21 @@ static int dp_panel_parse_dt(struct dp_panel *dp_panel) } if (cnt > 0) - dp_panel->max_dp_lanes = cnt; + msm_dp_panel->max_dp_lanes = cnt; else - dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ + msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ - dp_panel->max_dp_link_rate = dp_panel_link_frequencies(of_node); - if (!dp_panel->max_dp_link_rate) - dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; + msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node); + if (!msm_dp_panel->max_dp_link_rate) + msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; return 0; } -struct dp_panel *dp_panel_get(struct dp_panel_in *in) +struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in) { - struct dp_panel_private *panel; - struct dp_panel *dp_panel; + struct msm_dp_panel_private *panel; + struct msm_dp_panel *msm_dp_panel; int ret; if (!in->dev || !in->catalog || !in->aux || !in->link) { @@ -506,20 +506,20 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in) panel->catalog = in->catalog; panel->link = in->link; - dp_panel = &panel->dp_panel; - dp_panel->max_bw_code = DP_LINK_BW_8_1; + msm_dp_panel = &panel->msm_dp_panel; + msm_dp_panel->max_bw_code = DP_LINK_BW_8_1; - ret = dp_panel_parse_dt(dp_panel); + ret = msm_dp_panel_parse_dt(msm_dp_panel); if (ret) return ERR_PTR(ret); - return dp_panel; + return msm_dp_panel; } -void dp_panel_put(struct dp_panel *dp_panel) +void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel) { - if (!dp_panel) + if (!msm_dp_panel) return; - drm_edid_free(dp_panel->drm_edid); + drm_edid_free(msm_dp_panel->drm_edid); } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 6722e3923fa5..0e944db3adf2 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -13,7 +13,7 @@ struct edid; -struct dp_display_mode { +struct msm_dp_display_mode { struct drm_display_mode drm_mode; u32 bpp; u32 h_active_low; @@ -21,28 +21,28 @@ struct dp_display_mode { bool out_fmt_is_yuv_420; }; -struct dp_panel_in { +struct msm_dp_panel_in { struct device *dev; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; }; -struct dp_panel_psr { +struct msm_dp_panel_psr { u8 version; u8 capabilities; }; -struct dp_panel { +struct msm_dp_panel { /* dpcd raw data */ u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - struct dp_link_info link_info; + struct msm_dp_link_info link_info; const struct drm_edid *drm_edid; struct drm_connector *connector; - struct dp_display_mode dp_mode; - struct dp_panel_psr psr_cap; + struct msm_dp_display_mode msm_dp_mode; + struct msm_dp_panel_psr psr_cap; bool video_test; bool vsc_sdp_supported; @@ -52,18 +52,18 @@ struct dp_panel { u32 max_bw_code; }; -int dp_panel_init_panel_info(struct dp_panel *dp_panel); -int dp_panel_deinit(struct dp_panel *dp_panel); -int dp_panel_timing_cfg(struct dp_panel *dp_panel); -void dp_panel_dump_regs(struct dp_panel *dp_panel); -int dp_panel_read_sink_caps(struct dp_panel *dp_panel, +int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel); +int 
msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel); +int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel); +void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel); +int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector); -u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, +u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp, u32 mode_pclk_khz); -int dp_panel_get_modes(struct dp_panel *dp_panel, +int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector); -void dp_panel_handle_sink_request(struct dp_panel *dp_panel); -void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable); +void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel); +void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable); /** * is_link_rate_valid() - validates the link rate @@ -80,7 +80,7 @@ static inline bool is_link_rate_valid(u32 bw_code) } /** - * dp_link_is_lane_count_valid() - validates the lane count + * msm_dp_link_is_lane_count_valid() - validates the lane count * @lane_count: lane count requested by the sink * * Returns true if the requested lane count is supported. @@ -92,6 +92,6 @@ static inline bool is_lane_count_valid(u32 lane_count) lane_count == 4); } -struct dp_panel *dp_panel_get(struct dp_panel_in *in); -void dp_panel_put(struct dp_panel *dp_panel); +struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in); +void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel); #endif /* _DP_PANEL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c index da9207caf72d..2a40f07fe2d5 100644 --- a/drivers/gpu/drm/msm/dp/dp_utils.c +++ b/drivers/gpu/drm/msm/dp/dp_utils.c @@ -9,7 +9,7 @@ #define DP_SDP_HEADER_SIZE 8 -u8 dp_utils_get_g0_value(u8 data) +u8 msm_dp_utils_get_g0_value(u8 data) { u8 c[4]; u8 g[4]; @@ -30,7 +30,7 @@ u8 dp_utils_get_g0_value(u8 data) return ret_data; } -u8 dp_utils_get_g1_value(u8 data) +u8 msm_dp_utils_get_g1_value(u8 data) { u8 c[4]; u8 g[4]; @@ -51,7 +51,7 @@ u8 dp_utils_get_g1_value(u8 data) return ret_data; } -u8 dp_utils_calculate_parity(u32 data) +u8 msm_dp_utils_calculate_parity(u32 data) { u8 x0 = 0; u8 x1 = 0; @@ -65,8 +65,8 @@ u8 dp_utils_calculate_parity(u32 data) iData = (data >> i * 4) & 0xF; ci = iData ^ x1; - x1 = x0 ^ dp_utils_get_g1_value(ci); - x0 = dp_utils_get_g0_value(ci); + x1 = x0 ^ msm_dp_utils_get_g1_value(ci); + x0 = msm_dp_utils_get_g0_value(ci); } parity_byte = x1 | (x0 << 4); @@ -74,7 +74,7 @@ u8 dp_utils_calculate_parity(u32 data) return parity_byte; } -ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff) +ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff) { size_t length; @@ -83,14 +83,14 @@ ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_b return -ENOSPC; header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) | - FIELD_PREP(PARITY_0_MASK, dp_utils_calculate_parity(sdp_header->HB0)) | + FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) | FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) | - FIELD_PREP(PARITY_1_MASK, dp_utils_calculate_parity(sdp_header->HB1)); + FIELD_PREP(PARITY_1_MASK, msm_dp_utils_calculate_parity(sdp_header->HB1)); header_buff[1] = FIELD_PREP(HEADER_2_MASK, sdp_header->HB2) | - FIELD_PREP(PARITY_2_MASK, dp_utils_calculate_parity(sdp_header->HB2)) | + FIELD_PREP(PARITY_2_MASK, 
msm_dp_utils_calculate_parity(sdp_header->HB2)) | FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) | - FIELD_PREP(PARITY_3_MASK, dp_utils_calculate_parity(sdp_header->HB3)); + FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3)); return length; } diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h index 7c056d9798dc..88d53157f5b5 100644 --- a/drivers/gpu/drm/msm/dp/dp_utils.h +++ b/drivers/gpu/drm/msm/dp/dp_utils.h @@ -28,9 +28,9 @@ #define HEADER_3_MASK GENMASK(23, 16) #define PARITY_3_MASK GENMASK(31, 24) -u8 dp_utils_get_g0_value(u8 data); -u8 dp_utils_get_g1_value(u8 data); -u8 dp_utils_calculate_parity(u32 data); -ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff); +u8 msm_dp_utils_get_g0_value(u8 data); +u8 msm_dp_utils_get_g1_value(u8 data); +u8 msm_dp_utils_calculate_parity(u32 data); +ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff); #endif /* _DP_UTILS_H_ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c index e6ffaf92d26d..a719fd33d9d8 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c @@ -157,9 +157,8 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) #define HDMI_MHZ_TO_HZ ((u64)1000000) static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk) { - u32 const ratio_list[] = {1, 2, 3, 4, 5, 6, - 9, 10, 12, 15, 25}; - u32 const band_list[] = {0, 1, 2, 3}; + static const u32 ratio_list[] = {1, 2, 3, 4, 5, 6, 9, 10, 12, 15, 25}; + static const u32 band_list[] = {0, 1, 2, 3}; u32 const sz_ratio = ARRAY_SIZE(ratio_list); u32 const sz_band = ARRAY_SIZE(band_list); u32 const cmp_cnt = 1024; @@ -270,7 +269,7 @@ find_optimal_index: case 25: found_hsclk_divsel = 14; break; - }; + } pd->vco_freq = found_vco_freq; pd->tx_band_sel = found_tx_band_sel; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 8c13b08708d2..c2dd8ef6d6dc 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -11,6 +11,7 @@ #include <linux/of_address.h> #include <linux/uaccess.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -291,7 +292,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) if (priv->kms_init) { drm_kms_helper_poll_init(ddev); - msm_fbdev_setup(ddev); + drm_client_setup(ddev, NULL); } return 0; @@ -902,6 +903,7 @@ static const struct drm_driver msm_driver = { #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, #endif + MSM_FBDEV_DRIVER_OPS, .show_fdinfo = msm_show_fdinfo, .ioctls = msm_ioctls, .num_ioctls = ARRAY_SIZE(msm_ioctls), @@ -983,6 +985,10 @@ module_param(prefer_mdp5, bool, 0444); /* list all platforms supported by both mdp5 and dpu drivers */ static const char *const msm_mdp5_dpu_migration[] = { + "qcom,msm8917-mdp5", + "qcom,msm8937-mdp5", + "qcom,msm8953-mdp5", + "qcom,msm8996-mdp5", "qcom,sdm630-mdp5", "qcom,sdm660-mdp5", NULL, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 2e28a1344636..d8c9a1b19263 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -36,6 +36,9 @@ extern struct fault_attr fail_gem_alloc; extern struct fault_attr fail_gem_iova; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct msm_kms; struct msm_gpu; struct msm_mmu; @@ -49,7 +52,6 @@ struct msm_gem_vma; struct msm_disp_state; #define 
MAX_CRTCS 8 -#define MAX_BRIDGES 8 #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) @@ -68,23 +70,6 @@ enum msm_dsi_controller { }; #define MSM_GPU_MAX_RINGS 4 -#define MAX_H_TILES_PER_DISPLAY 2 - -/** - * struct msm_display_topology - defines a display topology pipeline - * @num_lm: number of layer mixers used - * @num_intf: number of interfaces the panel is mounted on - * @num_dspp: number of dspp blocks used - * @num_dsc: number of Display Stream Compression (DSC) blocks used - * @needs_cdm: indicates whether cdm block is needed for this display topology - */ -struct msm_display_topology { - u32 num_lm; - u32 num_intf; - u32 num_dspp; - u32 num_dsc; - bool needs_cdm; -}; /* Commit/Event thread specific structure */ struct msm_drm_thread { @@ -290,11 +275,13 @@ struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format); #ifdef CONFIG_DRM_FBDEV_EMULATION -void msm_fbdev_setup(struct drm_device *dev); +int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +#define MSM_FBDEV_DRIVER_OPS \ + .fbdev_probe = msm_fbdev_driver_fbdev_probe #else -static inline void msm_fbdev_setup(struct drm_device *dev) -{ -} +#define MSM_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif struct hdmi; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 030bedac632d..c62249b1ab3d 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -65,8 +65,31 @@ static const struct fb_ops msm_fb_ops = { .fb_destroy = msm_fbdev_fb_destroy, }; -static int msm_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs msm_fbdev_helper_funcs = { + .fb_dirty = msm_fbdev_fb_dirty, +}; + +int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = helper->dev; struct msm_drm_private *priv = dev->dev_private; @@ -114,6 +137,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, DBG("fbi=%p, dev=%p", fbi, dev); + helper->funcs = &msm_fbdev_helper_funcs; helper->fb = fb; fbi->fbops = &msm_fb_ops; @@ -138,119 +162,3 @@ fail: drm_framebuffer_remove(fb); return ret; } - -static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper, - struct drm_clip_rect *clip) -{ - struct drm_device *dev = helper->dev; - int ret; - - /* Call damage handlers only if necessary */ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - return ret; - } - - return 0; -} - -static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { - .fb_probe = msm_fbdev_create, - .fb_dirty = msm_fbdev_fb_dirty, -}; - -/* - * struct drm_client - */ - -static void msm_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - 
drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int msm_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int msm_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs msm_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = msm_fbdev_client_unregister, - .restore = msm_fbdev_client_restore, - .hotplug = msm_fbdev_client_hotplug, -}; - -/* initialize fbdev helper */ -void msm_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *helper; - int ret; - - if (!fbdev) - return; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - helper = kzalloc(sizeof(*helper), GFP_KERNEL); - if (!helper) - return; - drm_fb_helper_prepare(dev, helper, 32, &msm_fb_helper_funcs); - - ret = drm_client_init(dev, &helper->client, "fbdev", &msm_fbdev_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_fb_helper_unprepare; - } - - drm_client_register(&helper->client); - - return; - -err_drm_fb_helper_unprepare: - drm_fb_helper_unprepare(helper); - kfree(helper); -} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index a274b8466423..0d4a3744cfcb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) mutex_unlock(&gpu->active_lock); gpu->funcs->submit(gpu, submit); - gpu->cur_ctx_seqno = submit->queue->ctx->seqno; + submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno; pm_runtime_put(&gpu->pdev->dev); hangcheck_timer_reset(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 1f02bb9956be..7cabc8480d7c 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -194,17 +194,6 @@ struct msm_gpu { refcount_t sysprof_active; /** - * cur_ctx_seqno: - * - * The ctx->seqno value of the last context to submit rendering, - * and the one with current pgtables installed (for generations - * that support per-context pgtables). Tracked by seqno rather - * than pointer value to avoid dangling pointers, and cases where - * a ctx can be freed and a new one created with the same address. - */ - int cur_ctx_seqno; - - /** * lock: * * General lock for serializing all the gpu things. 
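On the msm_gpu.c/msm_gpu.h change above: cur_ctx_seqno moves from the GPU into each ring because, with preemption, every ring can have a different "last submitted" context. The removed comment explains why a seqno is tracked rather than a context pointer: a freed-and-reallocated context could reuse the same address, while seqnos are never reused. A reduced sketch of how such a field gates per-context page-table switches; all names here are illustrative, not the driver's:

struct example_ctx { int seqno; };		/* unique per context lifetime */
struct example_ring { int cur_ctx_seqno; };

static void example_submit(struct example_ring *ring, struct example_ctx *ctx)
{
	/*
	 * Comparing seqnos is safe even if an old ctx was freed and its
	 * address reused; comparing pointers would not be.
	 */
	if (ring->cur_ctx_seqno != ctx->seqno) {
		/* switch per-context pgtables here */
	}
	ring->cur_ctx_seqno = ctx->seqno;
}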
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index ea70c1c32d94..6970b0f7f457 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -140,6 +140,7 @@ void msm_devfreq_init(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; struct msm_drm_private *priv = gpu->dev->dev_private; + int ret; /* We need target support to do devfreq */ if (!gpu->funcs->gpu_busy) @@ -156,8 +157,12 @@ void msm_devfreq_init(struct msm_gpu *gpu) mutex_init(&df->lock); - dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, - DEV_PM_QOS_MIN_FREQUENCY, 0); + ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, + DEV_PM_QOS_MIN_FREQUENCY, 0); + if (ret < 0) { + DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n"); + return; + } msm_devfreq_profile.initial_freq = gpu->fast_rate; diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h index ac40d857bc45..7f863282db0d 100644 --- a/drivers/gpu/drm/msm/msm_gpu_trace.h +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h @@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume, TP_printk("%u", __entry->dummy) ); +TRACE_EVENT(msm_gpu_preemption_trigger, + TP_PROTO(int ring_id_from, int ring_id_to), + TP_ARGS(ring_id_from, ring_id_to), + TP_STRUCT__entry( + __field(int, ring_id_from) + __field(int, ring_id_to) + ), + TP_fast_assign( + __entry->ring_id_from = ring_id_from; + __entry->ring_id_to = ring_id_to; + ), + TP_printk("preempting %u -> %u", + __entry->ring_id_from, + __entry->ring_id_to) +); + +TRACE_EVENT(msm_gpu_preemption_irq, + TP_PROTO(u32 ring_id), + TP_ARGS(ring_id), + TP_STRUCT__entry( + __field(u32, ring_id) + ), + TP_fast_assign( + __entry->ring_id = ring_id; + ), + TP_printk("preempted to %u", __entry->ring_id) +); + #endif #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index af6a6fcb1173..f3326d09bdbc 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -5,11 +5,11 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/aperture.h> #include <linux/kthread.h> #include <linux/sched/mm.h> #include <uapi/linux/sched/types.h> -#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_mode_config.h> #include <drm/drm_vblank.h> @@ -237,7 +237,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv) int ret; /* the fw fb could be anywhere in memory */ - ret = drm_aperture_remove_framebuffers(drv); + ret = aperture_remove_all_conflicting_devices(drv->name); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 1e0c54de3716..e60162744c66 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -92,12 +92,6 @@ struct msm_kms_funcs { * Format handling: */ - /* do format checking on format modified through fb_cmd2 modifiers */ - int (*check_modified_format)(const struct msm_kms *kms, - const struct msm_format *msm_fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos); - /* misc: */ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index faa88fd6eb4d..b7bd899ead44 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -19,13 +19,7 @@ #include "msm_mdss.h" #include "msm_kms.h" -#define HW_REV 0x0 -#define HW_INTR_STATUS 0x0010 - -#define UBWC_DEC_HW_VERSION 0x58 -#define 
UBWC_STATIC 0x144 -#define UBWC_CTRL_2 0x150 -#define UBWC_PREDICTION_MODE 0x154 +#include <generated/mdss.xml.h> #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */ @@ -83,7 +77,7 @@ static void msm_mdss_irq(struct irq_desc *desc) chained_irq_enter(chip, desc); - interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS); + interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS); while (interrupts) { irq_hw_number_t hwirq = fls(interrupts) - 1; @@ -173,7 +167,7 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss) { const struct msm_mdss_data *data = msm_mdss->mdss_data; - writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); } static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss) @@ -189,7 +183,7 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss) if (data->ubwc_enc_version == UBWC_1_0) value |= BIT(8); - writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); } static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss) @@ -200,21 +194,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss) (data->highest_bank_bit & 0x7) << 4 | (data->macrotile_mode & 0x1) << 12; - writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); if (data->ubwc_enc_version == UBWC_3_0) { - writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2); - writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE); + writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); + writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE); } else { if (data->ubwc_dec_version == UBWC_4_3) - writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2); + writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); else - writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); - writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE); + writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); + writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE); } } -#define MDSS_HW_MAJ_MIN GENMASK(31, 16) +#define MDSS_HW_MAJ_MIN \ + (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK) #define MDSS_HW_MSM8996 0x1007 #define MDSS_HW_MSM8937 0x100e @@ -235,7 +230,7 @@ static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_m if (!data) return NULL; - hw_rev = readl_relaxed(mdss->mmio + HW_REV); + hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION); hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev); if (hw_rev == MDSS_HW_MSM8996 || @@ -334,9 +329,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss) dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n", msm_mdss->mdss_data->ubwc_dec_version); dev_err(msm_mdss->dev, "HW_REV: 0x%x\n", - readl_relaxed(msm_mdss->mmio + HW_REV)); + readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION)); dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n", - readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION)); + readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION)); break; } @@ -573,6 +568,16 @@ static const struct msm_mdss_data qcm2290_data = { .reg_bus_bw = 76800, }; +static const struct msm_mdss_data sa8775p_data = { + .ubwc_enc_version = UBWC_4_0, + .ubwc_dec_version = UBWC_4_0, + .ubwc_swizzle = 4, + .ubwc_static = 1, + .highest_bank_bit = 0, + .macrotile_mode = 1, + .reg_bus_bw = 74000, +}; + static const struct msm_mdss_data sc7180_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version 
= UBWC_2_0, @@ -710,6 +715,7 @@ static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,mdss" }, { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data }, { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data }, + { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data }, { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data }, { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data }, { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data }, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 9d6655f96f0c..c803556a8f64 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -64,7 +64,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, char name[32]; int ret; - /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ + /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ)); ring = kzalloc(sizeof(*ring), GFP_KERNEL); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 0d6beb8cd39a..d1e49f701c81 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -31,10 +31,12 @@ struct msm_rbmemptrs { volatile uint32_t rptr; volatile uint32_t fence; /* Introduced on A7xx */ + volatile uint32_t bv_rptr; volatile uint32_t bv_fence; volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT]; volatile u64 ttbr0; + volatile u32 context_idr; }; struct msm_cp_state { @@ -99,6 +101,22 @@ struct msm_ringbuffer { * preemption. Can be acquired from irq context. */ spinlock_t preempt_lock; + + /* + * Whether we skipped writing wptr and it needs to be updated in the + * future when the ring becomes current. + */ + bool restore_wptr; + + /** + * cur_ctx_seqno: + * + * The ctx->seqno value of the last context to submit to this ring. + * Tracked by seqno rather than pointer value to avoid dangling + * pointers, and cases where a ctx can be freed and a new one created + * with the same address. + */ + int cur_ctx_seqno; }; struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 0e803125a325..2fc3eaf81f44 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -161,6 +161,8 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; enum drm_sched_priority sched_prio; + extern int enable_preemption; + bool preemption_supported; unsigned ring_nr; int ret; @@ -170,6 +172,11 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, if (!priv->gpu) return -ENODEV; + preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0; + + if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) + return -EINVAL; + ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml index 97608603ea62..2db425abf0f3 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml @@ -2358,7 +2358,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG" type="a6x_cp_protect"/> </array> - <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"/> + <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"> + <bitfield name="STOP" pos="0" type="boolean"/> + <bitfield name="LEVEL" low="6" high="7"/> + <bitfield name="USES_GMEM" pos="8" type="boolean"/> + <bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/> + </reg32> <reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/> <reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/> <reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/> diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index cab01af55d22..55a35182858c 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -581,8 +581,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> and forcibly switch to the indicated context. </doc> <value name="CP_CONTEXT_SWITCH" value="0x54" variants="A6XX"/> - <!-- Note, kgsl calls this CP_SET_AMBLE: --> - <value name="CP_SET_CTXSWITCH_IB" value="0x55" variants="A6XX-"/> + <value name="CP_SET_AMBLE" value="0x55" variants="A6XX-"/> <!-- Seems to always have the payload: @@ -2013,42 +2012,38 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> </domain> -<domain name="CP_SET_CTXSWITCH_IB" width="32"> +<domain name="CP_SET_AMBLE" width="32"> <doc> - Used by the userspace driver to set various IB's which are - executed during context save/restore for handling - state that isn't restored by the - context switch routine itself. - </doc> - <enum name="ctxswitch_ib"> - <value name="RESTORE_IB" value="0"> + Used by the userspace and kernel drivers to set various IB's + which are executed during context save/restore for handling + state that isn't restored by the context switch routine itself. + </doc> + <enum name="amble_type"> + <value name="PREAMBLE_AMBLE_TYPE" value="0"> <doc>Executed unconditionally when switching back to the context.</doc> </value> - <value name="YIELD_RESTORE_IB" value="1"> + <value name="BIN_PREAMBLE_AMBLE_TYPE" value="1"> <doc> Executed when switching back after switching away during execution of - a CP_SET_MARKER packet with RM6_YIELD as the - payload *and* the normal save routine was - bypassed for a shorter one. I think this is - connected to the "skipsaverestore" bit set by - the kernel when preempting. + a CP_SET_MARKER packet with RM6_BIN_RENDER_END as the + payload *and* skipsaverestore is set. This is + expected to restore static register values not + saved when skipsaverestore is set. </doc> </value> - <value name="SAVE_IB" value="2"> + <value name="POSTAMBLE_AMBLE_TYPE" value="2"> <doc> Executed when switching away from the context, except for context switches initiated via CP_YIELD. </doc> </value> - <value name="RB_SAVE_IB" value="3"> + <value name="KMD_AMBLE_TYPE" value="3"> <doc> This can only be set by the RB (i.e. the kernel) and executes with protected mode off, but - is otherwise similar to SAVE_IB. - - Note, kgsl calls this CP_KMD_AMBLE_TYPE + is otherwise similar to POSTAMBLE_AMBLE_TYPE. 
</doc> </value> </enum> @@ -2060,7 +2055,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> <reg32 offset="2" name="2"> <bitfield name="DWORDS" low="0" high="19" type="uint"/> - <bitfield name="TYPE" low="20" high="21" type="ctxswitch_ib"/> + <bitfield name="TYPE" low="20" high="21" type="amble_type"/> </reg32> </domain> diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml index 92f3263af170..8c9c4af350aa 100644 --- a/drivers/gpu/drm/msm/registers/display/mdp5.xml +++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml @@ -9,22 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <domain name="VBIF" width="32"> </domain> -<domain name="MDSS" width="32"> - <reg32 offset="0x00000" name="HW_VERSION"> - <bitfield name="STEP" low="0" high="15" type="uint"/> - <bitfield name="MINOR" low="16" high="27" type="uint"/> - <bitfield name="MAJOR" low="28" high="31" type="uint"/> - </reg32> - - <reg32 offset="0x00010" name="HW_INTR_STATUS"> - <bitfield name="INTR_MDP" pos="0" type="boolean"/> - <bitfield name="INTR_DSI0" pos="4" type="boolean"/> - <bitfield name="INTR_DSI1" pos="5" type="boolean"/> - <bitfield name="INTR_HDMI" pos="8" type="boolean"/> - <bitfield name="INTR_EDP" pos="12" type="boolean"/> - </reg32> -</domain> - <domain name="MDP5" width="32"> <enum name="mdp5_intf_type"> diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml new file mode 100644 index 000000000000..ac85caf1575c --- /dev/null +++ b/drivers/gpu/drm/msm/registers/display/mdss.xml @@ -0,0 +1,29 @@ +<?xml version="1.0" encoding="UTF-8"?> +<database xmlns="http://nouveau.freedesktop.org/" +xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> +<import file="freedreno_copyright.xml"/> + +<domain name="MDSS" width="32"> + <reg32 offset="0x00000" name="HW_VERSION"> + <bitfield name="STEP" low="0" high="15" type="uint"/> + <bitfield name="MINOR" low="16" high="27" type="uint"/> + <bitfield name="MAJOR" low="28" high="31" type="uint"/> + </reg32> + + <reg32 offset="0x00010" name="HW_INTR_STATUS"> + <bitfield name="INTR_MDP" pos="0" type="boolean"/> + <bitfield name="INTR_DSI0" pos="4" type="boolean"/> + <bitfield name="INTR_DSI1" pos="5" type="boolean"/> + <bitfield name="INTR_HDMI" pos="8" type="boolean"/> + <bitfield name="INTR_EDP" pos="12" type="boolean"/> + </reg32> + + <reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/> + + <reg32 offset="0x00144" name="UBWC_STATIC"/> + <reg32 offset="0x00150" name="UBWC_CTRL_2"/> + <reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/> +</domain> + +</database> diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig index 518b53345354..264e74f45554 100644 --- a/drivers/gpu/drm/mxsfb/Kconfig +++ b/drivers/gpu/drm/mxsfb/Kconfig @@ -9,6 +9,7 @@ config DRM_MXSFB depends on DRM && OF depends on COMMON_CLK depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_MXS select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER @@ -26,6 +27,7 @@ config DRM_IMX_LCDIF depends on DRM && OF depends on COMMON_CLK depends on ARCH_MXC || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_MXS select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 0f895b8a99d6..58ccad9c425d 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -16,6 
+16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_fbdev_dma.h> @@ -243,6 +244,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver lcdif_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "imx-lcdif", .desc = "i.MX LCDIF Controller DRM", @@ -275,7 +277,7 @@ static int lcdif_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index cb5ce4e81fc7..34a98717b72c 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -19,6 +19,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -331,6 +332,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver mxsfb_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "mxsfb-drm", .desc = "MXSFB Controller DRM", @@ -364,7 +366,7 @@ static int mxsfb_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index ceef470c9fbf..ce840300578d 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -4,6 +4,7 @@ config DRM_NOUVEAU depends on DRM && PCI && MMU select IOMMU_API select FW_LOADER + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/nouveau/dispnv50/tile.h b/drivers/gpu/drm/nouveau/dispnv50/tile.h new file mode 100644 index 000000000000..e2be82830cf7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/tile.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NV50_TILE_H__ +#define __NV50_TILE_H__ + +#include <linux/types.h> +#include <linux/math.h> + +/* + * Tiling parameters for NV50+. + * GOB = Group of bytes, the main unit for tiling blocks. + * Tiling blocks are a power of 2 number of GOB. + * All GOBs and blocks have the same width: 64 bytes (so 16 pixels in 32bits). + * tile_mode is the log2 of the number of GOB per block. 
+ */ + +#define NV_TILE_GOB_HEIGHT_TESLA 4 /* 4 x 64 bytes = 256 bytes for a GOB on Tesla */ +#define NV_TILE_GOB_HEIGHT 8 /* 8 x 64 bytes = 512 bytes for a GOB on Fermi and later */ +#define NV_TILE_GOB_WIDTH_BYTES 64 + +/* Number of blocks to cover the width of the framebuffer */ +static inline u32 nouveau_get_width_in_blocks(u32 stride) +{ + return DIV_ROUND_UP(stride, NV_TILE_GOB_WIDTH_BYTES); +} + +/* Return the height in pixels of one GOB */ +static inline u32 nouveau_get_gob_height(u16 family) +{ + if (family == NV_DEVICE_INFO_V0_TESLA) + return NV_TILE_GOB_HEIGHT_TESLA; + else + return NV_TILE_GOB_HEIGHT; +} + +/* Number of blocks to cover the height of the framebuffer */ +static inline u32 nouveau_get_height_in_blocks(u32 height, u32 gobs_in_block, u16 family) +{ + return DIV_ROUND_UP(height, nouveau_get_gob_height(family) * gobs_in_block); +} + +/* Return the GOB size in bytes */ +static inline u32 nouveau_get_gob_size(u16 family) +{ + return nouveau_get_gob_height(family) * NV_TILE_GOB_WIDTH_BYTES; +} + +/* Return the number of GOBs in a block */ +static inline int nouveau_get_gobs_in_block(u32 tile_mode, u16 chipset) +{ + if (chipset >= 0xc0) + return 1 << (tile_mode >> 4); + return 1 << tile_mode; +} + +/* Return true if tile_mode is invalid */ +static inline bool nouveau_check_tile_mode(u32 tile_mode, u16 chipset) +{ + if (chipset >= 0xc0) + return (tile_mode & 0xfffff0f); + return (tile_mode & 0xfffffff0); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 7a2cceaee6e9..f6be426dd525 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -30,14 +30,20 @@ #include <nvhw/class/cl507e.h> #include <nvhw/class/clc37e.h> +#include <linux/iosys-map.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_panic.h> +#include <drm/ttm/ttm_bo.h> #include "nouveau_bo.h" #include "nouveau_gem.h" +#include "tile.h" static void nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma) @@ -577,6 +583,114 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) return 0; } +/* Only used by drm_panic get_scanout_buffer() and set_pixel(), so it is + * protected by the drm panic spinlock + */ +static u32 nv50_panic_blk_h; + +/* Return the framebuffer offset of the start of the block where pixel(x,y) is */ +static u32 +nv50_get_block_off(unsigned int x, unsigned int y, unsigned int pitch) +{ + u32 blk_x, blk_y, blk_columns; + + blk_columns = nouveau_get_width_in_blocks(pitch); + blk_x = (x * 4) / NV_TILE_GOB_WIDTH_BYTES; + blk_y = y / nv50_panic_blk_h; + + return ((blk_y * blk_columns) + blk_x) * NV_TILE_GOB_WIDTH_BYTES * nv50_panic_blk_h; +} + +/* Turing and later have 2 levels of tiles inside the block */ +static void +nv50_set_pixel_swizzle(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + u32 blk_off, off, swizzle; + + blk_off = nv50_get_block_off(x, y, sb->pitch[0]); + + y = y % nv50_panic_blk_h; + + /* Inside the block, use the fast address swizzle to compute the offset + * For nvidia blocklinear, bit order is yn..y3 x3 y2 x2 y1 y0 x1 x0 + */ + swizzle = (x & 3) | (y & 3) << 2 | (x & 4) << 2 | (y & 4) << 3; + swizzle |= (x & 8) << 3 | (y >> 3) << 7; + off = blk_off + swizzle * 4; + + iosys_map_wr(&sb->map[0], off, u32, color); +} + +static 
void +nv50_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, unsigned int y, + u32 color) +{ + u32 blk_off, off; + + blk_off = nv50_get_block_off(x, y, sb->width); + + x = x % (NV_TILE_GOB_WIDTH_BYTES / 4); + y = y % nv50_panic_blk_h; + off = blk_off + x * 4 + y * NV_TILE_GOB_WIDTH_BYTES; + + iosys_map_wr(&sb->map[0], off, u32, color); +} + +static int +nv50_wndw_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb) +{ + struct drm_framebuffer *fb; + struct nouveau_bo *nvbo; + struct nouveau_drm *drm = nouveau_drm(plane->dev); + u16 chipset = drm->client.device.info.chipset; + u8 family = drm->client.device.info.family; + u32 tile_mode; + u8 kind; + + if (!plane->state || !plane->state->fb) + return -EINVAL; + + fb = plane->state->fb; + nvbo = nouveau_gem_object(fb->obj[0]); + + /* Don't support compressed format, or multiplane yet. */ + if (nvbo->comp || fb->format->num_planes != 1) + return -EOPNOTSUPP; + + if (nouveau_bo_map(nvbo)) { + drm_warn(plane->dev, "nouveau bo map failed, panic won't be displayed\n"); + return -ENOMEM; + } + + if (nvbo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK) + iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)nvbo->kmap.virtual); + else + iosys_map_set_vaddr(&sb->map[0], nvbo->kmap.virtual); + + sb->height = fb->height; + sb->width = fb->width; + sb->pitch[0] = fb->pitches[0]; + sb->format = fb->format; + + nouveau_framebuffer_get_layout(fb, &tile_mode, &kind); + if (kind) { + /* If tiling is enabled, use set_pixel() to display correctly. + * Only handle 32bits format for now. + */ + if (fb->format->cpp[0] != 4) + return -EOPNOTSUPP; + nv50_panic_blk_h = nouveau_get_gob_height(family) * + nouveau_get_gobs_in_block(tile_mode, chipset); + + if (chipset >= 0x160) + sb->set_pixel = nv50_set_pixel_swizzle; + else + sb->set_pixel = nv50_set_pixel; + } + return 0; +} + static const struct drm_plane_helper_funcs nv50_wndw_helper = { .prepare_fb = nv50_wndw_prepare_fb, @@ -584,6 +698,14 @@ nv50_wndw_helper = { .atomic_check = nv50_wndw_atomic_check, }; +static const struct drm_plane_helper_funcs +nv50_wndw_primary_helper = { + .prepare_fb = nv50_wndw_prepare_fb, + .cleanup_fb = nv50_wndw_cleanup_fb, + .atomic_check = nv50_wndw_atomic_check, + .get_scanout_buffer = nv50_wndw_get_scanout_buffer, +}; + static void nv50_wndw_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) @@ -732,7 +854,10 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, return ret; } - drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&wndw->plane, &nv50_wndw_primary_helper); + else + drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); if (wndw->func->ilut) { ret = nv50_lut_init(disp, mmu, &wndw->ilut); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b06aa473102b..8d5c9c74cbb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -477,14 +477,14 @@ nouveau_connector_of_detect(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder; struct pci_dev *pdev = to_pci_dev(dev->dev); - struct device_node *cn, *dn = pci_device_to_OF_node(pdev); + struct device_node *dn = pci_device_to_OF_node(pdev); if (!dn || !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) || (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG)))) return NULL; - 
for_each_child_of_node(dn, cn) { + for_each_child_of_node_scoped(dn, cn) { const char *name = of_get_property(cn, "name", NULL); const void *edid = of_get_property(cn, "EDID", NULL); int idx = name ? name[strlen(name) - 1] - 'A' : 0; @@ -492,7 +492,6 @@ nouveau_connector_of_detect(struct drm_connector *connector) if (nv_encoder->dcb->i2c_index == idx && edid) { nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); - of_node_put(cn); return nv_encoder; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index e2fd561cd23f..add006fc8d81 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -28,8 +28,8 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> @@ -44,6 +44,7 @@ #include <nvif/if0011.h> #include <nvif/if0013.h> #include <dispnv50/crc.h> +#include <dispnv50/tile.h> int nouveau_display_vblank_enable(struct drm_crtc *crtc) @@ -220,69 +221,29 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm, return 0; } -static inline uint32_t -nouveau_get_width_in_blocks(uint32_t stride) -{ - /* GOBs per block in the x direction is always one, and GOBs are - * 64 bytes wide - */ - static const uint32_t log_block_width = 6; - - return (stride + (1 << log_block_width) - 1) >> log_block_width; -} - -static inline uint32_t -nouveau_get_height_in_blocks(struct nouveau_drm *drm, - uint32_t height, - uint32_t log_block_height_in_gobs) -{ - uint32_t log_gob_height; - uint32_t log_block_height; - - BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA); - - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) - log_gob_height = 2; - else - log_gob_height = 3; - - log_block_height = log_block_height_in_gobs + log_gob_height; - - return (height + (1 << log_block_height) - 1) >> log_block_height; -} - static int nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, uint32_t offset, uint32_t stride, uint32_t h, uint32_t tile_mode) { - uint32_t gob_size, bw, bh; + uint32_t gob_size, bw, bh, gobs_in_block; uint64_t bl_size; BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA); - if (drm->client.device.info.chipset >= 0xc0) { - if (tile_mode & 0xF) - return -EINVAL; - tile_mode >>= 4; - } - - if (tile_mode & 0xFFFFFFF0) + if (nouveau_check_tile_mode(tile_mode, drm->client.device.info.chipset)) return -EINVAL; - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) - gob_size = 256; - else - gob_size = 512; - + gobs_in_block = nouveau_get_gobs_in_block(tile_mode, drm->client.device.info.chipset); bw = nouveau_get_width_in_blocks(stride); - bh = nouveau_get_height_in_blocks(drm, h, tile_mode); + bh = nouveau_get_height_in_blocks(h, gobs_in_block, drm->client.device.info.family); + gob_size = nouveau_get_gob_size(drm->client.device.info.family); - bl_size = bw * bh * (1 << tile_mode) * gob_size; + bl_size = bw * bh * gobs_in_block * gob_size; - DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", - offset, stride, h, tile_mode, bw, bh, gob_size, bl_size, - nvbo->bo.base.size); + DRM_DEBUG_KMS("offset=%u stride=%u h=%u gobs_in_block=%u bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", + offset, stride, h, gobs_in_block, bw, bh, gob_size, + bl_size, nvbo->bo.base.size); if (bl_size + 
offset > nvbo->bo.base.size) return -ERANGE; @@ -804,8 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); - /* Disable console. */ - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true); + drm_client_dev_suspend(dev, false); if (drm_drv_uses_atomic_modeset(dev)) { if (!runtime) { @@ -836,8 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) } } - /* Enable console. */ - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false); + drm_client_dev_resume(dev, false); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 34985771b2a2..107f63f08bd9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ +#include <linux/aperture.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> @@ -30,7 +31,7 @@ #include <linux/mmu_notifier.h> #include <linux/dynamic_debug.h> -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_ttm_helper.h> @@ -836,6 +837,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, { struct nvkm_device *device; struct nouveau_drm *drm; + const struct drm_format_info *format; int ret; if (vga_switcheroo_client_probe_defer(pdev)) @@ -849,7 +851,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, return ret; /* Remove conflicting drivers (vesafb, efifb etc). */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci); + ret = aperture_remove_conflicting_pci_devices(pdev, driver_pci.name); if (ret) return ret; @@ -873,9 +875,11 @@ static int nouveau_drm_probe(struct pci_dev *pdev, goto fail_pci; if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) - drm_fbdev_ttm_setup(drm->dev, 8); + format = drm_format_info(DRM_FORMAT_C8); else - drm_fbdev_ttm_setup(drm->dev, 32); + format = NULL; + + drm_client_setup(drm->dev, format); quirk_broken_nv_runpm(pdev); return 0; @@ -1318,6 +1322,8 @@ driver_stub = { .dumb_create = nouveau_display_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, + DRM_FBDEV_TTM_DRIVER_OPS, + .name = DRIVER_NAME, .desc = DRIVER_DESC, #ifdef GIT_REVISION diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index eb6c3f9a01f5..4412f2711fb5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job) else NV_PRINTK(warn, job->cli, "Generic job timeout.\n"); - drm_sched_start(sched); + drm_sched_start(sched, 0); return stat; } diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index ab4e11dc0b8a..a6c375a24154 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -2,7 +2,7 @@ #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_client_event.h> #include "nouveau_drv.h" #include "nouveau_acpi.h" diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index d1c294f00665..78a83f904bbd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -120,8 +120,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) mutex_init(&tdev->iommu.mutex); if (device_iommu_mapped(dev)) 
{ - tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); - if (!tdev->iommu.domain) + tdev->iommu.domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(tdev->iommu.domain)) goto error; /* diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 060c74a80eb1..3ea447f6a45b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -443,6 +443,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch, ret = gf100_grctx_generate(gr, chan, fifoch->inst); if (ret) { nvkm_error(&base->engine.subdev, "failed to construct context\n"); + mutex_unlock(&gr->fecs.mutex); return ret; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild index 819703913a00..2c551bdc9bc9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild @@ -25,7 +25,7 @@ nvkm-y += nvkm/subdev/i2c/busnv50.o nvkm-y += nvkm/subdev/i2c/busgf119.o nvkm-y += nvkm/subdev/i2c/bit.o -nvkm-y += nvkm/subdev/i2c/aux.o +nvkm-y += nvkm/subdev/i2c/auxch.o nvkm-y += nvkm/subdev/i2c/auxg94.o nvkm-y += nvkm/subdev/i2c/auxgf119.o nvkm-y += nvkm/subdev/i2c/auxgm200.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c index dd391809fef7..6c76e5e14b75 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c @@ -24,7 +24,7 @@ #define anx9805_pad(p) container_of((p), struct anx9805_pad, base) #define anx9805_bus(p) container_of((p), struct anx9805_bus, base) #define anx9805_aux(p) container_of((p), struct anx9805_aux, base) -#include "aux.h" +#include "auxch.h" #include "bus.h" struct anx9805_pad { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c index d063d0dc13c5..fafc634acbf6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c @@ -24,7 +24,7 @@ #include <linux/string_helpers.h> -#include "aux.h" +#include "auxch.h" #include "pad.h" static int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h index f920eabf8628..f920eabf8628 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index 47068f6f9c55..854bb4b5fdb4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ #define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base) -#include "aux.h" +#include "auxch.h" struct g94_i2c_aux { struct nvkm_i2c_aux base; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c index dab40cd8fe3a..c17d5647cb99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c @@ -19,7 +19,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "aux.h" +#include "auxch.h" static const struct nvkm_i2c_aux_func gf119_i2c_aux = { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 8bd1d442e465..3c5005e3b330 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ #define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base) -#include "aux.h" +#include "auxch.h" struct gm200_i2c_aux { struct nvkm_i2c_aux base; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index 731b2f68d3db..7ec17e8435a1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "priv.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" #include "pad.h" diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c index 5904bc5f2d2a..cc26cd677917 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "pad.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c index 3bc4d0310076..1797c6c65979 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "pad.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" static const struct nvkm_i2c_pad_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c index 7d417f6a816e..5afc1bf8e798 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "pad.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index a17a6dd8d3de..803b98df4858 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c @@ -142,7 +142,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp) return -ENODEV; } - result = min(max(result, (s64)info.min), (s64)info.max); + result = clamp(result, (s64)info.min, (s64)info.max); if (info.link != 0xff) { int ret = nvkm_volt_map(volt, info.link, temp); diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index fbd9af758581..9d4016bd0f44 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -4,6 +4,7 @@ config DRM_OMAP depends on MMU depends on DRM && OF depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB) + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 5f8002f6bb7a..a4ac113e1690 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -139,21 +139,13 @@ static bool omapdss_device_is_connected(struct omap_dss_device *dssdev) } int omapdss_device_connect(struct dss_device *dss, - struct omap_dss_device *src, struct 
omap_dss_device *dst) { - dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n", - src ? dev_name(src->dev) : "NULL", + dev_dbg(&dss->pdev->dev, "connect(%s)\n", dst ? dev_name(dst->dev) : "NULL"); - if (!dst) { - /* - * The destination is NULL when the source is connected to a - * bridge instead of a DSS device. Stop here, we will attach - * the bridge later when we will have a DRM encoder. - */ - return src && src->bridge ? 0 : -EINVAL; - } + if (!dst) + return -EINVAL; if (omapdss_device_is_connected(dst)) return -EBUSY; @@ -163,19 +155,14 @@ int omapdss_device_connect(struct dss_device *dss, return 0; } -void omapdss_device_disconnect(struct omap_dss_device *src, +void omapdss_device_disconnect(struct dss_device *dss, struct omap_dss_device *dst) { - struct dss_device *dss = src ? src->dss : dst->dss; - - dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n", - src ? dev_name(src->dev) : "NULL", + dev_dbg(&dss->pdev->dev, "disconnect(%s)\n", dst ? dev_name(dst->dev) : "NULL"); - if (!dst) { - WARN_ON(!src->bridge); + if (WARN_ON(!dst)) return; - } if (!dst->id && !omapdss_device_is_connected(dst)) { WARN_ON(1); diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 993691b3cc7e..9344855c4887 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -691,11 +691,6 @@ u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, return mgr_desc[channel].sync_lost_irq; } -u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc) -{ - return DISPC_IRQ_FRAMEDONEWB; -} - void dispc_mgr_enable(struct dispc_device *dispc, enum omap_channel channel, bool enable) { @@ -726,30 +721,6 @@ void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel) mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1); } -bool dispc_wb_go_busy(struct dispc_device *dispc) -{ - return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; -} - -void dispc_wb_go(struct dispc_device *dispc) -{ - enum omap_plane_id plane = OMAP_DSS_WB; - bool enable, go; - - enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1; - - if (!enable) - return; - - go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; - if (go) { - DSSERR("GO bit not down for WB\n"); - return; - } - - REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6); -} - static void dispc_ovl_write_firh_reg(struct dispc_device *dispc, enum omap_plane_id plane, int reg, u32 value) @@ -1498,17 +1469,6 @@ void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc, min(high, 0xfffu)); } -void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable) -{ - if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) { - WARN_ON(enable); - return; - } - - DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); - REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 
1 : 0, 14, 14); -} - void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc, enum omap_plane_id plane, u32 *fifo_low, u32 *fifo_high, @@ -2814,95 +2774,6 @@ int dispc_ovl_setup(struct dispc_device *dispc, return r; } -int dispc_wb_setup(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in) -{ - int r; - u32 l; - enum omap_plane_id plane = OMAP_DSS_WB; - const int pos_x = 0, pos_y = 0; - const u8 zorder = 0, global_alpha = 0; - const bool replication = true; - bool truncation; - int in_width = vm->hactive; - int in_height = vm->vactive; - enum omap_overlay_caps caps = - OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA; - - if (vm->flags & DISPLAY_FLAGS_INTERLACED) - in_height /= 2; - - DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, " - "rot %d\n", wi->paddr, wi->p_uv_addr, in_width, - in_height, wi->width, wi->height, wi->fourcc, wi->rotation); - - r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr, - wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width, - wi->height, wi->fourcc, wi->rotation, zorder, - wi->pre_mult_alpha, global_alpha, wi->rotation_type, - replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601, - DRM_COLOR_YCBCR_LIMITED_RANGE); - if (r) - return r; - - switch (wi->fourcc) { - case DRM_FORMAT_RGB565: - case DRM_FORMAT_RGB888: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XRGB4444: - truncation = true; - break; - default: - truncation = false; - break; - } - - /* setup extra DISPC_WB_ATTRIBUTES */ - l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane)); - l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ - l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */ - l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ - if (mem_to_mem) - l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ - else - l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ - dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l); - - if (mem_to_mem) { - /* WBDELAYCOUNT */ - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0); - } else { - u32 wbdelay; - - if (channel_in == DSS_WB_TV_MGR) - wbdelay = vm->vsync_len + vm->vback_porch; - else - wbdelay = vm->vfront_porch + vm->vsync_len + - vm->vback_porch; - - if (vm->flags & DISPLAY_FLAGS_INTERLACED) - wbdelay /= 2; - - wbdelay = min(wbdelay, 255u); - - /* WBDELAYCOUNT */ - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0); - } - - return 0; -} - -bool dispc_has_writeback(struct dispc_device *dispc) -{ - return dispc->feat->has_writeback; -} - int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable) { @@ -3742,23 +3613,6 @@ void dispc_mgr_set_clock_div(struct dispc_device *dispc, cinfo->pck_div); } -int dispc_mgr_get_clock_div(struct dispc_device *dispc, - enum omap_channel channel, - struct dispc_clock_info *cinfo) -{ - unsigned long fck; - - fck = dispc_fclk_rate(dispc); - - cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16); - cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0); - - cinfo->lck = fck / cinfo->lck_div; - cinfo->pck = cinfo->lck / cinfo->pck_div; - - return 0; -} - u32 dispc_read_irqstatus(struct dispc_device *dispc) { return dispc_read_reg(dispc, DISPC_IRQSTATUS); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 4ff02fbc0e71..a8b231ed4f4b 100644 --- 
a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -416,7 +416,6 @@ u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, enum omap_channel channel); u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, enum omap_channel channel); -u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc); u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc); @@ -458,20 +457,11 @@ int dispc_ovl_setup(struct dispc_device *dispc, int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable); -bool dispc_has_writeback(struct dispc_device *dispc); -int dispc_wb_setup(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in); -bool dispc_wb_go_busy(struct dispc_device *dispc); -void dispc_wb_go(struct dispc_device *dispc); - void dispc_enable_sidle(struct dispc_device *dispc); void dispc_disable_sidle(struct dispc_device *dispc); void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable); void dispc_pck_free_enable(struct dispc_device *dispc, bool enable); -void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable); typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck, unsigned long pck, void *data); @@ -494,9 +484,6 @@ void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc, void dispc_mgr_set_clock_div(struct dispc_device *dispc, enum omap_channel channel, const struct dispc_clock_info *cinfo); -int dispc_mgr_get_clock_div(struct dispc_device *dispc, - enum omap_channel channel, - struct dispc_clock_info *cinfo); void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk); #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 040d5a3e33d6..4c22c09c93d5 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -242,9 +242,8 @@ struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); void omapdss_device_put(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node); int omapdss_device_connect(struct dss_device *dss, - struct omap_dss_device *src, struct omap_dss_device *dst); -void omapdss_device_disconnect(struct omap_dss_device *src, +void omapdss_device_disconnect(struct dss_device *dss, struct omap_dss_device *dst); int omap_dss_get_num_overlay_managers(void); diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 1aca3060333e..fcd600024136 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -119,7 +119,7 @@ static u32 dmm_read_wa(struct dmm *dmm, u32 reg) * earlier than the DMA finished writing the value to memory. */ rmb(); - return readl(dmm->wa_dma_data); + return readl((__iomem void *)dmm->wa_dma_data); } static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg) @@ -127,7 +127,7 @@ static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg) dma_addr_t src, dst; int r; - writel(val, dmm->wa_dma_data); + writel(val, (__iomem void *)dmm->wa_dma_data); /* * As per i878 workaround, the DMA is used to access the DMM registers. 
* Make sure that the writel is not moved by the compiler or the CPU, so @@ -411,7 +411,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) */ /* read back to ensure the data is in RAM */ - readl(&txn->last_pat->next_pa); + readl((__iomem void *)&txn->last_pat->next_pa); /* write to PAT_DESCR to clear out any pending transaction */ dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d3eac4817d76..1796cd20a877 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -307,7 +307,7 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; - omapdss_device_disconnect(NULL, pipe->output); + omapdss_device_disconnect(priv->dss, pipe->output); omapdss_device_put(pipe->output); pipe->output = NULL; @@ -325,7 +325,7 @@ static int omap_connect_pipelines(struct drm_device *ddev) int r; for_each_dss_output(output) { - r = omapdss_device_connect(priv->dss, NULL, output); + r = omapdss_device_connect(priv->dss, output); if (r == -EPROBE_DEFER) { omapdss_device_put(output); return r; @@ -647,6 +647,7 @@ static const struct drm_driver omap_drm_driver = { .gem_prime_import = omap_gem_prime_import, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, + OMAP_FBDEV_DRIVER_OPS, .ioctls = ioctls, .num_ioctls = DRM_OMAP_NUM_IOCTLS, .fops = &omapdriver_fops, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 4c7217b35f6b..d903568fd8cc 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -32,6 +32,7 @@ #define MODULE_NAME "omapdrm" struct omap_drm_usergart; +struct omap_fbdev; struct omap_drm_pipeline { struct drm_crtc *crtc; @@ -97,6 +98,8 @@ struct omap_drm_private { /* memory bandwidth limit if it is needed on the platform */ unsigned int max_bandwidth; + + struct omap_fbdev *fbdev; }; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 523be34682ca..f4bd0c6e3f34 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -6,6 +6,7 @@ #include <linux/fb.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> @@ -13,6 +14,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_util.h> #include "omap_drv.h" @@ -26,10 +28,8 @@ module_param_named(ywrap, ywrap_enabled, bool, 0644); * fbdev funcs, to implement legacy fbdev interface on top of drm driver */ -#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base) - struct omap_fbdev { - struct drm_fb_helper base; + struct drm_device *dev; bool ywrap_enabled; /* for deferred dmm roll when getting called in atomic ctx */ @@ -41,7 +41,7 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi); static void pan_worker(struct work_struct *work) { struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); - struct drm_fb_helper *helper = &fbdev->base; + struct drm_fb_helper *helper = fbdev->dev->fb_helper; struct fb_info *fbi = helper->info; struct drm_gem_object *bo = drm_gem_fb_get_obj(helper->fb, 0); int npages; @@ -55,24 +55,25 @@ FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev, drm_fb_helper_damage_range, drm_fb_helper_damage_area) -static int 
omap_fbdev_pan_display(struct fb_var_screeninfo *var, - struct fb_info *fbi) +static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { struct drm_fb_helper *helper = get_fb(fbi); - struct omap_fbdev *fbdev = to_omap_fbdev(helper); + struct omap_drm_private *priv; + struct omap_fbdev *fbdev; if (!helper) goto fallback; + priv = helper->dev->dev_private; + fbdev = priv->fbdev; + if (!fbdev->ywrap_enabled) goto fallback; - if (drm_can_sleep()) { + if (drm_can_sleep()) pan_worker(&fbdev->work); - } else { - struct omap_drm_private *priv = helper->dev->dev_private; + else queue_work(priv->wq, &fbdev->work); - } return 0; @@ -92,7 +93,6 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) struct drm_fb_helper *helper = info->par; struct drm_framebuffer *fb = helper->fb; struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0); - struct omap_fbdev *fbdev = to_omap_fbdev(helper); DBG(); @@ -104,7 +104,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) drm_client_release(&helper->client); drm_fb_helper_unprepare(helper); - kfree(fbdev); + kfree(helper); } /* @@ -125,12 +125,36 @@ static const struct fb_ops omap_fb_ops = { .fb_destroy = omap_fbdev_fb_destroy, }; -static int omap_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) +{ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) + return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + + return 0; +} + +static const struct drm_fb_helper_funcs omap_fbdev_helper_funcs = { + .fb_dirty = omap_fbdev_dirty, +}; + +static struct drm_fb_helper *get_fb(struct fb_info *fbi) +{ + if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) { + /* these are not the fb's you're looking for */ + return NULL; + } + return fbi->par; +} + +int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { - struct omap_fbdev *fbdev = to_omap_fbdev(helper); struct drm_device *dev = helper->dev; struct omap_drm_private *priv = dev->dev_private; + struct omap_fbdev *fbdev = priv->fbdev; struct drm_framebuffer *fb = NULL; union omap_gem_size gsize; struct fb_info *fbi = NULL; @@ -208,6 +232,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, DBG("fbi=%p, dev=%p", fbi, dev); + helper->funcs = &omap_fbdev_helper_funcs; helper->fb = fb; fbi->fbops = &omap_fb_ops; @@ -254,115 +279,21 @@ fail: return ret; } -static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) -{ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) - return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - - return 0; -} - -static const struct drm_fb_helper_funcs omap_fb_helper_funcs = { - .fb_probe = omap_fbdev_create, - .fb_dirty = omap_fbdev_dirty, -}; - -static struct drm_fb_helper *get_fb(struct fb_info *fbi) -{ - if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) { - /* these are not the fb's you're looking for */ - return NULL; - } - return fbi->par; -} - -/* - * struct drm_client - */ - -static void omap_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int 
omap_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int omap_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs omap_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = omap_fbdev_client_unregister, - .restore = omap_fbdev_client_restore, - .hotplug = omap_fbdev_client_hotplug, -}; - void omap_fbdev_setup(struct drm_device *dev) { + struct omap_drm_private *priv = dev->dev_private; struct omap_fbdev *fbdev; - struct drm_fb_helper *helper; - int ret; drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); + fbdev = drmm_kzalloc(dev, sizeof(*fbdev), GFP_KERNEL); if (!fbdev) return; - helper = &fbdev->base; - - drm_fb_helper_prepare(dev, helper, 32, &omap_fb_helper_funcs); - - ret = drm_client_init(dev, &helper->client, "fbdev", &omap_fbdev_client_funcs); - if (ret) - goto err_drm_client_init; - + fbdev->dev = dev; INIT_WORK(&fbdev->work, pan_worker); - drm_client_register(&helper->client); + priv->fbdev = fbdev; - return; - -err_drm_client_init: - drm_fb_helper_unprepare(helper); - kfree(fbdev); + drm_client_setup(dev, NULL); } diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.h b/drivers/gpu/drm/omapdrm/omap_fbdev.h index 74c691a8d45f..283e35b42ada 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.h +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.h @@ -10,10 +10,18 @@ #define __OMAPDRM_FBDEV_H__ struct drm_device; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; #ifdef CONFIG_DRM_FBDEV_EMULATION +int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +#define OMAP_FBDEV_DRIVER_OPS \ + .fbdev_probe = omap_fbdev_driver_fbdev_probe void omap_fbdev_setup(struct drm_device *dev); #else +#define OMAP_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL static inline void omap_fbdev_setup(struct drm_device *dev) { } diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index fdae677558f3..b9c67e4ca360 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1402,8 +1402,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj = to_omap_bo(obj); - mutex_lock(&omap_obj->lock); - omap_obj->sgt = sgt; if (omap_gem_sgt_is_contiguous(sgt, size)) { @@ -1418,21 +1416,17 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); if (!pages) { omap_gem_free_object(obj); - obj = ERR_PTR(-ENOMEM); - goto done; + return ERR_PTR(-ENOMEM); } omap_obj->pages = pages; ret = drm_prime_sg_to_page_array(sgt, pages, npages); if (ret) { omap_gem_free_object(obj); - obj = ERR_PTR(-ENOMEM); - goto done; + return ERR_PTR(-ENOMEM); } } -done: - mutex_unlock(&omap_obj->lock); return obj; } diff --git 
a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index d3a9a9fafe4e..d7469c565d1d 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -378,7 +378,7 @@ config DRM_PANEL_LG_SW43408 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for LG sw43408 panel. @@ -587,7 +587,7 @@ config DRM_PANEL_RAYDIUM_RM692E5 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Raydium RM692E5-based @@ -614,6 +614,34 @@ config DRM_PANEL_RONBO_RB070D30 Say Y here if you want to enable support for Ronbo Electronics RB070D30 1024x600 DSI panel. +config DRM_PANEL_SAMSUNG_AMS581VF01 + tristate "Samsung AMS581VF01 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y or M here if you want to enable support for the + Samsung AMS581VF01 FHD Plus (2340x1080@60Hz) CMD mode panel. + +config DRM_PANEL_SAMSUNG_AMS639RQ08 + tristate "Samsung AMS639RQ08 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y or M here if you want to enable support for the + Samsung AMS639RQ08 FHD Plus (2340x1080@60Hz) CMD mode panel. + +config DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24 + tristate "Samsung AMS427AP24 panel with S6E88A0 controller" + depends on GPIOLIB && OF && REGULATOR + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Samsung AMS427AP24 panel + with S6E88A0 controller (found in Samsung Galaxy S4 Mini Value Edition + GT-I9195I). To compile this driver as a module, choose M here. 
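
Context for the select fixes in this Kconfig diff (DRM_PANEL_LG_SW43408 and DRM_PANEL_RAYDIUM_RM692E5 above, DRM_PANEL_VISIONOX_R66451 further down): these DSI panels use Display Stream Compression, so they depend on the DSC helper library, not the DisplayPort one that was mistakenly selected. A minimal sketch of what such a driver pulls in from DRM_DISPLAY_DSC_HELPER — the function and panel are hypothetical, not code from this series, though the helper calls are the real <drm/drm_mipi_dsi.h> and <drm/display/drm_dsc_helper.h> API:

#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_mipi_dsi.h>

/* Hypothetical enable path for a DSC-capable DSI panel. */
static int example_dsc_panel_enable(struct mipi_dsi_device *dsi,
				    const struct drm_dsc_config *dsc)
{
	struct drm_dsc_picture_parameter_set pps;
	int ret;

	/* Pack the PPS from the DSC config; this packer is what
	 * DRM_DISPLAY_DSC_HELPER actually provides. */
	drm_dsc_pps_payload_pack(&pps, dsc);

	/* Send the PPS to the panel over DSI */
	ret = mipi_dsi_picture_parameter_set(dsi, &pps);
	if (ret < 0)
		return ret;

	/* Tell the panel to start decompressing the incoming stream */
	return mipi_dsi_compression_mode(dsi, true);
}
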
+ config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller" depends on OF @@ -689,6 +717,14 @@ config DRM_PANEL_SAMSUNG_S6E3HA2 depends on BACKLIGHT_CLASS_DEVICE select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_S6E3HA8 + tristate "Samsung S6E3HA8 DSI video mode panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select DRM_DISPLAY_DSC_HELPER + select VIDEOMODE_HELPERS + config DRM_PANEL_SAMSUNG_S6E63J0X03 tristate "Samsung S6E63J0X03 DSI command mode panel" depends on OF @@ -946,7 +982,7 @@ config DRM_PANEL_VISIONOX_R66451 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Visionox diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 987a08702410..7dcf72646cac 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -62,6 +62,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o @@ -70,10 +72,12 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA8) += panel-samsung-s6e3ha8.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 767e47a2b0c1..8566e9cf2f82 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1977,11 +1977,13 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('L', 'G', 'D', 0x0567, &delay_200_500_e200_d200, "Unknown"), EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"), EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"), + EDP_PANEL_ENTRY('L', 'G', 'D', 0x0778, &delay_200_500_e200_d200, "134WT1"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), + EDP_PANEL_ENTRY('S', 'H', 
'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"), diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index 00791ea81e90..b904d5437444 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -50,55 +50,44 @@ static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel) return container_of(panel, struct kd35t133, panel); } -static int kd35t133_init_sequence(struct kd35t133 *ctx) +static void kd35t133_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - struct device *dev = ctx->dev; - /* * Init sequence was supplied by the panel vendor with minimal * documentation. */ - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA, - 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56, - 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA, - 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34, - 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL, - 0x20, 0x02); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3, - 0xa9, 0x51, 0x2c, 0x82); - mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0); - - dev_dbg(dev, "Panel init sequence done\n"); - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POSITIVEGAMMA, + 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56, + 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_NEGATIVEGAMMA, + 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34, + 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POWERCONTROL2, 0x41); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x48); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_INTERFACEMODECTRL, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_FRAMERATECTRL, 0xa0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_DISPLAYFUNCTIONCTRL, + 0x20, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_SETIMAGEFUNCTION, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_ADJUSTCONTROL3, + 0xa9, 0x51, 0x2c, 0x82); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_ENTER_INVERT_MODE); } static int kd35t133_unprepare(struct drm_panel *panel) { struct kd35t133 *ctx = panel_to_kd35t133(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx 
= { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) - dev_err(ctx->dev, "failed to set display off: %d\n", ret); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; gpiod_set_value_cansleep(ctx->reset_gpio, 1); @@ -112,18 +101,20 @@ static int kd35t133_prepare(struct drm_panel *panel) { struct kd35t133 *ctx = panel_to_kd35t133(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dev_dbg(ctx->dev, "Resetting the panel\n"); - ret = regulator_enable(ctx->vdd); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret); - return ret; + dsi_ctx.accum_err = regulator_enable(ctx->vdd); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", + dsi_ctx.accum_err); + return dsi_ctx.accum_err; } - ret = regulator_enable(ctx->iovcc); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); + dsi_ctx.accum_err = regulator_enable(ctx->iovcc); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", + dsi_ctx.accum_err); goto disable_vdd; } @@ -135,27 +126,18 @@ static int kd35t133_prepare(struct drm_panel *panel) msleep(20); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); - goto disable_iovcc; - } + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 250); - msleep(250); + kd35t133_init_sequence(&dsi_ctx); + if (!dsi_ctx.accum_err) + dev_dbg(ctx->dev, "Panel init sequence done\n"); - ret = kd35t133_init_sequence(ctx); - if (ret < 0) { - dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); - goto disable_iovcc; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to set display on: %d\n", ret); + if (dsi_ctx.accum_err) goto disable_iovcc; - } - - msleep(50); return 0; @@ -163,7 +145,7 @@ disable_iovcc: regulator_disable(ctx->iovcc); disable_vdd: regulator_disable(ctx->vdd); - return ret; + return dsi_ctx.accum_err; } static const struct drm_display_mode default_mode = { diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c index 466c27012abf..47bce087e339 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c +++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c @@ -56,198 +56,173 @@ static void hx83112a_reset(struct hx83112a_panel *ctx) msleep(50); } -static int hx83112a_on(struct hx83112a_panel *ctx) +static int hx83112a_on(struct mipi_dsi_device *dsi) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETEXTC, 0x83, 0x11, 0x2a); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER1, - 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDISP, - 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19, - 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV, - 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0, - 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff, - 0x00, 0x00, 
0x14, 0x15, 0x00, 0x29, 0x11, 0x07, - 0x12, 0x00, 0x29); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV, - 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00, - 0x53); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, - 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, - 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, - 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, - 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, - 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, - 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, - 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, - 0x40); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, - 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, - 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, - 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, - 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, - 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, - 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, - 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, - 0x40); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, - 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, - 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, - 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, - 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, - 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, - 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, - 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, - 0x40); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTCON, - 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPANEL, 0x08); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER2, 0x2b, 0x2b); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08, - 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07, - 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10, - 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31, - 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00, - 0x0f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0, - 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP1, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18, - 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, - 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03, - 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31, - 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP2, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, - 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, - 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00, - 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31, - 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, - 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 
0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e, - 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa, - 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff, - 0xff, 0xff, 0xff, 0xff); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, - 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50, - 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05, - 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, - 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, - 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc3); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETCLOCK, 0xd1, 0xd6); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc6); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPTBA, 0x37); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f); - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(150); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(50); - - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETEXTC, 0x83, 0x11, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPOWER1, + 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDISP, + 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19, + 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDRV, + 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0, + 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff, + 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07, + 0x12, 0x00, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDRV, + 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00, + 0x53); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 
0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTCON, + 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPANEL, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPOWER2, 0x2b, 0x2b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08, + 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07, + 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10, + 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00, + 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP0, + 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP1, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, + 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03, + 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP2, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, + 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00, + 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, + 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e, + 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff, + 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1, + 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50, + 0x20, 
0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05, + 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1, + 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1, + 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETCLOCK, 0xd1, 0xd6); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0x3f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0xc6); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPTBA, 0x37); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0x3f); + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 150); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); + + return dsi_ctx.accum_err; } static int hx83112a_disable(struct drm_panel *panel) { struct hx83112a_panel *ctx = to_hx83112a_panel(panel); struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(20); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); - return 0; + return dsi_ctx.accum_err; } static int hx83112a_prepare(struct drm_panel *panel) { struct hx83112a_panel *ctx = to_hx83112a_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators: %d\n", ret); + if (ret < 0) return ret; - } hx83112a_reset(ctx); - ret = hx83112a_on(ctx); + ret = hx83112a_on(ctx->dsi); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - return ret; } - return 0; + return ret; } static int hx83112a_unprepare(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 4a6dcfd781e8..94b7dfef3b5e 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -318,7 +318,7 @@ static int ili9322_regmap_spi_read(void *context, const void *reg, return spi_write_then_read(spi, buf, 1, val, 1); } -static struct regmap_bus ili9322_regmap_bus = { +static const struct regmap_bus ili9322_regmap_bus = { .write = ili9322_regmap_spi_write, .read = ili9322_regmap_spi_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 1fbc5d433d75..ff39f5dd4097 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ 
b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -13,9 +13,6 @@ * Derived from drivers/drm/gpu/panel/panel-ilitek-ili9322.c * the reuse of DBI abstraction part referred from Linus's patch * "drm/panel: s6e63m0: Switch to DBI abstraction for SPI" - * - * For only-dbi part, copy from David's code (drm/tiny/ili9341.c) - * Copyright 2018 David Lechner <david@lechnology.com> */ #include <linux/backlight.h> @@ -486,176 +483,6 @@ static const struct drm_panel_funcs ili9341_dpi_funcs = { .get_modes = ili9341_dpi_get_modes, }; -static void ili9341_dbi_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) -{ - struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); - struct mipi_dbi *dbi = &dbidev->dbi; - u8 addr_mode; - int ret, idx; - - if (!drm_dev_enter(pipe->crtc.dev, &idx)) - return; - - ret = mipi_dbi_poweron_conditional_reset(dbidev); - if (ret < 0) - goto out_exit; - if (ret == 1) - goto out_enable; - - mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); - - mipi_dbi_command(dbi, ILI9341_POWERB, 0x00, 0xc1, 0x30); - mipi_dbi_command(dbi, ILI9341_POWER_SEQ, 0x64, 0x03, 0x12, 0x81); - mipi_dbi_command(dbi, ILI9341_DTCA, 0x85, 0x00, 0x78); - mipi_dbi_command(dbi, ILI9341_POWERA, 0x39, 0x2c, 0x00, 0x34, 0x02); - mipi_dbi_command(dbi, ILI9341_PRC, ILI9341_DBI_PRC_NORMAL); - mipi_dbi_command(dbi, ILI9341_DTCB, 0x00, 0x00); - - /* Power Control */ - mipi_dbi_command(dbi, ILI9341_POWER1, ILI9341_DBI_VCOMH_4P6V); - mipi_dbi_command(dbi, ILI9341_POWER2, ILI9341_DBI_PWR_2_DEFAULT); - /* VCOM */ - mipi_dbi_command(dbi, ILI9341_VCOM1, ILI9341_DBI_VCOM_1_VMH_4P25V, - ILI9341_DBI_VCOM_1_VML_1P5V); - mipi_dbi_command(dbi, ILI9341_VCOM2, ILI9341_DBI_VCOM_2_DEC_58); - - /* Memory Access Control */ - mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, - MIPI_DCS_PIXEL_FMT_16BIT); - - /* Frame Rate */ - mipi_dbi_command(dbi, ILI9341_FRC, ILI9341_DBI_FRC_DIVA & 0x03, - ILI9341_DBI_FRC_RTNA & 0x1f); - - /* Gamma */ - mipi_dbi_command(dbi, ILI9341_3GAMMA_EN, 0x00); - mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, ILI9341_GAMMA_CURVE_1); - mipi_dbi_command(dbi, ILI9341_PGAMMA, - 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1, - 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00); - mipi_dbi_command(dbi, ILI9341_NGAMMA, - 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1, - 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f); - - /* DDRAM */ - mipi_dbi_command(dbi, ILI9341_ETMOD, ILI9341_DBI_EMS_GAS | - ILI9341_DBI_EMS_DTS | - ILI9341_DBI_EMS_GON); - - /* Display */ - mipi_dbi_command(dbi, ILI9341_DFC, 0x08, 0x82, 0x27, 0x00); - mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); - msleep(100); - - mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); - msleep(100); - -out_enable: - switch (dbidev->rotation) { - default: - addr_mode = ILI9341_MADCTL_MX; - break; - case 90: - addr_mode = ILI9341_MADCTL_MV; - break; - case 180: - addr_mode = ILI9341_MADCTL_MY; - break; - case 270: - addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | - ILI9341_MADCTL_MX; - break; - } - - addr_mode |= ILI9341_MADCTL_BGR; - mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); - mipi_dbi_enable_flush(dbidev, crtc_state, plane_state); - drm_info(&dbidev->drm, "Initialized display serial interface\n"); -out_exit: - drm_dev_exit(idx); -} - -static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = { - DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable), -}; - -static const struct drm_display_mode ili9341_dbi_mode = { - DRM_SIMPLE_MODE(240, 320, 37, 49), -}; - 
-DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops); - -static struct drm_driver ili9341_dbi_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .fops = &ili9341_dbi_fops, - DRM_GEM_DMA_DRIVER_OPS_VMAP, - .debugfs_init = mipi_dbi_debugfs_init, - .name = "ili9341", - .desc = "Ilitek ILI9341", - .date = "20210716", - .major = 1, - .minor = 0, -}; - -static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc, - struct gpio_desc *reset) -{ - struct device *dev = &spi->dev; - struct mipi_dbi_dev *dbidev; - struct mipi_dbi *dbi; - struct drm_device *drm; - struct regulator *vcc; - u32 rotation = 0; - int ret; - - vcc = devm_regulator_get_optional(dev, "vcc"); - if (IS_ERR(vcc)) { - dev_err(dev, "get optional vcc failed\n"); - vcc = NULL; - } - - dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver, - struct mipi_dbi_dev, drm); - if (IS_ERR(dbidev)) - return PTR_ERR(dbidev); - - dbi = &dbidev->dbi; - drm = &dbidev->drm; - dbi->reset = reset; - dbidev->regulator = vcc; - - drm_mode_config_init(drm); - - dbidev->backlight = devm_of_find_backlight(dev); - if (IS_ERR(dbidev->backlight)) - return PTR_ERR(dbidev->backlight); - - device_property_read_u32(dev, "rotation", &rotation); - - ret = mipi_dbi_spi_init(spi, dbi, dc); - if (ret) - return ret; - - ret = mipi_dbi_dev_init(dbidev, &ili9341_dbi_funcs, - &ili9341_dbi_mode, rotation); - if (ret) - return ret; - - drm_mode_config_reset(drm); - - ret = drm_dev_register(drm, 0); - if (ret) - return ret; - - spi_set_drvdata(spi, drm); - - drm_fbdev_dma_setup(drm, 0); - - return 0; -} - static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc, struct gpio_desc *reset) { @@ -711,7 +538,6 @@ static int ili9341_probe(struct spi_device *spi) struct device *dev = &spi->dev; struct gpio_desc *dc; struct gpio_desc *reset; - const struct spi_device_id *id = spi_get_device_id(spi); reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset)) @@ -721,36 +547,15 @@ static int ili9341_probe(struct spi_device *spi) if (IS_ERR(dc)) return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n"); - if (!strcmp(id->name, "sf-tc240t-9370-t")) - return ili9341_dpi_probe(spi, dc, reset); - - if (!strcmp(id->name, "yx240qv29")) - return ili9341_dbi_probe(spi, dc, reset); - - return -ENODEV; + return ili9341_dpi_probe(spi, dc, reset); } static void ili9341_remove(struct spi_device *spi) { - const struct spi_device_id *id = spi_get_device_id(spi); struct ili9341 *ili = spi_get_drvdata(spi); - struct drm_device *drm = spi_get_drvdata(spi); - - if (!strcmp(id->name, "sf-tc240t-9370-t")) { - ili9341_dpi_power_off(ili); - drm_panel_remove(&ili->panel); - } else if (!strcmp(id->name, "yx240qv29")) { - drm_dev_unplug(drm); - drm_atomic_helper_shutdown(drm); - } -} -static void ili9341_shutdown(struct spi_device *spi) -{ - const struct spi_device_id *id = spi_get_device_id(spi); - - if (!strcmp(id->name, "yx240qv29")) - drm_atomic_helper_shutdown(spi_get_drvdata(spi)); + ili9341_dpi_power_off(ili); + drm_panel_remove(&ili->panel); } static const struct of_device_id ili9341_of_match[] = { @@ -758,19 +563,11 @@ static const struct of_device_id ili9341_of_match[] = { .compatible = "st,sf-tc240t-9370-t", .data = &ili9341_stm32f429_disco_data, }, - { - /* porting from tiny/ili9341.c - * for original mipi dbi compitable - */ - .compatible = "adafruit,yx240qv29", - .data = NULL, - }, { } }; MODULE_DEVICE_TABLE(of, ili9341_of_match); static const struct spi_device_id ili9341_id[] = { - { "yx240qv29", 0 }, { 
"sf-tc240t-9370-t", 0 }, { } }; @@ -779,7 +576,6 @@ MODULE_DEVICE_TABLE(spi, ili9341_id); static struct spi_driver ili9341_driver = { .probe = ili9341_probe, .remove = ili9341_remove, - .shutdown = ili9341_shutdown, .id_table = ili9341_id, .driver = { .name = "panel-ilitek-ili9341", diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 084c37fa7348..28cd7560e5db 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -42,6 +42,7 @@ struct ili9881c_desc { const size_t init_length; const struct drm_display_mode *mode; const unsigned long mode_flags; + u8 default_address_mode; }; struct ili9881c { @@ -53,6 +54,7 @@ struct ili9881c { struct gpio_desc *reset; enum drm_panel_orientation orientation; + u8 address_mode; }; #define ILI9881C_SWITCH_PAGE_INSTR(_page) \ @@ -815,8 +817,6 @@ static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_COMMAND_INSTR(0xd1, 0x4b), ILI9881C_COMMAND_INSTR(0xd2, 0x60), ILI9881C_COMMAND_INSTR(0xd3, 0x39), - ILI9881C_SWITCH_PAGE_INSTR(0), - ILI9881C_COMMAND_INSTR(0x36, 0x03), }; static const struct ili9881c_instr w552946ab_init[] = { @@ -1299,6 +1299,14 @@ static int ili9881c_prepare(struct drm_panel *panel) if (ret) return ret; + if (ctx->address_mode) { + ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_ADDRESS_MODE, + &ctx->address_mode, + sizeof(ctx->address_mode)); + if (ret < 0) + return ret; + } + ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret) return ret; @@ -1463,6 +1471,10 @@ static int ili9881c_get_modes(struct drm_panel *panel, connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; + if (ctx->address_mode == 0x3) + connector->display_info.subpixel_order = SubPixelHorizontalBGR; + else + connector->display_info.subpixel_order = SubPixelHorizontalRGB; /* * TODO: Remove once all drm drivers call @@ -1521,6 +1533,12 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) return ret; } + ctx->address_mode = ctx->desc->default_address_mode; + if (ctx->orientation == DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP) { + ctx->address_mode ^= 0x03; + ctx->orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; + } + ctx->panel.prepare_prev_first = true; ret = drm_panel_of_backlight(&ctx->panel); @@ -1572,6 +1590,7 @@ static const struct ili9881c_desc tl050hdv35_desc = { .mode = &tl050hdv35_default_mode, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_LPM, + .default_address_mode = 0x03, }; static const struct ili9881c_desc w552946aba_desc = { diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 44897e5218a6..45d09e6fa667 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -26,7 +26,6 @@ struct jadard_panel_desc { unsigned int lanes; enum mipi_dsi_pixel_format format; int (*init)(struct jadard *jadard); - u32 num_init_cmds; bool lp11_before_reset; bool reset_before_power_off_vcioo; unsigned int vcioo_to_lp11_delay_ms; diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c index 14932cb3defc..0e5e8e57bd1e 100644 --- a/drivers/gpu/drm/panel/panel-khadas-ts050.c +++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c @@ -617,12 +617,12 @@ static const struct khadas_ts050_panel_cmd ts050_init_code[] = { {0xd4, {0x04}, 0x01}, /* RGBMIPICTRL: VSYNC front porch = 4 */ }; 
-struct khadas_ts050_panel_data ts050_panel_data = { +static struct khadas_ts050_panel_data ts050_panel_data = { .init_code = (struct khadas_ts050_panel_cmd *)ts050_init_code, .len = ARRAY_SIZE(ts050_init_code) }; -struct khadas_ts050_panel_data ts050v2_panel_data = { +static struct khadas_ts050_panel_data ts050v2_panel_data = { .init_code = (struct khadas_ts050_panel_cmd *)ts050v2_init_code, .len = ARRAY_SIZE(ts050v2_init_code) }; diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c index 292aa26a456d..77f74e6c467e 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -26,7 +26,7 @@ struct ltk050h3146w; struct ltk050h3146w_desc { const unsigned long mode_flags; const struct drm_display_mode *mode; - int (*init)(struct ltk050h3146w *ctx); + void (*init)(struct mipi_dsi_multi_context *dsi_ctx); }; struct ltk050h3146w { @@ -243,67 +243,57 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) return container_of(panel, struct ltk050h3146w, panel); } -static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3148w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. */ - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0xff, 0x83, 0x94); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, - 0x71, 0x31, 0x55, 0x2f); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, - 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, - 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); - mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, - 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, - 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, - 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, - 0x17, 0x11, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, - 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, - 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); - mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, - 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, - 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, - 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, - 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, - 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, - 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, - 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, - 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, - 0x5d, 0x61, 0x65, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x0b); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x1f, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0xc4, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef); - mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02); - - ret = 
mipi_dsi_dcs_set_tear_on(dsi, 1); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, + 0x71, 0x31, 0x55, 0x2f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x88); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, + 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, + 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, + 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, + 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, + 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, + 0x17, 0x11, 0x40); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, + 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, + 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, + 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, + 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, + 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, + 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, + 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, + 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, + 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, + 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, + 0x5d, 0x61, 0x65, 0x7f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcc, 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x1f, 0x31); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0xc4, 0xc4); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc6, 0xef); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x02); + + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1); + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3148w_mode = { @@ -327,74 +317,64 @@ static const struct ltk050h3146w_desc ltk050h3148w_data = { MIPI_DSI_MODE_VIDEO_BURST, }; -static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3146w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. 
*/ - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, - 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); - - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, - 0x28, 0x04, 0xcc, 0xcc, 0xcc); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12); - mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80, - 0x80); - mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, - 0x16, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, - 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, - 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, - 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, - 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, - 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, - 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, - 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, - 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, - 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, - 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, - 0x21, 0x00, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); - mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00); - - ret = mipi_dsi_dcs_set_tear_on(dsi, 1); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdf, 0x93, 0x65, 0xf8); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, + 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0xb5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x00, 0xb5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); + + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0x00, 0xc4, 0x23, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, + 0x28, 0x04, 0xcc, 0xcc, 0xcc); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x0f, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbe, 0x1e, 0xf2); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x26, 0x03); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x00, 0x12); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc3, 
0x04, 0x02, 0x02, 0x76, 0x01, 0x80, + 0x80); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, + 0x16, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, + 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, + 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, + 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, + 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, + 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, + 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, + 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, + 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, + 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, + 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, + 0x21, 0x00, 0x60); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdd, 0x2c, 0xa3, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x32, 0x1c); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x3b, 0x70, 0x00, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x11); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc2, 0x20, 0x38, 0x1e, 0x84); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x00); + + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1); + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3146w_mode = { @@ -418,79 +398,42 @@ static const struct ltk050h3146w_desc ltk050h3146w_data = { MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; -static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) +static void ltk050h3146w_a2_select_page(struct mipi_dsi_multi_context *dsi_ctx, int page) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - u8 d[3] = { 0x98, 0x81, page }; + u8 d[4] = { 0xff, 0x98, 0x81, page }; - return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d)); + mipi_dsi_dcs_write_buffer_multi(dsi_ctx, d, ARRAY_SIZE(d)); } -static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page, +static void ltk050h3146w_a2_write_page(struct mipi_dsi_multi_context *dsi_ctx, int page, const struct ltk050h3146w_cmd *cmds, int num) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int i, ret; + ltk050h3146w_a2_select_page(dsi_ctx, page); - ret = ltk050h3146w_a2_select_page(ctx, page); - if (ret < 0) { - dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret); - return ret; - } - - for (i = 0; i < num; i++) { - ret = mipi_dsi_generic_write(dsi, &cmds[i], + for (int i = 0; i < num; i++) + mipi_dsi_generic_write_multi(dsi_ctx, &cmds[i], sizeof(struct ltk050h3146w_cmd)); - if (ret < 0) { - dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret); - return ret; - } - } - - return 0; } -static int 
ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3146w_a2_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. */ - ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 3, page3_cmds, ARRAY_SIZE(page3_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 4, page4_cmds, ARRAY_SIZE(page4_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 1, page1_cmds, ARRAY_SIZE(page1_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_select_page(ctx, 0); - if (ret < 0) { - dev_err(ctx->dev, "failed to select page 0: %d\n", ret); - return ret; - } + ltk050h3146w_a2_select_page(dsi_ctx, 0); /* vendor code called this without param, where there should be one */ - ret = mipi_dsi_dcs_set_tear_on(dsi, 0); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 0); - return 0; + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3146w_a2_mode = { @@ -518,19 +461,12 @@ static int ltk050h3146w_unprepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to set display off: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; regulator_disable(ctx->iovcc); regulator_disable(ctx->vci); @@ -542,17 +478,17 @@ static int ltk050h3146w_prepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dev_dbg(ctx->dev, "Resetting the panel\n"); - ret = regulator_enable(ctx->vci); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret); - return ret; + dsi_ctx.accum_err = regulator_enable(ctx->vci); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable vci supply: %d\n", dsi_ctx.accum_err); + return dsi_ctx.accum_err; } - ret = regulator_enable(ctx->iovcc); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); + dsi_ctx.accum_err = regulator_enable(ctx->iovcc); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", dsi_ctx.accum_err); goto disable_vci; } @@ -561,28 +497,15 @@ static int ltk050h3146w_prepare(struct drm_panel *panel) gpiod_set_value_cansleep(ctx->reset_gpio, 0); msleep(20); - ret = ctx->panel_desc->init(ctx); - if (ret < 0) { - dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); - goto disable_iovcc; - } - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); - goto disable_iovcc; - } - + ctx->panel_desc->init(&dsi_ctx); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); /* T9: 120ms 
*/ - msleep(120); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to set display on: %d\n", ret); + if (dsi_ctx.accum_err) goto disable_iovcc; - } - - msleep(50); return 0; @@ -590,7 +513,7 @@ disable_iovcc: regulator_disable(ctx->iovcc); disable_vci: regulator_disable(ctx->vci); - return ret; + return dsi_ctx.accum_err; } static int ltk050h3146w_get_modes(struct drm_panel *panel, diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c index d3baccfe6286..06e16a7c14a7 100644 --- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c +++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c @@ -917,7 +917,7 @@ static const struct nv3052c_panel_info wl_355608_a8_panel_info = { static const struct spi_device_id nv3052c_ids[] = { { "ltk035c5444t", }, { "fs035vg158", }, - { "wl-355608-a8", }, + { "rg35xx-plus-panel", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(spi, nv3052c_ids); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index 57686340de49..549b86f2cc28 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -38,6 +38,7 @@ #define NT35510_CMD_CORRECT_GAMMA BIT(0) #define NT35510_CMD_CONTROL_DISPLAY BIT(1) +#define NT35510_CMD_SETVCMOFF BIT(2) #define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */ #define MCS_CMD_READ_ID1 0xDA @@ -721,11 +722,13 @@ static int nt35510_setup_power(struct nt35510 *nt) if (ret) return ret; - ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF, - NT35510_P1_VCMOFF_LEN, - nt->conf->vcmoff); - if (ret) - return ret; + if (nt->conf->cmds & NT35510_CMD_SETVCMOFF) { + ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF, + NT35510_P1_VCMOFF_LEN, + nt->conf->vcmoff); + if (ret) + return ret; + } /* Typically 10 ms */ usleep_range(10000, 20000); @@ -1319,7 +1322,7 @@ static const struct nt35510_config nt35510_frida_frd400b25025 = { }, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM, - .cmds = NT35510_CMD_CONTROL_DISPLAY, + .cmds = NT35510_CMD_CONTROL_DISPLAY | NT35510_CMD_SETVCMOFF, /* 0x03: AVDD = 6.2V */ .avdd = { 0x03, 0x03, 0x03 }, /* 0x46: PCK = 2 x Hsync, BTP = 2.5 x VDDB */ diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c index 18bd2ee71201..04f1d2676c78 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c @@ -1095,18 +1095,6 @@ static int nt36523_unprepare(struct drm_panel *panel) static void nt36523_remove(struct mipi_dsi_device *dsi) { struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi); - int ret; - - ret = mipi_dsi_detach(pinfo->dsi[0]); - if (ret < 0) - dev_err(&dsi->dev, "failed to detach from DSI0 host: %d\n", ret); - - if (pinfo->desc->is_dual_dsi) { - ret = mipi_dsi_detach(pinfo->dsi[1]); - if (ret < 0) - dev_err(&pinfo->dsi[1]->dev, "failed to detach from DSI1 host: %d\n", ret); - mipi_dsi_device_unregister(pinfo->dsi[1]); - } drm_panel_remove(&pinfo->panel); } @@ -1251,7 +1239,7 @@ static int nt36523_probe(struct mipi_dsi_device *dsi) if (!dsi1_host) return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n"); - pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info); + pinfo->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi1_host, info); if 
(IS_ERR(pinfo->dsi[1])) { dev_err(dev, "cannot get secondary DSI device\n"); return PTR_ERR(pinfo->dsi[1]); @@ -1288,7 +1276,7 @@ static int nt36523_probe(struct mipi_dsi_device *dsi) pinfo->dsi[i]->format = pinfo->desc->format; pinfo->dsi[i]->mode_flags = pinfo->desc->mode_flags; - ret = mipi_dsi_attach(pinfo->dsi[i]); + ret = devm_mipi_dsi_attach(dev, pinfo->dsi[i]); if (ret < 0) return dev_err_probe(dev, ret, "cannot attach to DSI%d host.\n", i); } diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c index 4dca6802faef..d3071c01aaea 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c @@ -46,108 +46,73 @@ static void rm69380_reset(struct rm69380_panel *ctx) static int rm69380_on(struct rm69380_panel *ctx) { struct mipi_dsi_device *dsi = ctx->dsi[0]; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; if (ctx->dsi[1]) ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd4); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd0); - mipi_dsi_dcs_write_seq(dsi, 0x48, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x26); - mipi_dsi_dcs_write_seq(dsi, 0x75, 0x3f); - mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28); - mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x08); - - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(20); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(36); - - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x26); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x3f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08); + + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 36); + + return dsi_ctx.accum_err; } -static int rm69380_off(struct rm69380_panel *ctx) +static void rm69380_off(struct rm69380_panel *ctx) { struct mipi_dsi_device *dsi = ctx->dsi[0]; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; if (ctx->dsi[1]) ctx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(35); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(20); - - return 0; + 
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 35); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); } static int rm69380_prepare(struct drm_panel *panel) { struct rm69380_panel *ctx = to_rm69380_panel(panel); - struct device *dev = &ctx->dsi[0]->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators: %d\n", ret); + if (ret < 0) return ret; - } rm69380_reset(ctx); ret = rm69380_on(ctx); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - return ret; } - return 0; + return ret; } static int rm69380_unprepare(struct drm_panel *panel) { struct rm69380_panel *ctx = to_rm69380_panel(panel); - struct device *dev = &ctx->dsi[0]->dev; - int ret; - ret = rm69380_off(ctx); - if (ret < 0) - dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + rm69380_off(ctx); gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c new file mode 100644 index 000000000000..cf6186312252 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer Command Set */ +#define MCS_ACCESS_PROT_OFF 0xb0 +#define MCS_PASSWD 0xf0 + +struct ams581vf01 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data ams581vf01_supplies[] = { + { .supply = "vdd3p3" }, + { .supply = "vddio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct ams581vf01 *to_ams581vf01(struct drm_panel *panel) +{ + return container_of(panel, struct ams581vf01, panel); +} + +static void ams581vf01_reset(struct ams581vf01 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int ams581vf01_on(struct ams581vf01 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Sleep Out, Wait 10ms */ + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + usleep_range(10000, 11000); + + /* TE On */ + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + /* MIC Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xeb, 0x17, + 0x41, 0x92, + 0x0e, 0x10, + 0x82, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + + /* Column & Page Address Setting */ + mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437); + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923); + + /* Brightness Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); 
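Every conversion in this series leans on the same mipi_dsi_multi_context convention: each *_multi() helper becomes a no-op once accum_err is non-zero, and the first failure is latched so the caller checks a single error code at the end of the sequence. A minimal sketch of that convention — simplified, not the exact helper from drm_mipi_dsi.c:

static void example_dcs_write_multi(struct mipi_dsi_multi_context *ctx,
				    const void *payload, size_t size)
{
	ssize_t ret;

	/* Once any step has failed, all later steps become no-ops. */
	if (ctx->accum_err)
		return;

	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, payload, size);
	if (ret < 0) {
		/* Latch the first error; the caller checks it once. */
		ctx->accum_err = ret;
		dev_err(&ctx->dsi->dev, "DCS write of %zu bytes failed: %d\n",
			size, ctx->accum_err);
	}
}

This is why the converted on()/prepare() paths read as straight-line command sequences ending in a single return dsi_ctx.accum_err, instead of an if (ret < 0) ladder after every write.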
+ + /* Horizontal & Vertical sync Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x09); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x11, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + mipi_dsi_msleep(&dsi_ctx, 110); + + /* Display On */ + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static void ams581vf01_off(struct ams581vf01 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Display Off & Sleep In */ + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + /* VCI operating mode change */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + + mipi_dsi_msleep(&dsi_ctx, 120); +} + +static int ams581vf01_prepare(struct drm_panel *panel) +{ + struct ams581vf01 *ctx = to_ams581vf01(panel); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + if (ret < 0) + return ret; + + ams581vf01_reset(ctx); + + ret = ams581vf01_on(ctx); + if (ret < 0) { + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int ams581vf01_unprepare(struct drm_panel *panel) +{ + struct ams581vf01 *ctx = to_ams581vf01(panel); + + ams581vf01_off(ctx); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams581vf01_mode = { + .clock = (1080 + 32 + 73 + 98) * (2340 + 8 + 1 + 8) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 32, + .hsync_end = 1080 + 32 + 73, + .htotal = 1080 + 32 + 73 + 98, + .vdisplay = 2340, + .vsync_start = 2340 + 8, + .vsync_end = 2340 + 8 + 1, + .vtotal = 2340 + 8 + 1 + 8, + .width_mm = 62, + .height_mm = 134, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int ams581vf01_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &ams581vf01_mode); +} + +static const struct drm_panel_funcs ams581vf01_panel_funcs = { + .prepare = ams581vf01_prepare, + .unprepare = ams581vf01_unprepare, + .get_modes = ams581vf01_get_modes, +}; + +static int ams581vf01_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops ams581vf01_bl_ops = { + .update_status = ams581vf01_bl_update_status, +}; + +static struct backlight_device * +ams581vf01_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 511, + .max_brightness = 1023, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &ams581vf01_bl_ops, &props); +} + +static int ams581vf01_probe(struct 
mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct ams581vf01 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(&dsi->dev, + ARRAY_SIZE(ams581vf01_supplies), + ams581vf01_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = ams581vf01_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void ams581vf01_remove(struct mipi_dsi_device *dsi) +{ + struct ams581vf01 *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id ams581vf01_of_match[] = { + { .compatible = "samsung,ams581vf01" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ams581vf01_of_match); + +static struct mipi_dsi_driver ams581vf01_driver = { + .probe = ams581vf01_probe, + .remove = ams581vf01_remove, + .driver = { + .name = "panel-samsung-ams581vf01", + .of_match_table = ams581vf01_of_match, + }, +}; +module_mipi_dsi_driver(ams581vf01_driver); + +MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>"); +MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS581VF01 cmd mode dsi panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c new file mode 100644 index 000000000000..817365cb5e46 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer Command Set */ +#define MCS_ACCESS_PROT_OFF 0xb0 +#define MCS_UNKNOWN_B7 0xb7 +#define MCS_BIAS_CURRENT_CTRL 0xd1 +#define MCS_PASSWD1 0xf0 +#define MCS_PASSWD2 0xfc +#define MCS_UNKNOWN_FF 0xff + +struct ams639rq08 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data ams639rq08_supplies[] = { + { .supply = "vdd3p3" }, + { .supply = "vddio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct ams639rq08 *to_ams639rq08(struct drm_panel *panel) +{ + return container_of(panel, struct ams639rq08, panel); +} + +static void ams639rq08_reset(struct ams639rq08 *ctx) +{ + 
gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int ams639rq08_on(struct ams639rq08 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Delay 2ms for VCI1 power */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_FF, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_BIAS_CURRENT_CTRL, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5); + + /* Sleep Out */ + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + usleep_range(10000, 11000); + + /* TE OUT (Vsync On) */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + /* DBV Smooth Transition */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_B7, 0x01, 0x4b); + + /* Edge Dimming Speed Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_B7, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + + /* Page Address Set */ + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a); + + /* Set DDIC internal HFP */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x23); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_BIAS_CURRENT_CTRL, 0x11); + + /* OFC Setting 84.1 Mhz */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe9, 0x11, 0x55, + 0xa6, 0x75, 0xa3, + 0xb9, 0xa1, 0x4a, + 0x00, 0x1a, 0xb8); + + /* Err_FG Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, + 0x00, 0x00, 0x02, + 0x02, 0x42, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, + 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x19); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + + /* Brightness Control */ + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000); + + /* Display On */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_msleep(&dsi_ctx, 67); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static void ams639rq08_off(struct ams639rq08 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); +} + +static int ams639rq08_prepare(struct drm_panel *panel) +{ + struct ams639rq08 *ctx = to_ams639rq08(panel); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ams639rq08_supplies), + ctx->supplies); + if (ret < 0) + return ret; + + ams639rq08_reset(ctx); + + ret = ams639rq08_on(ctx); + if (ret < 0) { + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + 
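The Samsung init sequences here bracket every manufacturer command with a password unlock (0x5a, 0x5a) / lock (0xa5, 0xa5) pair on the MCS_PASSWD registers; the s6e3ha8 driver further down wraps the same idea in test-key macros. A hedged sketch of such bracketing — the example_* names are illustrative, not from the driver:

#define example_mcs_unlock(ctx, reg) \
	mipi_dsi_dcs_write_seq_multi(ctx, reg, 0x5a, 0x5a)
#define example_mcs_lock(ctx, reg) \
	mipi_dsi_dcs_write_seq_multi(ctx, reg, 0xa5, 0xa5)

/* Usage: a vendor command only takes effect while the level key
 * it lives behind is held open. */
static void example_set_bias(struct mipi_dsi_multi_context *ctx)
{
	example_mcs_unlock(ctx, 0xf0);			/* MCS_PASSWD1 */
	mipi_dsi_dcs_write_seq_multi(ctx, 0xd1, 0x01);	/* bias current */
	example_mcs_lock(ctx, 0xf0);
}

Because these macros build on the *_multi() helpers, the whole bracketed group inherits the error-accumulation behaviour for free.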
regulator_bulk_disable(ARRAY_SIZE(ams639rq08_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int ams639rq08_unprepare(struct drm_panel *panel) +{ + struct ams639rq08 *ctx = to_ams639rq08(panel); + + ams639rq08_off(ctx); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams639rq08_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams639rq08_mode = { + .clock = (1080 + 64 + 20 + 64) * (2340 + 64 + 20 + 64) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 64, + .hsync_end = 1080 + 64 + 20, + .htotal = 1080 + 64 + 20 + 64, + .vdisplay = 2340, + .vsync_start = 2340 + 64, + .vsync_end = 2340 + 64 + 20, + .vtotal = 2340 + 64 + 20 + 64, + .width_mm = 68, + .height_mm = 147, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int ams639rq08_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &ams639rq08_mode); +} + +static const struct drm_panel_funcs ams639rq08_panel_funcs = { + .prepare = ams639rq08_prepare, + .unprepare = ams639rq08_unprepare, + .get_modes = ams639rq08_get_modes, +}; + +static int ams639rq08_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int ams639rq08_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return brightness; +} + +static const struct backlight_ops ams639rq08_bl_ops = { + .update_status = ams639rq08_bl_update_status, + .get_brightness = ams639rq08_bl_get_brightness, +}; + +static struct backlight_device * +ams639rq08_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 1023, + .max_brightness = 2047, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &ams639rq08_bl_ops, &props); +} + +static int ams639rq08_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct ams639rq08 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(&dsi->dev, + ARRAY_SIZE(ams639rq08_supplies), + ams639rq08_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = ams639rq08_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + 
"Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void ams639rq08_remove(struct mipi_dsi_device *dsi) +{ + struct ams639rq08 *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id ams639rq08_of_match[] = { + { .compatible = "samsung,ams639rq08" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ams639rq08_of_match); + +static struct mipi_dsi_driver ams639rq08_driver = { + .probe = ams639rq08_probe, + .remove = ams639rq08_remove, + .driver = { + .name = "panel-samsung-ams639rq08", + .of_match_table = ams639rq08_of_match, + }, +}; +module_mipi_dsi_driver(ams639rq08_driver); + +MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>"); +MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS639RQ08 cmd mode dsi panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c index 10bc8fb5f1f9..27a059b55ae5 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c @@ -38,57 +38,38 @@ static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx) usleep_range(10000, 11000); } -static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx) +static int s6e3fa7_panel_on(struct mipi_dsi_device *dsi) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, + 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0, + 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); - mipi_dsi_dcs_write_seq(dsi, 0xf4, - 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0, - 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - - return 0; + return dsi_ctx.accum_err; } static int s6e3fa7_panel_prepare(struct drm_panel *panel) { struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; s6e3fa7_panel_reset(ctx); - ret = s6e3fa7_panel_on(ctx); - if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); + ret = s6e3fa7_panel_on(ctx->dsi); + if (ret < 0) gpiod_set_value_cansleep(ctx->reset_gpio, 1); - return ret; - } - return 0; + return ret; } static int s6e3fa7_panel_unprepare(struct drm_panel *panel) @@ -104,23 +85,13 @@ static int s6e3fa7_panel_disable(struct drm_panel *panel) { struct s6e3fa7_panel *ctx = 
to_s6e3fa7_panel(panel); struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; - - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); - return 0; + return dsi_ctx.accum_err; } static const struct drm_display_mode s6e3fa7_panel_mode = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c new file mode 100644 index 000000000000..64c6f7d45bed --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: +// Copyright (c) 2013, The Linux Foundation. All rights reserved. +// Copyright (c) 2024 Dzmitry Sankouski <dsankouski@gmail.com> + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/display/drm_dsc.h> +#include <drm/display/drm_dsc_helper.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_panel.h> + +struct s6e3ha8 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct drm_dsc_config dsc; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data s6e3ha8_supplies[] = { + { .supply = "vdd3" }, + { .supply = "vci" }, + { .supply = "vddr" }, +}; + +static inline +struct s6e3ha8 *to_s6e3ha8_amb577px01_wqhd(struct drm_panel *panel) +{ + return container_of(panel, struct s6e3ha8, panel); +} + +#define s6e3ha8_test_key_on_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a) +#define s6e3ha8_test_key_off_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5) +#define s6e3ha8_test_key_on_lvl3(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0x5a, 0x5a) +#define s6e3ha8_test_key_off_lvl3(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0xa5, 0xa5) +#define s6e3ha8_test_key_on_lvl1(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0xa5, 0xa5) +#define s6e3ha8_test_key_off_lvl1(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0x5a, 0x5a) +#define s6e3ha8_afc_off(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xe2, 0x00, 0x00) + +static void s6e3ha8_amb577px01_wqhd_reset(struct s6e3ha8 *priv) +{ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(priv->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(5000, 6000); +} + +static int s6e3ha8_amb577px01_wqhd_on(struct s6e3ha8 *priv) +{ + struct mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + s6e3ha8_test_key_on_lvl1(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_compression_mode_multi(&ctx, true); + s6e3ha8_test_key_off_lvl2(&ctx); + + mipi_dsi_dcs_exit_sleep_mode_multi(&ctx); + usleep_range(5000, 6000); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x13); + s6e3ha8_test_key_off_lvl2(&ctx); + usleep_range(10000, 11000); + + 
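The s6e3ha8 is the first panel in this set that only works with DSC: on() enables compression mode, and probe (later in this file) fills a struct drm_dsc_config with two 720x40 slices and 8.0 bpp. The fixed-point encoding is worth spelling out — bits_per_pixel carries 4 fractional bits, so 8 << 4 means 8.0 bpp. A small worked sketch, assuming kernel types and an illustrative helper name:

#include <linux/types.h>

/* Illustrative only: pack an X.Y bpp value into the 4-fractional-bit
 * fixed point used by drm_dsc_config::bits_per_pixel. */
static u16 example_dsc_bpp(u16 integer_part, u16 sixteenths)
{
	return (integer_part << 4) | (sixteenths & 0xf);
}

/* example_dsc_bpp(8, 0) == 8 << 4 == 0x80, i.e. 8.0 bpp; likewise
 * 1440 / slice_width(720) == slice_count(2), matching the probe code
 * and the WARN_ON(1440 % priv->dsc.slice_width) sanity check. */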
s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x13); + s6e3ha8_test_key_off_lvl2(&ctx); + + /* OMOK setting 1 (Initial setting) - Scaler Latch Setting Guide */ + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x07); + /* latch setting 1 : Scaler on/off & address setting & PPS setting -> Image update latch */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x3c, 0x10); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x0b); + /* latch setting 2 : Ratio change mode -> Image update latch */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x30); + /* OMOK setting 2 - Seamless setting guide : WQHD */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00, 0x00, 0x05, 0x9f); /* CASET */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00, 0x00, 0x0b, 0x8f); /* PASET */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xba, 0x01); /* scaler setup : scaler off */ + s6e3ha8_test_key_off_lvl2(&ctx); + + mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00); /* TE Vsync ON */ + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xed, 0x4c); /* ERR_FG */ + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_on_lvl3(&ctx); + /* FFC Setting 897.6Mbps */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x0d, 0x10, 0xb4, 0x3e, 0x01); + s6e3ha8_test_key_off_lvl3(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, + 0x00, 0xb0, 0x81, 0x09, 0x00, 0x00, 0x00, + 0x11, 0x03); /* TSP HSYNC Setting */ + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x03); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf6, 0x43); + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + /* Brightness condition set */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, + 0x07, 0x00, 0x00, 0x00, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x0c); /* AID Set : 0% */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, + 0x19, 0xdc, 0x16, 0x01, 0x34, 0x67, 0x9a, + 0xcd, 0x01, 0x22, 0x33, 0x44, 0x00, 0x00, + 0x05, 0x55, 0xcc, 0x0c, 0x01, 0x11, 0x11, + 0x10); /* MPS/ELVSS Setting */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf4, 0xeb, 0x28); /* VINT */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf7, 0x03); /* Gamma, LTPS(AID) update */ + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_off_lvl1(&ctx); + + return ctx.accum_err; +} + +static int s6e3ha8_enable(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + struct mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + + s6e3ha8_test_key_on_lvl1(&ctx); + mipi_dsi_dcs_set_display_on_multi(&ctx); + s6e3ha8_test_key_off_lvl1(&ctx); + + return ctx.accum_err; +} + +static int s6e3ha8_disable(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + struct mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + + s6e3ha8_test_key_on_lvl1(&ctx); + mipi_dsi_dcs_set_display_off_multi(&ctx); + s6e3ha8_test_key_off_lvl1(&ctx); + mipi_dsi_msleep(&ctx, 20); + + s6e3ha8_test_key_on_lvl2(&ctx); + s6e3ha8_afc_off(&ctx); + s6e3ha8_test_key_off_lvl2(&ctx); + + mipi_dsi_msleep(&ctx, 160); + + return ctx.accum_err; +} + +static int s6e3ha8_amb577px01_wqhd_prepare(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + struct 
mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + struct drm_dsc_picture_parameter_set pps; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies); + if (ret < 0) + return ret; + mipi_dsi_msleep(&ctx, 120); + s6e3ha8_amb577px01_wqhd_reset(priv); + + ret = s6e3ha8_amb577px01_wqhd_on(priv); + if (ret < 0) { + gpiod_set_value_cansleep(priv->reset_gpio, 1); + goto err; + } + + drm_dsc_pps_payload_pack(&pps, &priv->dsc); + + s6e3ha8_test_key_on_lvl1(&ctx); + mipi_dsi_picture_parameter_set_multi(&ctx, &pps); + s6e3ha8_test_key_off_lvl1(&ctx); + + mipi_dsi_msleep(&ctx, 28); + + return ctx.accum_err; +err: + regulator_bulk_disable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies); + return ret; +} + +static int s6e3ha8_amb577px01_wqhd_unprepare(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + + return regulator_bulk_disable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies); +} + +static const struct drm_display_mode s6e3ha8_amb577px01_wqhd_mode = { + .clock = (1440 + 116 + 44 + 120) * (2960 + 120 + 80 + 124) * 60 / 1000, + .hdisplay = 1440, + .hsync_start = 1440 + 116, + .hsync_end = 1440 + 116 + 44, + .htotal = 1440 + 116 + 44 + 120, + .vdisplay = 2960, + .vsync_start = 2960 + 120, + .vsync_end = 2960 + 120 + 80, + .vtotal = 2960 + 120 + 80 + 124, + .width_mm = 64, + .height_mm = 132, +}; + +static int s6e3ha8_amb577px01_wqhd_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &s6e3ha8_amb577px01_wqhd_mode); +} + +static const struct drm_panel_funcs s6e3ha8_amb577px01_wqhd_panel_funcs = { + .prepare = s6e3ha8_amb577px01_wqhd_prepare, + .unprepare = s6e3ha8_amb577px01_wqhd_unprepare, + .get_modes = s6e3ha8_amb577px01_wqhd_get_modes, + .enable = s6e3ha8_enable, + .disable = s6e3ha8_disable, +}; + +static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e3ha8 *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies), + s6e3ha8_supplies, + &priv->supplies); + if (ret < 0) { + dev_err(dev, "failed to get regulators: %d\n", ret); + return ret; + } + + priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "Failed to get reset-gpios\n"); + + priv->dsi = dsi; + mipi_dsi_set_drvdata(dsi, priv); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP | + MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET; + + drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + priv->panel.prepare_prev_first = true; + + drm_panel_add(&priv->panel); + + /* This panel only supports DSC; unconditionally enable it */ + dsi->dsc = &priv->dsc; + + priv->dsc.dsc_version_major = 1; + priv->dsc.dsc_version_minor = 1; + + priv->dsc.slice_height = 40; + priv->dsc.slice_width = 720; + WARN_ON(1440 % priv->dsc.slice_width); + priv->dsc.slice_count = 1440 / priv->dsc.slice_width; + priv->dsc.bits_per_component = 8; + priv->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */ + priv->dsc.block_pred_enable = true; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", 
ret); + drm_panel_remove(&priv->panel); + return ret; + } + + return 0; +} + +static void s6e3ha8_amb577px01_wqhd_remove(struct mipi_dsi_device *dsi) +{ + struct s6e3ha8 *priv = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&priv->panel); +} + +static const struct of_device_id s6e3ha8_amb577px01_wqhd_of_match[] = { + { .compatible = "samsung,s6e3ha8" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e3ha8_amb577px01_wqhd_of_match); + +static struct mipi_dsi_driver s6e3ha8_amb577px01_wqhd_driver = { + .probe = s6e3ha8_amb577px01_wqhd_probe, + .remove = s6e3ha8_amb577px01_wqhd_remove, + .driver = { + .name = "panel-s6e3ha8", + .of_match_table = s6e3ha8_amb577px01_wqhd_of_match, + }, +}; +module_mipi_dsi_driver(s6e3ha8_amb577px01_wqhd_driver); + +MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>"); +MODULE_DESCRIPTION("DRM driver for S6E3HA8 panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c new file mode 100644 index 000000000000..e92e95158d1f --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Samsung AMS427AP24 panel with S6E88A0 controller + * Copyright (c) 2024 Jakob Hauser <jahau@rocketmail.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#define NUM_STEPS_CANDELA 54 +#define NUM_STEPS_AID 39 +#define NUM_STEPS_ELVSS 17 + +/* length of the payload data, thereof fixed and variable */ +#define FIX_LEN_AID 4 +#define FIX_LEN_ELVSS 2 +#define FIX_LEN_GAMMA 1 +#define VAR_LEN_AID 2 +#define VAR_LEN_ELVSS 1 +#define VAR_LEN_GAMMA 33 +#define LEN_AID (FIX_LEN_AID + VAR_LEN_AID) +#define LEN_ELVSS (FIX_LEN_ELVSS + VAR_LEN_ELVSS) +#define LEN_GAMMA (FIX_LEN_GAMMA + VAR_LEN_GAMMA) + +struct s6e88a0_ams427ap24 { + struct drm_panel panel; + struct backlight_device *bl_dev; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data *supplies; + struct gpio_desc *reset_gpio; + bool flip_horizontal; +}; + +static const struct regulator_bulk_data s6e88a0_ams427ap24_supplies[] = { + { .supply = "vdd3" }, + { .supply = "vci" }, +}; + +static inline +struct s6e88a0_ams427ap24 *to_s6e88a0_ams427ap24(struct drm_panel *panel) +{ + return container_of(panel, struct s6e88a0_ams427ap24, panel); +} + +enum candela { + CANDELA_10CD, /* 0 */ + CANDELA_11CD, + CANDELA_12CD, + CANDELA_13CD, + CANDELA_14CD, + CANDELA_15CD, + CANDELA_16CD, + CANDELA_17CD, + CANDELA_19CD, + CANDELA_20CD, + CANDELA_21CD, + CANDELA_22CD, + CANDELA_24CD, + CANDELA_25CD, + CANDELA_27CD, + CANDELA_29CD, + CANDELA_30CD, + CANDELA_32CD, + CANDELA_34CD, + CANDELA_37CD, + CANDELA_39CD, + CANDELA_41CD, + CANDELA_44CD, + CANDELA_47CD, + CANDELA_50CD, + CANDELA_53CD, + CANDELA_56CD, + CANDELA_60CD, + CANDELA_64CD, + CANDELA_68CD, + CANDELA_72CD, + CANDELA_77CD, + CANDELA_82CD, + CANDELA_87CD, + CANDELA_93CD, + CANDELA_98CD, + CANDELA_105CD, + CANDELA_111CD, + CANDELA_119CD, + CANDELA_126CD, + CANDELA_134CD, + CANDELA_143CD, + CANDELA_152CD, + CANDELA_162CD, + CANDELA_172CD, + CANDELA_183CD, + CANDELA_195CD, + CANDELA_207CD, + 
CANDELA_220CD, + CANDELA_234CD, + CANDELA_249CD, + CANDELA_265CD, + CANDELA_282CD, + CANDELA_300CD, /* 53 */ +}; + +static const int s6e88a0_ams427ap24_br_to_cd[NUM_STEPS_CANDELA] = { + /* columns: brightness from, brightness till, candela */ + /* 0 */ 10, /* 10CD */ + /* 11 */ 11, /* 11CD */ + /* 12 */ 12, /* 12CD */ + /* 13 */ 13, /* 13CD */ + /* 14 */ 14, /* 14CD */ + /* 15 */ 15, /* 15CD */ + /* 16 */ 16, /* 16CD */ + /* 17 */ 17, /* 17CD */ + /* 18 */ 18, /* 19CD */ + /* 19 */ 19, /* 20CD */ + /* 20 */ 20, /* 21CD */ + /* 21 */ 21, /* 22CD */ + /* 22 */ 22, /* 24CD */ + /* 23 */ 23, /* 25CD */ + /* 24 */ 24, /* 27CD */ + /* 25 */ 25, /* 29CD */ + /* 26 */ 26, /* 30CD */ + /* 27 */ 27, /* 32CD */ + /* 28 */ 28, /* 34CD */ + /* 29 */ 29, /* 37CD */ + /* 30 */ 30, /* 39CD */ + /* 31 */ 32, /* 41CD */ + /* 33 */ 34, /* 44CD */ + /* 35 */ 36, /* 47CD */ + /* 37 */ 38, /* 50CD */ + /* 39 */ 40, /* 53CD */ + /* 41 */ 43, /* 56CD */ + /* 44 */ 46, /* 60CD */ + /* 47 */ 49, /* 64CD */ + /* 50 */ 52, /* 68CD */ + /* 53 */ 56, /* 72CD */ + /* 57 */ 59, /* 77CD */ + /* 60 */ 63, /* 82CD */ + /* 64 */ 67, /* 87CD */ + /* 68 */ 71, /* 93CD */ + /* 72 */ 76, /* 98CD */ + /* 77 */ 80, /* 105CD */ + /* 81 */ 86, /* 111CD */ + /* 87 */ 91, /* 119CD */ + /* 92 */ 97, /* 126CD */ + /* 98 */ 104, /* 134CD */ + /* 105 */ 110, /* 143CD */ + /* 111 */ 118, /* 152CD */ + /* 119 */ 125, /* 162CD */ + /* 126 */ 133, /* 172CD */ + /* 134 */ 142, /* 183CD */ + /* 143 */ 150, /* 195CD */ + /* 151 */ 160, /* 207CD */ + /* 161 */ 170, /* 220CD */ + /* 171 */ 181, /* 234CD */ + /* 182 */ 205, /* 249CD */ + /* 206 */ 234, /* 265CD */ + /* 235 */ 254, /* 282CD */ + /* 255 */ 255, /* 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_aid[NUM_STEPS_AID][VAR_LEN_AID] = { + { 0x03, 0x77 }, /* AOR 90.9%, 10CD */ + { 0x03, 0x73 }, /* AOR 90.5%, 11CD */ + { 0x03, 0x69 }, /* AOR 89.4%, 12CD */ + { 0x03, 0x65 }, /* AOR 89.0%, 13CD */ + { 0x03, 0x61 }, /* AOR 88.6%, 14CD */ + { 0x03, 0x55 }, /* AOR 87.4%, 15CD */ + { 0x03, 0x50 }, /* AOR 86.9%, 16CD */ + { 0x03, 0x45 }, /* AOR 85.8%, 17CD */ + { 0x03, 0x35 }, /* AOR 84.1%, 19CD */ + { 0x03, 0x27 }, /* AOR 82.7%, 20CD */ + { 0x03, 0x23 }, /* AOR 82.3%, 21CD */ + { 0x03, 0x17 }, /* AOR 81.0%, 22CD */ + { 0x03, 0x11 }, /* AOR 80.4%, 24CD */ + { 0x03, 0x04 }, /* AOR 79.1%, 25CD */ + { 0x02, 0xf4 }, /* AOR 77.5%, 27CD */ + { 0x02, 0xe3 }, /* AOR 75.7%, 29CD */ + { 0x02, 0xd7 }, /* AOR 74.5%, 30CD */ + { 0x02, 0xc6 }, /* AOR 72.7%, 32CD */ + { 0x02, 0xb7 }, /* AOR 71.2%, 34CD */ + { 0x02, 0xa1 }, /* AOR 69.0%, 37CD */ + { 0x02, 0x91 }, /* AOR 67.3%, 39CD */ + { 0x02, 0x78 }, /* AOR 64.8%, 41CD */ + { 0x02, 0x62 }, /* AOR 62.5%, 44CD */ + { 0x02, 0x45 }, /* AOR 59.5%, 47CD */ + { 0x02, 0x30 }, /* AOR 57.4%, 50CD */ + { 0x02, 0x13 }, /* AOR 54.4%, 53CD */ + { 0x01, 0xf5 }, /* AOR 51.3%, 56CD */ + { 0x01, 0xd3 }, /* AOR 47.8%, 60CD */ + { 0x01, 0xb1 }, /* AOR 44.4%, 64CD */ + { 0x01, 0x87 }, /* AOR 40.1%, 68CD */ + { 0x01, 0x63 }, /* AOR 36.6%, 72CD */ + { 0x01, 0x35 }, /* AOR 31.7%, 77CD */ + { 0x01, 0x05 }, /* AOR 26.9%, 82CD */ + { 0x00, 0xd5 }, /* AOR 21.8%, 87CD */ + { 0x00, 0xa1 }, /* AOR 16.5%, 93CD */ + { 0x00, 0x6f }, /* AOR 11.4%, 98CD */ + { 0x00, 0x31 }, /* AOR 5.0%, 105CD */ + { 0x01, 0x86 }, /* AOR 40.0%, 111CD ~ 172CD */ + { 0x00, 0x08 }, /* AOR 0.6%, 183CD ~ 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_elvss[NUM_STEPS_ELVSS][VAR_LEN_ELVSS] = { + { 0x14 }, /* 10CD ~ 111CD */ + { 0x13 }, /* 119CD */ + { 0x12 }, /* 126CD */ + { 0x12 }, /* 134CD */ + { 0x11 }, /* 143CD 
*/ + { 0x10 }, /* 152CD */ + { 0x0f }, /* 162CD */ + { 0x0e }, /* 172CD */ + { 0x11 }, /* 183CD */ + { 0x11 }, /* 195CD */ + { 0x10 }, /* 207CD */ + { 0x0f }, /* 220CD */ + { 0x0f }, /* 234CD */ + { 0x0e }, /* 249CD */ + { 0x0d }, /* 265CD */ + { 0x0c }, /* 282CD */ + { 0x0b }, /* 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_gamma[NUM_STEPS_CANDELA][VAR_LEN_GAMMA] = { + /* 10CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b, + 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b, + 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 11CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b, + 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b, + 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 12CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a, + 0x72, 0x8b, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 13CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a, + 0x72, 0x8b, 0x61, 0x69, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 14CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x87, 0x86, 0x8a, 0x82, 0x82, 0x87, 0x79, + 0x71, 0x89, 0x63, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 15CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8c, + 0x8c, 0x86, 0x87, 0x88, 0x85, 0x85, 0x8a, 0x83, 0x83, 0x88, 0x78, + 0x72, 0x89, 0x64, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 16CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x88, 0x86, 0x86, 0x8a, 0x84, 0x84, 0x88, 0x78, + 0x72, 0x89, 0x5d, 0x67, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 17CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x8a, 0x84, 0x83, 0x87, 0x78, + 0x73, 0x89, 0x64, 0x6e, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 19CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x89, 0x84, 0x84, 0x87, 0x77, + 0x72, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 20CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79, + 0x73, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 21CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79, + 0x74, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 22CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c, + 0x75, 0x87, 0x65, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 24CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c, + 0x76, 0x86, 0x66, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 25CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f, + 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 27CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, 
+ 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f, + 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 29CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 30CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 32CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 34CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x83, 0x84, 0x84, 0x7f, + 0x79, 0x86, 0x6c, 0x76, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 37CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x88, 0x87, 0x86, 0x87, 0x83, 0x84, 0x84, 0x7f, + 0x79, 0x86, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 39CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x83, 0x85, 0x85, 0x80, + 0x79, 0x85, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 41CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x84, 0x6e, 0x79, 0x93, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 44CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x84, 0x6e, 0x79, 0x92, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 47CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x83, 0x6f, 0x79, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 50CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x82, 0x84, 0x83, 0x7f, + 0x79, 0x83, 0x6f, 0x79, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 53CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x85, 0x7f, + 0x79, 0x83, 0x70, 0x79, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 56CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7f, + 0x79, 0x82, 0x70, 0x7a, 0x8e, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 60CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7e, + 0x79, 0x82, 0x71, 0x7a, 0x8d, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 64CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x83, 0x82, 0x80, + 0x7a, 0x84, 0x71, 0x7a, 0x8c, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 68CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x84, 0x82, 0x81, + 0x7b, 0x83, 0x72, 0x7b, 0x8b, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 72CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x86, 
0x88, 0x86, 0x85, 0x85, 0x86, 0x82, 0x84, 0x82, 0x81, + 0x7b, 0x83, 0x72, 0x7c, 0x8a, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 77CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81, + 0x7c, 0x82, 0x72, 0x7c, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 82CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81, + 0x7c, 0x82, 0x73, 0x7c, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 87CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x84, 0x84, 0x86, 0x80, 0x84, 0x81, 0x80, + 0x7a, 0x82, 0x76, 0x7f, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 93CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x87, 0x85, 0x84, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80, + 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 98CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x87, 0x85, 0x85, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80, + 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 105CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x89, 0x88, 0x88, 0x8b, 0x8a, + 0x8a, 0x84, 0x87, 0x85, 0x85, 0x85, 0x85, 0x80, 0x84, 0x80, 0x7f, + 0x79, 0x81, 0x71, 0x7d, 0x87, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 111CD */ + { 0x00, 0xdf, 0x00, 0xde, 0x00, 0xde, 0x85, 0x85, 0x84, 0x87, 0x86, + 0x87, 0x85, 0x86, 0x85, 0x83, 0x83, 0x83, 0x81, 0x82, 0x82, 0x80, + 0x7d, 0x82, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 119CD */ + { 0x00, 0xe3, 0x00, 0xe1, 0x00, 0xe2, 0x85, 0x85, 0x84, 0x86, 0x85, + 0x85, 0x84, 0x85, 0x84, 0x83, 0x83, 0x83, 0x82, 0x82, 0x82, 0x7e, + 0x7b, 0x81, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 126CD */ + { 0x00, 0xe6, 0x00, 0xe5, 0x00, 0xe5, 0x85, 0x84, 0x84, 0x85, 0x85, + 0x85, 0x84, 0x84, 0x84, 0x82, 0x83, 0x83, 0x80, 0x81, 0x81, 0x80, + 0x7f, 0x83, 0x73, 0x7c, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 134CD */ + { 0x00, 0xe9, 0x00, 0xe8, 0x00, 0xe8, 0x84, 0x84, 0x83, 0x85, 0x85, + 0x85, 0x84, 0x84, 0x83, 0x81, 0x82, 0x82, 0x81, 0x81, 0x81, 0x7f, + 0x7d, 0x81, 0x73, 0x7c, 0x83, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 143CD */ + { 0x00, 0xed, 0x00, 0xec, 0x00, 0xec, 0x84, 0x83, 0x83, 0x84, 0x84, + 0x84, 0x84, 0x84, 0x83, 0x82, 0x83, 0x83, 0x81, 0x80, 0x81, 0x7f, + 0x7e, 0x81, 0x70, 0x79, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 152CD */ + { 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x83, 0x83, 0x83, 0x83, 0x83, + 0x83, 0x84, 0x84, 0x83, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80, + 0x80, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 162CD */ + { 0x00, 0xf4, 0x00, 0xf3, 0x00, 0xf4, 0x83, 0x83, 0x83, 0x83, 0x83, + 0x83, 0x82, 0x81, 0x81, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80, + 0x7f, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 172CD */ + { 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8, 0x82, 0x82, 0x82, 0x82, 0x82, + 0x82, 0x82, 0x81, 0x81, 0x80, 0x81, 0x80, 0x80, 0x80, 0x81, 0x81, + 0x80, 0x83, 0x6d, 0x76, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 183CD */ + { 0x00, 0xe0, 0x00, 0xdf, 0x00, 0xdf, 0x84, 0x84, 0x83, 0x86, 0x86, + 0x86, 0x83, 0x84, 0x83, 0x82, 0x82, 0x82, 0x81, 0x83, 0x81, 0x81, + 0x7e, 0x81, 0x80, 0x82, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 195CD */ + { 0x00, 0xe4, 0x00, 0xe3, 0x00, 0xe3, 0x84, 0x83, 0x83, 0x85, 0x85, + 0x85, 0x83, 0x84, 
0x83, 0x81, 0x82, 0x82, 0x82, 0x83, 0x81, 0x81, + 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 207CD */ + { 0x00, 0xe7, 0x00, 0xe6, 0x00, 0xe6, 0x83, 0x82, 0x82, 0x85, 0x85, + 0x85, 0x82, 0x83, 0x83, 0x82, 0x82, 0x82, 0x80, 0x81, 0x80, 0x81, + 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 220CD */ + { 0x00, 0xeb, 0x00, 0xea, 0x00, 0xea, 0x83, 0x83, 0x82, 0x84, 0x84, + 0x84, 0x82, 0x83, 0x82, 0x81, 0x81, 0x82, 0x81, 0x82, 0x81, 0x80, + 0x7e, 0x80, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 234CD */ + { 0x00, 0xef, 0x00, 0xee, 0x00, 0xee, 0x83, 0x82, 0x82, 0x83, 0x83, + 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x81, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 249CD */ + { 0x00, 0xf3, 0x00, 0xf2, 0x00, 0xf2, 0x82, 0x81, 0x81, 0x83, 0x83, + 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x81, 0x80, 0x7f, + 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 265CD */ + { 0x00, 0xf7, 0x00, 0xf7, 0x00, 0xf7, 0x81, 0x81, 0x80, 0x82, 0x82, + 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x80, 0x7f, + 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 282CD */ + { 0x00, 0xfb, 0x00, 0xfb, 0x00, 0xfb, 0x80, 0x80, 0x80, 0x81, 0x81, + 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x78, 0x79, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 300CD */ + { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00 }, +}; + +static int s6e88a0_ams427ap24_set_brightness(struct backlight_device *bd) +{ + struct s6e88a0_ams427ap24 *ctx = bl_get_data(bd); + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + struct device *dev = &dsi->dev; + int brightness = bd->props.brightness; + int candela_enum; + u8 b2[LEN_AID] = { 0xb2, 0x40, 0x08, 0x20, 0x00, 0x00 }; + u8 b6[LEN_ELVSS] = { 0xb6, 0x28, 0x00 }; + u8 ca[LEN_GAMMA]; + + /* get candela enum from brightness */ + for (candela_enum = 0; candela_enum < NUM_STEPS_CANDELA; candela_enum++) + if (brightness <= s6e88a0_ams427ap24_br_to_cd[candela_enum]) + break; + + /* get aid */ + switch (candela_enum) { + case CANDELA_10CD ... CANDELA_105CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[candela_enum], + VAR_LEN_AID); + break; + case CANDELA_111CD ... CANDELA_172CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[CANDELA_111CD], + VAR_LEN_AID); + break; + case CANDELA_183CD ... 
CANDELA_300CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[CANDELA_111CD + 1], + VAR_LEN_AID); + break; + default: + dev_err(dev, "Failed to get aid data\n"); + return -EINVAL; + } + + /* get elvss */ + if (candela_enum <= CANDELA_111CD) { + memcpy(&b6[FIX_LEN_ELVSS], + s6e88a0_ams427ap24_elvss[0], + VAR_LEN_ELVSS); + } else { + memcpy(&b6[FIX_LEN_ELVSS], + s6e88a0_ams427ap24_elvss[candela_enum - CANDELA_111CD], + VAR_LEN_ELVSS); + } + + /* get gamma */ + ca[0] = 0xca; + memcpy(&ca[FIX_LEN_GAMMA], + s6e88a0_ams427ap24_gamma[candela_enum], + VAR_LEN_GAMMA); + + /* write data */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b2, ARRAY_SIZE(b2)); // set aid + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x00); // acl off + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b6, ARRAY_SIZE(b6)); // set elvss + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, ca, ARRAY_SIZE(ca)); // set gamma + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x03); // gamma update + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off + + return dsi_ctx.accum_err; +} + +static void s6e88a0_ams427ap24_reset(struct s6e88a0_ams427ap24 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(18000, 19000); +} + +static int s6e88a0_ams427ap24_on(struct s6e88a0_ams427ap24 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x5a, 0x5a); // level 2 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x11); // src latch set global 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x11); // src latch set 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x13); // src latch set global 2 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x18); // src latch set 2 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x02); // avdd set 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x30); // avdd set 2 + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x5a, 0x5a); // level 3 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x4c); // pixel clock divider pol. 
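The set_brightness path above folds the 0-255 backlight range onto 54 discrete candela steps by scanning s6e88a0_ams427ap24_br_to_cd[] for the first entry that is >= the requested level; brightness 100, for instance, first satisfies the 104 entry and so lands on CANDELA_134CD. A standalone sketch of that lookup against the same table, with an illustrative function name:

static enum candela example_brightness_to_candela(int brightness)
{
	int i;

	/* First table entry covering the requested level wins; since
	 * max_brightness is 255 and the last entry is 255, the 300CD
	 * step always matches and the loop cannot run off the end. */
	for (i = 0; i < NUM_STEPS_CANDELA; i++)
		if (brightness <= s6e88a0_ams427ap24_br_to_cd[i])
			break;

	return i;	/* e.g. 100 -> CANDELA_134CD */
}

The resulting step then indexes the AID, ELVSS, and gamma tables in lockstep, which is why all three are dimensioned by the same candela enumeration.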
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x03, 0x0d); // unknown + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0xa5, 0xa5); // level 3 key off + + if (ctx->flip_horizontal) + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x0e); // flip display + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0xa5, 0xa5); // level 2 key off + + ret = s6e88a0_ams427ap24_set_brightness(ctx->bl_dev); + if (ret < 0) { + dev_err(dev, "Failed to set brightness: %d\n", ret); + return ret; + } + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static int s6e88a0_ams427ap24_off(struct s6e88a0_ams427ap24 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + + return dsi_ctx.accum_err; +} + +static int s6e88a0_ams427ap24_prepare(struct drm_panel *panel) +{ + struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + s6e88a0_ams427ap24_reset(ctx); + + ret = s6e88a0_ams427ap24_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int s6e88a0_ams427ap24_unprepare(struct drm_panel *panel) +{ + struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = s6e88a0_ams427ap24_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode s6e88a0_ams427ap24_mode = { + .clock = (540 + 94 + 4 + 18) * (960 + 12 + 1 + 3) * 60 / 1000, + .hdisplay = 540, + .hsync_start = 540 + 94, + .hsync_end = 540 + 94 + 4, + .htotal = 540 + 94 + 4 + 18, + .vdisplay = 960, + .vsync_start = 960 + 12, + .vsync_end = 960 + 12 + 1, + .vtotal = 960 + 12 + 1 + 3, + .width_mm = 55, + .height_mm = 95, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int s6e88a0_ams427ap24_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, + &s6e88a0_ams427ap24_mode); +} + +static const struct drm_panel_funcs s6e88a0_ams427ap24_panel_funcs = { + .prepare = s6e88a0_ams427ap24_prepare, + .unprepare = s6e88a0_ams427ap24_unprepare, + .get_modes = s6e88a0_ams427ap24_get_modes, +}; + +static const struct backlight_ops s6e88a0_ams427ap24_bl_ops = { + .update_status = s6e88a0_ams427ap24_set_brightness, +}; + +static int s6e88a0_ams427ap24_register_backlight(struct s6e88a0_ams427ap24 *ctx) +{ + struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 180, + .max_brightness = 255, + }; + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret = 0; + + ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev), dev, ctx, + &s6e88a0_ams427ap24_bl_ops, + &props); + if 
(IS_ERR(ctx->bl_dev)) { + ret = PTR_ERR(ctx->bl_dev); + dev_err(dev, "error registering backlight device (%d)\n", ret); + } + + return ret; +} + +static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e88a0_ams427ap24 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + s6e88a0_ams427ap24_supplies, + &ctx->supplies); + if (ret < 0) + return ret; + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP; + + drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal"); + + ret = s6e88a0_ams427ap24_register_backlight(ctx); + if (ret < 0) + return ret; + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void s6e88a0_ams427ap24_remove(struct mipi_dsi_device *dsi) +{ + struct s6e88a0_ams427ap24 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id s6e88a0_ams427ap24_of_match[] = { + { .compatible = "samsung,s6e88a0-ams427ap24" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, s6e88a0_ams427ap24_of_match); + +static struct mipi_dsi_driver s6e88a0_ams427ap24_driver = { + .probe = s6e88a0_ams427ap24_probe, + .remove = s6e88a0_ams427ap24_remove, + .driver = { + .name = "panel-s6e88a0-ams427ap24", + .of_match_table = s6e88a0_ams427ap24_of_match, + }, +}; +module_mipi_dsi_driver(s6e88a0_ams427ap24_driver); + +MODULE_AUTHOR("Jakob Hauser <jahau@rocketmail.com>"); +MODULE_DESCRIPTION("Samsung AMS427AP24 panel with S6E88A0 controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 86735430462f..06381c628209 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -4565,6 +4565,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode mchp_ac69t88a_mode = { + .clock = 25000, + .hdisplay = 800, + .hsync_start = 800 + 88, + .hsync_end = 800 + 88 + 5, + .htotal = 800 + 88 + 5 + 40, + .vdisplay = 480, + .vsync_start = 480 + 23, + .vsync_end = 480 + 23 + 5, + .vtotal = 480 + 23 + 5 + 1, +}; + +static const struct panel_desc mchp_ac69t88a = { + .modes = &mchp_ac69t88a_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 65, + }, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode arm_rtsm_mode[] = { { .clock = 65000, @@ -5049,6 +5074,9 @@ static const struct of_device_id platform_of_match[] = { .compatible 
= "yes-optoelectronics,ytc700tlag-05-201c", .data = &yes_optoelectronics_ytc700tlag_05_201c, }, { + .compatible = "microchip,ac69t88a", + .data = &mchp_ac69t88a, + }, { /* Must be the last entry */ .compatible = "panel-dpi", .data = &panel_dpi, diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index 217f03569494..d437f5c84f5f 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -562,8 +562,7 @@ static int acx565akm_detect(struct acx565akm_panel *lcd) lcd->enabled ? "enabled" : "disabled ", status); acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3); - dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n", - lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]); + dev_dbg(&lcd->spi->dev, "MIPI display ID: %3phN\n", lcd->display_id); switch (lcd->display_id[0]) { case 0x10: diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 2d30da38c2c3..3385fd3ef41a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -38,7 +38,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, return PTR_ERR(opp); dev_pm_opp_put(opp); - err = dev_pm_opp_set_rate(dev, *freq); + err = dev_pm_opp_set_rate(dev, *freq); if (!err) ptdev->pfdevfreq.current_frequency = *freq; @@ -182,6 +182,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) * if any and will avoid a switch off by regulator_late_cleanup() */ ret = dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); if (ret) { DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n"); return ret; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 671eed4ad890..04d615df5259 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -3,6 +3,10 @@ /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ /* Copyright 2019 Collabora ltd. 
*/ +#ifdef CONFIG_ARM_ARCH_TIMER +#include <asm/arch_timer.h> +#endif + #include <linux/module.h> #include <linux/of.h> #include <linux/pagemap.h> @@ -21,13 +25,33 @@ #include "panfrost_gpu.h" #include "panfrost_perfcnt.h" +#define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT) + static bool unstable_ioctls; module_param_unsafe(unstable_ioctls, bool, 0600); +static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, + u64 *arg) +{ + int ret; + + ret = pm_runtime_resume_and_get(pfdev->dev); + if (ret) + return ret; + + panfrost_cycle_counter_get(pfdev); + *arg = panfrost_timestamp_read(pfdev); + panfrost_cycle_counter_put(pfdev); + + pm_runtime_put(pfdev->dev); + return 0; +} + static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panfrost_get_param *param = data; struct panfrost_device *pfdev = ddev->dev_private; + int ret; if (param->pad != 0) return -EINVAL; @@ -69,6 +93,21 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15); PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups); PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc); + + case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP: + ret = panfrost_ioctl_query_timestamp(pfdev, &param->value); + if (ret) + return ret; + break; + + case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY: +#ifdef CONFIG_ARM_ARCH_TIMER + param->value = arch_timer_get_cntfrq(); +#else + param->value = 0; +#endif + break; + default: return -EINVAL; } @@ -245,7 +284,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, if (!args->jc) return -EINVAL; - if (args->requirements && args->requirements != PANFROST_JD_REQ_FS) + if (args->requirements & ~JOB_REQUIREMENTS) return -EINVAL; if (args->out_sync > 0) { @@ -584,6 +623,8 @@ static const struct file_operations panfrost_drm_driver_fops = { * - 1.0 - initial interface * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO * - 1.2 - adds AFBC_FEATURES query + * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT + * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries */ static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -597,7 +638,7 @@ static const struct drm_driver panfrost_drm_driver = { .desc = "panfrost DRM", .date = "20180908", .major = 1, - .minor = 2, + .minor = 3, .gem_create_object = panfrost_gem_create_object, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index fd8e44992184..f5abde3866fb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -177,7 +177,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) struct panfrost_model { const char *name; u32 id; - u32 id_mask; u64 features; u64 issues; struct { @@ -380,6 +379,18 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev) return ((u64)hi << 32) | lo; } +unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev) +{ + u32 hi, lo; + + do { + hi = gpu_read(pfdev, GPU_TIMESTAMP_HI); + lo = gpu_read(pfdev, GPU_TIMESTAMP_LO); + } while (hi != gpu_read(pfdev, GPU_TIMESTAMP_HI)); + + return ((u64)hi << 32) | lo; +} + static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) { u64 core_mask; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h
b/drivers/gpu/drm/panfrost/panfrost_gpu.h index d841b86504ea..b4fef11211d5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -20,6 +20,7 @@ void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev); void panfrost_cycle_counter_get(struct panfrost_device *pfdev); void panfrost_cycle_counter_put(struct panfrost_device *pfdev); unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev); +unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev); void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index df49d37d0e7e..9b8e82fb8bc4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -159,16 +159,17 @@ panfrost_dequeue_job(struct panfrost_device *pfdev, int slot) struct panfrost_job *job = pfdev->jobs[slot][0]; WARN_ON(!job); - if (job->is_profiled) { - if (job->engine_usage) { - job->engine_usage->elapsed_ns[slot] += - ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); - job->engine_usage->cycles[slot] += - panfrost_cycle_counter_read(pfdev) - job->start_cycles; - } - panfrost_cycle_counter_put(job->pfdev); + + if (job->is_profiled && job->engine_usage) { + job->engine_usage->elapsed_ns[slot] += + ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); + job->engine_usage->cycles[slot] += + panfrost_cycle_counter_read(pfdev) - job->start_cycles; } + if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || job->is_profiled) + panfrost_cycle_counter_put(pfdev); + pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; pfdev->jobs[slot][1] = NULL; @@ -243,9 +244,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) subslot = panfrost_enqueue_job(pfdev, js, job); /* Don't queue the job if a reset is in progress */ if (!atomic_read(&pfdev->reset.pending)) { - if (pfdev->profile_mode) { + job->is_profiled = pfdev->profile_mode; + + if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || + job->is_profiled) panfrost_cycle_counter_get(pfdev); - job->is_profiled = true; + + if (job->is_profiled) { job->start_time = ktime_get(); job->start_cycles = panfrost_cycle_counter_read(pfdev); } @@ -693,7 +698,8 @@ panfrost_reset(struct panfrost_device *pfdev, spin_lock(&pfdev->js->job_lock); for (i = 0; i < NUM_JOB_SLOTS; i++) { for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) { - if (pfdev->jobs[i][j]->is_profiled) + if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || + pfdev->jobs[i][j]->is_profiled) panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); pm_runtime_put_noidle(pfdev->dev); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); @@ -727,7 +733,7 @@ panfrost_reset(struct panfrost_device *pfdev, /* Restart the schedulers */ for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_start(&pfdev->js->queue[i].sched); + drm_sched_start(&pfdev->js->queue[i].sched, 0); /* Re-enable job interrupts now that everything has been restarted. 
*/ job_write(pfdev, JOB_INT_MASK, diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index c25743b05c55..c7bba476ab3f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -78,6 +78,8 @@ #define GPU_CYCLE_COUNT_LO 0x90 #define GPU_CYCLE_COUNT_HI 0x94 +#define GPU_TIMESTAMP_LO 0x98 +#define GPU_TIMESTAMP_HI 0x9C #define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */ #define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */ diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c index c6d3c327cc24..ecc7a52bd688 100644 --- a/drivers/gpu/drm/panthor/panthor_devfreq.c +++ b/drivers/gpu/drm/panthor/panthor_devfreq.c @@ -62,14 +62,20 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq) static int panthor_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { + struct panthor_device *ptdev = dev_get_drvdata(dev); struct dev_pm_opp *opp; + int err; opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) return PTR_ERR(opp); dev_pm_opp_put(opp); - return dev_pm_opp_set_rate(dev, *freq); + err = dev_pm_opp_set_rate(dev, *freq); + if (!err) + ptdev->current_frequency = *freq; + + return err; } static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq) @@ -130,6 +136,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev) struct panthor_devfreq *pdevfreq; struct dev_pm_opp *opp; unsigned long cur_freq; + unsigned long freq = ULONG_MAX; int ret; pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL); @@ -156,12 +163,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev) cur_freq = clk_get_rate(ptdev->clks.core); - opp = devfreq_recommended_opp(dev, &cur_freq, 0); - if (IS_ERR(opp)) - return PTR_ERR(opp); - - panthor_devfreq_profile.initial_freq = cur_freq; - /* Regulator coupling only takes care of synchronizing/balancing voltage * updates, but the coupled regulator needs to be enabled manually. * @@ -192,16 +193,30 @@ int panthor_devfreq_init(struct panthor_device *ptdev) return ret; } + opp = devfreq_recommended_opp(dev, &cur_freq, 0); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + panthor_devfreq_profile.initial_freq = cur_freq; + ptdev->current_frequency = cur_freq; + /* * Set the recommend OPP this will enable and configure the regulator * if any and will avoid a switch off by regulator_late_cleanup() */ ret = dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); if (ret) { DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n"); return ret; } + /* Find the fastest defined rate */ + opp = dev_pm_opp_find_freq_floor(dev, &freq); + if (IS_ERR(opp)) + return PTR_ERR(opp); + ptdev->fast_rate = freq; + dev_pm_opp_put(opp); /* diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index e388c0472ba7..0e68f5a70d20 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -67,6 +67,25 @@ struct panthor_irq { }; /** + * enum panthor_device_profiling_mode - Profiling state + */ +enum panthor_device_profiling_flags { + /** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */ + PANTHOR_DEVICE_PROFILING_DISABLED = 0, + + /** @PANTHOR_DEVICE_PROFILING_CYCLES: Sampling job cycles. */ + PANTHOR_DEVICE_PROFILING_CYCLES = BIT(0), + + /** @PANTHOR_DEVICE_PROFILING_TIMESTAMP: Sampling job timestamp. 
*/ + PANTHOR_DEVICE_PROFILING_TIMESTAMP = BIT(1), + + /** @PANTHOR_DEVICE_PROFILING_ALL: Sampling everything. */ + PANTHOR_DEVICE_PROFILING_ALL = + PANTHOR_DEVICE_PROFILING_CYCLES | + PANTHOR_DEVICE_PROFILING_TIMESTAMP, +}; + +/** * struct panthor_device - Panthor device */ struct panthor_device { @@ -162,6 +181,20 @@ struct panthor_device { */ struct page *dummy_latest_flush; } pm; + + /** @profile_mask: User-set profiling flags for job accounting. */ + u32 profile_mask; + + /** @current_frequency: Device clock frequency at present. Set by DVFS*/ + unsigned long current_frequency; + + /** @fast_rate: Maximum device clock frequency. Set by DVFS */ + unsigned long fast_rate; +}; + +struct panthor_gpu_usage { + u64 time; + u64 cycles; }; /** @@ -176,6 +209,9 @@ struct panthor_file { /** @groups: Scheduling group pool attached to this file. */ struct panthor_group_pool *groups; + + /** @stats: cycle and timestamp measures for job execution. */ + struct panthor_gpu_usage stats; }; int panthor_device_init(struct panthor_device *ptdev); diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index c520f156e2d7..ac7e53f6e3f0 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -3,12 +3,17 @@ /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ /* Copyright 2019 Collabora ltd. */ +#ifdef CONFIG_ARM_ARCH_TIMER +#include <asm/arch_timer.h> +#endif + #include <linux/list.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/pagemap.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/time64.h> #include <drm/drm_auth.h> #include <drm/drm_debugfs.h> @@ -165,6 +170,8 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride, _Generic(_obj_name, \ PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \ PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \ + PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \ + PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \ PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \ @@ -751,10 +758,63 @@ static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx, kvfree(ctx->jobs); } +static int panthor_query_timestamp_info(struct panthor_device *ptdev, + struct drm_panthor_timestamp_info *arg) +{ + int ret; + + ret = pm_runtime_resume_and_get(ptdev->base.dev); + if (ret) + return ret; + +#ifdef CONFIG_ARM_ARCH_TIMER + arg->timestamp_frequency = arch_timer_get_cntfrq(); +#else + arg->timestamp_frequency = 0; +#endif + arg->current_timestamp = panthor_gpu_read_timestamp(ptdev); + arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev); + + pm_runtime_put(ptdev->base.dev); + return 0; +} + +static int group_priority_permit(struct drm_file *file, + u8 priority) +{ + /* Ensure that priority is valid */ + if (priority > PANTHOR_GROUP_PRIORITY_REALTIME) + return -EINVAL; + + /* Medium priority and below are always allowed */ + if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM) + return 0; + + /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ + if (capable(CAP_SYS_NICE) || drm_is_current_master(file)) + return 0; + + return -EACCES; +} + +static void panthor_query_group_priorities_info(struct drm_file *file, + struct drm_panthor_group_priorities_info *arg) +{ + int prio; + + for (prio = 
PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) { + if (!group_priority_permit(file, prio)) + arg->allowed_mask |= BIT(prio); + } +} + static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file) { struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); struct drm_panthor_dev_query *args = data; + struct drm_panthor_timestamp_info timestamp_info; + struct drm_panthor_group_priorities_info priorities_info; + int ret; if (!args->pointer) { switch (args->type) { @@ -766,6 +826,14 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d args->size = sizeof(ptdev->csif_info); return 0; + case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: + args->size = sizeof(timestamp_info); + return 0; + + case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: + args->size = sizeof(priorities_info); + return 0; + default: return -EINVAL; } @@ -778,6 +846,18 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d case DRM_PANTHOR_DEV_QUERY_CSIF_INFO: return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info); + case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: + ret = panthor_query_timestamp_info(ptdev, &timestamp_info); + + if (ret) + return ret; + + return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info); + + case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: + panthor_query_group_priorities_info(file, &priorities_info); + return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info); + default: return -EINVAL; } @@ -997,24 +1077,6 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data, return panthor_group_destroy(pfile, args->group_handle); } -static int group_priority_permit(struct drm_file *file, - u8 priority) -{ - /* Ensure that priority is valid */ - if (priority > PANTHOR_GROUP_PRIORITY_HIGH) - return -EINVAL; - - /* Medium priority and below are always allowed */ - if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM) - return 0; - - /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ - if (capable(CAP_SYS_NICE) || drm_is_current_master(file)) - return 0; - - return -EACCES; -} - static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, struct drm_file *file) { @@ -1374,6 +1436,37 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } +static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev, + struct panthor_file *pfile, + struct drm_printer *p) +{ + if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL) + panthor_fdinfo_gather_group_samples(pfile); + + if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) { +#ifdef CONFIG_ARM_ARCH_TIMER + drm_printf(p, "drm-engine-panthor:\t%llu ns\n", + DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC), + arch_timer_get_cntfrq())); +#endif + } + if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES) + drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles); + + drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate); + drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency); +} + +static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file) +{ + struct drm_device *dev = file->minor->dev; + struct panthor_device *ptdev = container_of(dev, struct panthor_device, base); + + panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p); + + drm_show_memory_stats(p, file); +} + static const struct file_operations panthor_drm_driver_fops = { .open = drm_open, .release = drm_release, @@ -1383,6 +1476,7 @@ static
const struct file_operations panthor_drm_driver_fops = { .read = drm_read, .llseek = noop_llseek, .mmap = panthor_mmap, + .show_fdinfo = drm_show_fdinfo, .fop_flags = FOP_UNSIGNED_OFFSET, }; @@ -1396,12 +1490,16 @@ static void panthor_debugfs_init(struct drm_minor *minor) /* * PanCSF driver version: * - 1.0 - initial interface + * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query + * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query + * - adds PANTHOR_GROUP_PRIORITY_REALTIME priority */ static const struct drm_driver panthor_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA, .open = panthor_open, .postclose = panthor_postclose, + .show_fdinfo = panthor_show_fdinfo, .ioctls = panthor_drm_driver_ioctls, .num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls), .fops = &panthor_drm_driver_fops, @@ -1409,7 +1507,7 @@ static const struct drm_driver panthor_drm_driver = { .desc = "Panthor DRM driver", .date = "20230801", .major = 1, - .minor = 0, + .minor = 2, .gem_create_object = panthor_gem_create_object, .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, @@ -1439,6 +1537,44 @@ static void panthor_remove(struct platform_device *pdev) panthor_device_unplug(ptdev); } +static ssize_t profiling_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct panthor_device *ptdev = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", ptdev->profile_mask); +} + +static ssize_t profiling_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct panthor_device *ptdev = dev_get_drvdata(dev); + u32 value; + int err; + + err = kstrtou32(buf, 0, &value); + if (err) + return err; + + if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0) + return -EINVAL; + + ptdev->profile_mask = value; + + return len; +} + +static DEVICE_ATTR_RW(profiling); + +static struct attribute *panthor_attrs[] = { + &dev_attr_profiling.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(panthor); + static const struct of_device_id dt_match[] = { { .compatible = "rockchip,rk3588-mali" }, { .compatible = "arm,mali-valhall-csf" }, @@ -1458,6 +1594,7 @@ static struct platform_driver panthor_driver = { .name = "panthor", .pm = pm_ptr(&panthor_pm_ops), .of_match_table = dt_match, + .dev_groups = panthor_groups, }, }; diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 4e2d3a02ea06..ecca5565ce41 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -78,6 +78,12 @@ enum panthor_fw_binary_entry_type { /** @CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA: Timeline metadata interface. */ CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA = 4, + + /** + * @CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA: Metadata about how + * the FW binary was built. 
+ */ + CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA = 6 }; #define CSF_FW_BINARY_ENTRY_TYPE(ehdr) ((ehdr) & 0xff) @@ -132,6 +138,13 @@ struct panthor_fw_binary_section_entry_hdr { } data; }; +struct panthor_fw_build_info_hdr { + /** @meta_start: Offset of the build info data in the FW binary */ + u32 meta_start; + /** @meta_size: Size of the build info data in the FW binary */ + u32 meta_size; +}; + /** * struct panthor_fw_binary_iter - Firmware binary iterator * @@ -628,6 +641,46 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, return 0; } +static int panthor_fw_read_build_info(struct panthor_device *ptdev, + const struct firmware *fw, + struct panthor_fw_binary_iter *iter, + u32 ehdr) +{ + struct panthor_fw_build_info_hdr hdr; + char header[9]; + const char git_sha_header[sizeof(header)] = "git_sha: "; + int ret; + + ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr)); + if (ret) + return ret; + + if (hdr.meta_start > fw->size || + hdr.meta_start + hdr.meta_size > fw->size) { + drm_err(&ptdev->base, "Firmware build info corrupt\n"); + /* We don't need the build info, so continue */ + return 0; + } + + if (memcmp(git_sha_header, fw->data + hdr.meta_start, + sizeof(git_sha_header))) { + /* Not the expected header, this isn't metadata we understand */ + return 0; + } + + /* Check that the git SHA is NULL terminated as expected */ + if (fw->data[hdr.meta_start + hdr.meta_size - 1] != '\0') { + drm_warn(&ptdev->base, "Firmware's git sha is not NULL terminated\n"); + /* Don't treat as fatal */ + return 0; + } + + drm_info(&ptdev->base, "Firmware git sha: %s\n", + fw->data + hdr.meta_start + sizeof(git_sha_header)); + + return 0; +} + static void panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload) { @@ -672,6 +725,8 @@ static int panthor_fw_load_entry(struct panthor_device *ptdev, switch (CSF_FW_BINARY_ENTRY_TYPE(ehdr)) { case CSF_FW_BINARY_ENTRY_TYPE_IFACE: return panthor_fw_load_section_entry(ptdev, fw, &eiter, ehdr); + case CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA: + return panthor_fw_read_build_info(ptdev, fw, &eiter, ehdr); /* FIXME: handle those entry types? 
*/ case CSF_FW_BINARY_ENTRY_TYPE_CONFIG: @@ -921,7 +976,7 @@ static int panthor_fw_init_ifaces(struct panthor_device *ptdev) return ret; } - drm_info(&ptdev->base, "CSF FW v%d.%d.%d, Features %#x Instrumentation features %#x", + drm_info(&ptdev->base, "CSF FW using interface v%d.%d.%d, Features %#x Instrumentation features %#x", CSF_IFACE_VERSION_MAJOR(glb_iface->control->version), CSF_IFACE_VERSION_MINOR(glb_iface->control->version), CSF_IFACE_VERSION_PATCH(glb_iface->control->version), diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index be97d56bc011..8244a4e6c2a2 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -150,6 +150,17 @@ panthor_gem_prime_export(struct drm_gem_object *obj, int flags) return drm_gem_prime_export(obj, flags); } +static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj) +{ + struct panthor_gem_object *bo = to_panthor_bo(obj); + enum drm_gem_object_status res = 0; + + if (bo->base.base.import_attach || bo->base.pages) + res |= DRM_GEM_OBJECT_RESIDENT; + + return res; +} + static const struct drm_gem_object_funcs panthor_gem_funcs = { .free = panthor_gem_free_object, .print_info = drm_gem_shmem_object_print_info, @@ -159,6 +170,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = panthor_gem_mmap, + .status = panthor_gem_status, .export = panthor_gem_prime_export, .vm_ops = &drm_gem_shmem_vm_ops, }; diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 5251d8764e7d..2d3529a0b156 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -480,3 +480,50 @@ void panthor_gpu_resume(struct panthor_device *ptdev) panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); panthor_gpu_l2_power_on(ptdev); } + +/** + * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset. + * @ptdev: Device. + * @reg: The offset of the register to read. + * + * Return: The counter value. + */ +static u64 +panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg) +{ + u32 hi, lo; + + do { + hi = gpu_read(ptdev, reg + 0x4); + lo = gpu_read(ptdev, reg); + } while (hi != gpu_read(ptdev, reg + 0x4)); + + return ((u64)hi << 32) | lo; +} + +/** + * panthor_gpu_read_timestamp() - Read the timestamp register. + * @ptdev: Device. + * + * Return: The GPU timestamp value. + */ +u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev) +{ + return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO); +} + +/** + * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register. + * @ptdev: Device. + * + * Return: The GPU timestamp offset value. 
+ */ +u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev) +{ + u32 hi, lo; + + hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI); + lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO); + + return ((u64)hi << 32) | lo; +} diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h index bba7555dd3c6..7f6133a66127 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.h +++ b/drivers/gpu/drm/panthor/panthor_gpu.h @@ -5,6 +5,8 @@ #ifndef __PANTHOR_GPU_H__ #define __PANTHOR_GPU_H__ +#include <linux/types.h> + struct panthor_device; int panthor_gpu_init(struct panthor_device *ptdev); @@ -48,5 +50,7 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev); int panthor_gpu_flush_caches(struct panthor_device *ptdev, u32 l2, u32 lsc, u32 other); int panthor_gpu_soft_reset(struct panthor_device *ptdev); +u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev); +u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev); #endif diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 0e6f94df690d..a49132f3778b 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -841,7 +841,7 @@ static void panthor_vm_stop(struct panthor_vm *vm) static void panthor_vm_start(struct panthor_vm *vm) { - drm_sched_start(&vm->sched); + drm_sched_start(&vm->sched, 0); } /** @@ -2730,9 +2730,9 @@ int panthor_mmu_init(struct panthor_device *ptdev) * which passes iova as an unsigned long. Patch the mmu_features to reflect this * limitation. */ - if (sizeof(unsigned long) * 8 < va_bits) { + if (va_bits > BITS_PER_LONG) { ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0); - ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8; + ptdev->gpu_info.mmu_features |= BITS_PER_LONG; } return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 9929e22f4d8d..ef4bec7ff9c7 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -93,6 +93,9 @@ #define MIN_CSGS 3 #define MAX_CSG_PRIO 0xf +#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64)) +#define MAX_INSTRS_PER_JOB 24 + struct panthor_group; /** @@ -137,8 +140,6 @@ enum panthor_csg_priority { * non-real-time groups. When such a group becomes executable, * it will evict the group with the lowest non-rt priority if * there's no free group slot available. - * - * Currently not exposed to userspace. */ PANTHOR_CSG_PRIORITY_RT, @@ -476,6 +477,18 @@ struct panthor_queue { */ struct list_head in_flight_jobs; } fence_ctx; + + /** @profiling: Job profiling data slots and access information. */ + struct { + /** @slots: Kernel BO holding the slots. */ + struct panthor_kernel_bo *slots; + + /** @slot_count: Number of jobs ringbuffer can hold at once. */ + u32 slot_count; + + /** @seqno: Index of the next available profiling information slot. */ + u32 seqno; + } profiling; }; /** @@ -605,6 +618,18 @@ struct panthor_group { */ struct panthor_kernel_bo *syncobjs; + /** @fdinfo: Per-file total cycle and timestamp values reference. */ + struct { + /** @data: Total sampled values for jobs in queues from this group. */ + struct panthor_gpu_usage data; + + /** + * @lock: Mutex to govern concurrent access from drm file's fdinfo callback + * and job post-completion processing function + */ + struct mutex lock; + } fdinfo; + /** @state: Group state. 
*/ enum panthor_group_state state; @@ -662,6 +687,18 @@ struct panthor_group { struct list_head wait_node; }; +struct panthor_job_profiling_data { + struct { + u64 before; + u64 after; + } cycles; + + struct { + u64 before; + u64 after; + } time; +}; + /** * group_queue_work() - Queue a group work * @group: Group to queue the work for. @@ -775,6 +812,15 @@ struct panthor_job { /** @done_fence: Fence signaled when the job is finished or cancelled. */ struct dma_fence *done_fence; + + /** @profiling: Job profiling information. */ + struct { + /** @mask: Current device job profiling enablement bitmask. */ + u32 mask; + + /** @slot: Job index in the profiling slots BO. */ + u32 slot; + } profiling; }; static void @@ -839,6 +885,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * panthor_kernel_bo_destroy(queue->ringbuf); panthor_kernel_bo_destroy(queue->iface.mem); + panthor_kernel_bo_destroy(queue->profiling.slots); /* Release the last_fence we were holding, if any. */ dma_fence_put(queue->fence_ctx.last_fence); @@ -853,6 +900,8 @@ static void group_release_work(struct work_struct *work) release_work); u32 i; + mutex_destroy(&group->fdinfo.lock); + for (i = 0; i < group->queue_count; i++) group_free_queue(group, group->queues[i]); @@ -1989,8 +2038,6 @@ tick_ctx_init(struct panthor_scheduler *sched, } } -#define NUM_INSTRS_PER_SLOT 16 - static void group_term_post_processing(struct panthor_group *group) { @@ -2546,7 +2593,7 @@ static void queue_start(struct panthor_queue *queue) list_for_each_entry(job, &queue->scheduler.pending_list, base.list) job->base.s_fence->parent = dma_fence_get(job->done_fence); - drm_sched_start(&queue->scheduler); + drm_sched_start(&queue->scheduler, 0); } static void panthor_group_stop(struct panthor_group *group) @@ -2790,6 +2837,41 @@ void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) } } +static void update_fdinfo_stats(struct panthor_job *job) +{ + struct panthor_group *group = job->group; + struct panthor_queue *queue = group->queues[job->queue_idx]; + struct panthor_gpu_usage *fdinfo = &group->fdinfo.data; + struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; + struct panthor_job_profiling_data *data = &slots[job->profiling.slot]; + + mutex_lock(&group->fdinfo.lock); + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) + fdinfo->cycles += data->cycles.after - data->cycles.before; + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) + fdinfo->time += data->time.after - data->time.before; + mutex_unlock(&group->fdinfo.lock); +} + +void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) +{ + struct panthor_group_pool *gpool = pfile->groups; + struct panthor_group *group; + unsigned long i; + + if (IS_ERR_OR_NULL(gpool)) + return; + + xa_for_each(&gpool->xa, i, group) { + mutex_lock(&group->fdinfo.lock); + pfile->stats.cycles += group->fdinfo.data.cycles; + pfile->stats.time += group->fdinfo.data.time; + group->fdinfo.data.cycles = 0; + group->fdinfo.data.time = 0; + mutex_unlock(&group->fdinfo.lock); + } +} + static void group_sync_upd_work(struct work_struct *work) { struct panthor_group *group = @@ -2822,6 +2904,8 @@ static void group_sync_upd_work(struct work_struct *work) dma_fence_end_signalling(cookie); list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { + if (job->profiling.mask) + update_fdinfo_stats(job); list_del_init(&job->node); panthor_job_put(&job->base); } @@ -2829,65 +2913,198 @@ static void group_sync_upd_work(struct 
work_struct *work) group_put(group); } -static struct dma_fence * -queue_run_job(struct drm_sched_job *sched_job) +struct panthor_job_ringbuf_instrs { + u64 buffer[MAX_INSTRS_PER_JOB]; + u32 count; +}; + +struct panthor_job_instr { + u32 profile_mask; + u64 instr; +}; + +#define JOB_INSTR(__prof, __instr) \ + { \ + .profile_mask = __prof, \ + .instr = __instr, \ + } + +static void +copy_instrs_to_ringbuf(struct panthor_queue *queue, + struct panthor_job *job, + struct panthor_job_ringbuf_instrs *instrs) +{ + u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); + u64 start = job->ringbuf.start & (ringbuf_size - 1); + u64 size, written; + + /* + * We need to write a whole slot, including any trailing zeroes + * that may come at the end of it. Also, because instrs.buffer has + * been zero-initialised, there's no need to pad it with 0's + */ + instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); + size = instrs->count * sizeof(u64); + WARN_ON(size > ringbuf_size); + written = min(ringbuf_size - start, size); + + memcpy(queue->ringbuf->kmap + start, instrs->buffer, written); + + if (written < size) + memcpy(queue->ringbuf->kmap, + &instrs->buffer[written / sizeof(u64)], + size - written); +} + +struct panthor_job_cs_params { + u32 profile_mask; + u64 addr_reg; u64 val_reg; + u64 cycle_reg; u64 time_reg; + u64 sync_addr; u64 times_addr; + u64 cs_start; u64 cs_size; + u32 last_flush; u32 waitall_mask; +}; + +static void +get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params) { - struct panthor_job *job = container_of(sched_job, struct panthor_job, base); struct panthor_group *group = job->group; struct panthor_queue *queue = group->queues[job->queue_idx]; struct panthor_device *ptdev = group->ptdev; struct panthor_scheduler *sched = ptdev->scheduler; - u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); - u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1); - u64 addr_reg = ptdev->csif_info.cs_reg_count - - ptdev->csif_info.unpreserved_cs_reg_count; - u64 val_reg = addr_reg + 2; - u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + - job->queue_idx * sizeof(struct panthor_syncobj_64b); - u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); - struct dma_fence *done_fence; - int ret; - u64 call_instrs[NUM_INSTRS_PER_SLOT] = { - /* MOV32 rX+2, cs.latest_flush */ - (2ull << 56) | (val_reg << 48) | job->call_info.latest_flush, + params->addr_reg = ptdev->csif_info.cs_reg_count - + ptdev->csif_info.unpreserved_cs_reg_count; + params->val_reg = params->addr_reg + 2; + params->cycle_reg = params->addr_reg; + params->time_reg = params->val_reg; - /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ - (36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233, + params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + + job->queue_idx * sizeof(struct panthor_syncobj_64b); + params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) + + (job->profiling.slot * sizeof(struct panthor_job_profiling_data)); + params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); - /* MOV48 rX:rX+1, cs.start */ - (1ull << 56) | (addr_reg << 48) | job->call_info.start, + params->cs_start = job->call_info.start; + params->cs_size = job->call_info.size; + params->last_flush = job->call_info.latest_flush; - /* MOV32 rX+2, cs.size */ - (2ull << 56) | (val_reg << 48) | job->call_info.size, + params->profile_mask = job->profiling.mask; +} - /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ - (3ull << 56) | (1 << 16), 
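The JOB_INSTR table added below replaces the fixed call_instrs[] array being removed above: each entry pairs a CS instruction with a profile mask, prepare_job_instrs() drops the entries whose mask isn't enabled for the job, and the emitted stream is zero-padded up to a whole cache line for the prefetcher. A reduced stand-alone sketch of that filter-and-pad step (eight 64-bit instructions per 64-byte line, as in the patch; the masks and opcode values are illustrative, not the real CS encoding):

#include <stdint.h>
#include <string.h>

#define INSTRS_PER_LINE	(64 / sizeof(uint64_t))	/* 8 per 64-byte cache line */

struct instr {
	uint32_t mask;	/* 0 = always emitted, else required profiling bits */
	uint64_t op;
};

/*
 * Filter a template sequence by the enabled profiling mask, then pad the
 * stream with zeroes (assumed to decode as NOPs) up to a cache-line
 * multiple. 'out' must have room for n rounded up to INSTRS_PER_LINE.
 * Returns the padded instruction count.
 */
static uint32_t build_stream(const struct instr *seq, uint32_t n,
			     uint32_t enabled, uint64_t *out)
{
	uint32_t count = 0, padded;

	for (uint32_t i = 0; i < n; i++) {
		if (seq[i].mask && !(seq[i].mask & enabled))
			continue;	/* profiling-only instr, not enabled */
		out[count++] = seq[i].op;
	}

	/* Round up to a multiple of INSTRS_PER_LINE (a power of two). */
	padded = (count + INSTRS_PER_LINE - 1) & ~(INSTRS_PER_LINE - 1);
	memset(&out[count], 0, (padded - count) * sizeof(*out));
	return padded;
}

Because the emitted count now varies with the profiling mask, the same routine doubles as the job-credit calculation: calc_job_credits() below runs prepare_job_instrs() against a params struct holding only the mask, purely to learn the instruction count.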
+#define JOB_INSTR_ALWAYS(instr) \ + JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr)) +#define JOB_INSTR_TIMESTAMP(instr) \ + JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr)) +#define JOB_INSTR_CYCLES(instr) \ + JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr)) +static void +prepare_job_instrs(const struct panthor_job_cs_params *params, + struct panthor_job_ringbuf_instrs *instrs) +{ + const struct panthor_job_instr instr_seq[] = { + /* MOV32 rX+2, cs.latest_flush */ + JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush), + /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ + JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) | + (0 << 16) | 0x233), + /* MOV48 rX:rX+1, cycles_offset */ + JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, cycles.before))), + /* STORE_STATE cycles */ + JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), + /* MOV48 rX:rX+1, time_offset */ + JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, time.before))), + /* STORE_STATE timer */ + JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), + /* MOV48 rX:rX+1, cs.start */ + JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start), + /* MOV32 rX+2, cs.size */ + JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size), + /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ + JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)), /* CALL rX:rX+1, rX+2 */ - (32ull << 56) | (addr_reg << 40) | (val_reg << 32), - + JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) | + (params->val_reg << 32)), + /* MOV48 rX:rX+1, cycles_offset */ + JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, cycles.after))), + /* STORE_STATE cycles */ + JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), + /* MOV48 rX:rX+1, time_offset */ + JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, time.after))), + /* STORE_STATE timer */ + JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), /* MOV48 rX:rX+1, sync_addr */ - (1ull << 56) | (addr_reg << 48) | sync_addr, - + JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr), /* MOV48 rX+2, #1 */ - (1ull << 56) | (val_reg << 48) | 1, - + JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1), /* WAIT(all) */ - (3ull << 56) | (waitall_mask << 16), - + JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)), /* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/ - (51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1, + JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) | + (params->val_reg << 32) | (0 << 16) | 1), + /* ERROR_BARRIER, so we can recover from faults at job boundaries. */ + JOB_INSTR_ALWAYS((47ull << 56)), + }; + u32 pad; - /* ERROR_BARRIER, so we can recover from faults at job - * boundaries. - */ - (47ull << 56), + instrs->count = 0; + + /* NEED to be cacheline aligned to please the prefetcher. */ + static_assert(sizeof(instrs->buffer) % 64 == 0, + "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline"); + + /* Make sure we have enough storage to store the whole sequence. 
*/ + static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) == + ARRAY_SIZE(instrs->buffer), + "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch"); + + for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) { + /* If the profile mask of this instruction is not enabled, skip it. */ + if (instr_seq[i].profile_mask && + !(instr_seq[i].profile_mask & params->profile_mask)) + continue; + + instrs->buffer[instrs->count++] = instr_seq[i].instr; + } + + pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); + memset(&instrs->buffer[instrs->count], 0, + (pad - instrs->count) * sizeof(instrs->buffer[0])); + instrs->count = pad; +} + +static u32 calc_job_credits(u32 profile_mask) +{ + struct panthor_job_ringbuf_instrs instrs; + struct panthor_job_cs_params params = { + .profile_mask = profile_mask, }; - /* Need to be cacheline aligned to please the prefetcher. */ - static_assert(sizeof(call_instrs) % 64 == 0, - "call_instrs is not aligned on a cacheline"); + prepare_job_instrs(&params, &instrs); + return instrs.count; +} + +static struct dma_fence * +queue_run_job(struct drm_sched_job *sched_job) +{ + struct panthor_job *job = container_of(sched_job, struct panthor_job, base); + struct panthor_group *group = job->group; + struct panthor_queue *queue = group->queues[job->queue_idx]; + struct panthor_device *ptdev = group->ptdev; + struct panthor_scheduler *sched = ptdev->scheduler; + struct panthor_job_ringbuf_instrs instrs; + struct panthor_job_cs_params cs_params; + struct dma_fence *done_fence; + int ret; /* Stream size is zero, nothing to do except making sure all previously * submitted jobs are done before we signal the @@ -2914,17 +3131,23 @@ queue_run_job(struct drm_sched_job *sched_job) queue->fence_ctx.id, atomic64_inc_return(&queue->fence_ctx.seqno)); - memcpy(queue->ringbuf->kmap + ringbuf_insert, - call_instrs, sizeof(call_instrs)); + job->profiling.slot = queue->profiling.seqno++; + if (queue->profiling.seqno == queue->profiling.slot_count) + queue->profiling.seqno = 0; + + job->ringbuf.start = queue->iface.input->insert; + + get_job_cs_params(job, &cs_params); + prepare_job_instrs(&cs_params, &instrs); + copy_instrs_to_ringbuf(queue, job, &instrs); + + job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64)); panthor_job_get(&job->base); spin_lock(&queue->fence_ctx.lock); list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs); spin_unlock(&queue->fence_ctx.lock); - job->ringbuf.start = queue->iface.input->insert; - job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs); - /* Make sure the ring buffer is updated before the INSERT * register. */ @@ -3017,6 +3240,33 @@ static const struct drm_sched_backend_ops panthor_queue_sched_ops = { .free_job = queue_free_job, }; +static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, + u32 cs_ringbuf_size) +{ + u32 min_profiled_job_instrs = U32_MAX; + u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL); + + /* + * We want to calculate the minimum size of a profiled job's CS, + * because since they need additional instructions for the sampling + * of performance metrics, they might take up further slots in + * the queue's ringbuffer. This means we might not need as many job + * slots for keeping track of their profiling information. What we + * need is the maximum number of slots we should allocate to this end, + * which matches the maximum number of profiled jobs we can place + * simultaneously in the queue's ring buffer.
+ * That has to be calculated separately for every single job profiling + * flag, but not in the case job profiling is disabled, since unprofiled + * jobs don't need to keep track of this at all. + */ + for (u32 i = 0; i < last_flag; i++) { + min_profiled_job_instrs = + min(min_profiled_job_instrs, calc_job_credits(BIT(i))); + } + + return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); +} + static struct panthor_queue * group_create_queue(struct panthor_group *group, const struct drm_panthor_queue_create *args) @@ -3070,9 +3320,35 @@ group_create_queue(struct panthor_group *group, goto err_free_queue; } + queue->profiling.slot_count = + calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size); + + queue->profiling.slots = + panthor_kernel_bo_create(group->ptdev, group->vm, + queue->profiling.slot_count * + sizeof(struct panthor_job_profiling_data), + DRM_PANTHOR_BO_NO_MMAP, + DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | + DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, + PANTHOR_VM_KERNEL_AUTO_VA); + + if (IS_ERR(queue->profiling.slots)) { + ret = PTR_ERR(queue->profiling.slots); + goto err_free_queue; + } + + ret = panthor_kernel_bo_vmap(queue->profiling.slots); + if (ret) + goto err_free_queue; + + /* + * Credit limit argument tells us the total number of instructions + * across all CS slots in the ringbuffer, with some jobs requiring + * twice as many as others, depending on their profiling status. + */ ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops, group->ptdev->scheduler->wq, 1, - args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)), + args->ringbuf_size / sizeof(u64), 0, msecs_to_jiffies(JOB_TIMEOUT_MS), group->ptdev->reset.wq, NULL, "panthor-queue", group->ptdev->base.dev); @@ -3213,6 +3489,8 @@ int panthor_group_create(struct panthor_file *pfile, } mutex_unlock(&sched->reset.lock); + mutex_init(&group->fdinfo.lock); + return gid; err_put_group: @@ -3380,6 +3658,7 @@ panthor_job_create(struct panthor_file *pfile, { struct panthor_group_pool *gpool = pfile->groups; struct panthor_job *job; + u32 credits; int ret; if (qsubmit->pad) @@ -3438,9 +3717,16 @@ panthor_job_create(struct panthor_file *pfile, } } + job->profiling.mask = pfile->ptdev->profile_mask; + credits = calc_job_credits(job->profiling.mask); + if (credits == 0) { + ret = -EINVAL; + goto err_put_job; + } + ret = drm_sched_job_init(&job->base, &job->group->queues[job->queue_idx]->entity, - 1, job->group); + credits, job->group); if (ret) goto err_put_job; diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h index 3a30d2328b30..5ae6b4bde7c5 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.h +++ b/drivers/gpu/drm/panthor/panthor_sched.h @@ -47,4 +47,6 @@ void panthor_sched_resume(struct panthor_device *ptdev); void panthor_sched_report_mmu_fault(struct panthor_device *ptdev); void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events); +void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile); + #endif diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig index 20fe1d2c0aaf..82e918820950 100644 --- a/drivers/gpu/drm/pl111/Kconfig +++ b/drivers/gpu/drm/pl111/Kconfig @@ -5,6 +5,7 @@ config DRM_PL111 depends on ARM || ARM64 || COMPILE_TEST depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_BRIDGE diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 
02e6b74d5016..13362150b9c6 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -47,6 +47,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> @@ -225,6 +226,7 @@ static const struct drm_driver pl111_drm_driver = { .patchlevel = 0, .dumb_create = drm_gem_dma_dumb_create, .gem_prime_import_sg_table = pl111_gem_import_sg_table, + DRM_FBDEV_DMA_DRIVER_OPS, #if defined(CONFIG_DEBUG_FS) .debugfs_init = pl111_debugfs_init, @@ -305,7 +307,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev, if (ret < 0) goto dev_put; - drm_fbdev_dma_setup(drm, priv->variant->fb_depth); + drm_client_setup_with_color_mode(drm, priv->variant->fb_depth); return 0; diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 17d6927e5e23..98a148bea628 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -2,6 +2,7 @@ config DRM_QXL tristate "QXL virtual GPU" depends on DRM && PCI && MMU && HAS_IOPORT + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_TTM select DRM_TTM_HELPER diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 5eb3f5719fdf..21f752644242 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -29,13 +29,14 @@ #include "qxl_drv.h" +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vgaarb.h> #include <drm/drm.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_file.h> @@ -91,7 +92,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, qxl_driver.name); if (ret) goto disable_pci; @@ -118,7 +119,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto modeset_cleanup; - drm_fbdev_ttm_setup(&qdev->ddev, 32); + drm_client_setup(&qdev->ddev, NULL); return 0; modeset_cleanup: @@ -293,6 +294,7 @@ static struct drm_driver qxl_driver = { .debugfs_init = qxl_debugfs_init, #endif .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, + DRM_FBDEV_TTM_DRIVER_OPS, .fops = &qxl_fops, .ioctls = qxl_ioctls, .num_ioctls = ARRAY_SIZE(qxl_ioctls), diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index f98356be0af2..9c6c74a75778 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -5,6 +5,7 @@ config DRM_RADEON depends on DRM && PCI && MMU depends on AGP || !AGP select FW_LOADER + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 1b2d31c4d77c..ac77d1246b94 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = radeon_get_ib_value(p, idx+1) << 8; + offset = (u64)radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", offset, track->vgt_strmout_bo_offset[idx_value]); diff --git a/drivers/gpu/drm/radeon/radeon_device.c 
b/drivers/gpu/drm/radeon/radeon_device.c index 554b236c2328..6f071e61f764 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -35,6 +35,7 @@ #include <linux/vgaarb.h> #include <drm/drm_cache.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_device.h> #include <drm/drm_file.h> @@ -1542,7 +1543,7 @@ void radeon_device_fini(struct radeon_device *rdev) * Called at driver suspend. */ int radeon_suspend_kms(struct drm_device *dev, bool suspend, - bool fbcon, bool freeze) + bool notify_clients, bool freeze) { struct radeon_device *rdev; struct pci_dev *pdev; @@ -1634,9 +1635,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (fbcon) { + if (notify_clients) { console_lock(); - radeon_fbdev_set_suspend(rdev, 1); + drm_client_dev_suspend(dev, true); console_unlock(); } return 0; @@ -1649,7 +1650,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, * Returns 0 for success or an error on failure. * Called at driver resume. */ -int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; @@ -1660,14 +1661,14 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (fbcon) { + if (notify_clients) { console_lock(); } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (pci_enable_device(pdev)) { - if (fbcon) + if (notify_clients) console_unlock(); return -1; } @@ -1730,7 +1731,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) /* reset hpd state */ radeon_hpd_init(rdev); /* blat the mode back in */ - if (fbcon) { + if (notify_clients) { drm_helper_resume_force_mode(dev); /* turn on display hw */ drm_modeset_lock_all(dev); @@ -1746,8 +1747,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); + if (notify_clients) { + drm_client_dev_resume(dev, true); console_unlock(); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e5a6f3e7c75b..23d6d1a2586d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -29,7 +29,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
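The fbcon parameter rename in radeon is more than cosmetic: suspend and resume stop reaching into the fbdev helper directly and instead notify all in-kernel DRM clients. Judging by the call sites above, the boolean passed to the new helpers tells them that the caller already holds the console lock; that reading is inferred from this diff, not confirmed by the helpers' documentation. The resulting pattern on both paths, condensed:

	if (notify_clients) {
		console_lock();
		drm_client_dev_suspend(dev, true);	/* true: console_lock held */
		console_unlock();
	}

	/* resume side, after modes and hpd state have been restored */
	if (notify_clients) {
		drm_client_dev_resume(dev, true);
		console_unlock();
	}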
*/ - +#include <linux/aperture.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -37,9 +37,10 @@ #include <linux/mmu_notifier.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> #include <drm/drm_pciids.h> @@ -261,6 +262,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, unsigned long flags = 0; struct drm_device *ddev; struct radeon_device *rdev; + const struct drm_format_info *format; int ret; if (!ent) @@ -297,7 +299,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, return -EPROBE_DEFER; /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &kms_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, kms_driver.name); if (ret) return ret; @@ -324,7 +326,14 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret) goto err_agp; - radeon_fbdev_setup(ddev->dev_private); + if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) + format = drm_format_info(DRM_FORMAT_C8); + else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024)) + format = drm_format_info(DRM_FORMAT_RGB565); + else + format = NULL; + + drm_client_setup(ddev, format); return 0; @@ -591,6 +600,8 @@ static const struct drm_driver kms_driver = { .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, + RADEON_FBDEV_DRIVER_OPS, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index fb70de29545c..d4a58bd679db 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -198,12 +198,11 @@ static const struct fb_ops radeon_fbdev_fb_ops = { .fb_destroy = radeon_fbdev_fb_destroy, }; -/* - * Fbdev helpers and struct drm_fb_helper_funcs - */ +static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = { +}; -static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = fb_helper->dev->dev_private; struct drm_mode_fb_cmd2 mode_cmd = { }; @@ -243,6 +242,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, } /* setup helper */ + fb_helper->funcs = &radeon_fbdev_fb_helper_funcs; fb_helper->fb = fb; /* okay we have an object now allocate the framebuffer */ @@ -288,116 +288,6 @@ err_radeon_fbdev_destroy_pinned_object: return ret; } -static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = { - .fb_probe = radeon_fbdev_fb_helper_fb_probe, -}; - -/* - * Fbdev client and struct drm_client_funcs - */ - -static void radeon_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = fb_helper->dev; - struct radeon_device *rdev = dev->dev_private; - - if (fb_helper->info) { - vga_switcheroo_client_fb_set(rdev->pdev, NULL); - drm_helper_force_disable_all(dev); - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int radeon_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - vga_switcheroo_process_delayed_switch(); 
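radeon's probe now translates the old bpp_sel heuristic into fourcc formats up front: at most 8 MiB of VRAM gets an 8-bit palette (C8), RN50 boards or at most 32 MiB get RGB565, and everything else passes NULL to keep the driver default. The same decision table as a runnable sketch, with RN50 detection reduced to a plain flag:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const char *pick_fbdev_format(uint64_t vram_size, bool is_rn50)
{
	if (vram_size <= 8ull * 1024 * 1024)
		return "C8";		/* 8-bit palette to conserve VRAM */
	if (is_rn50 || vram_size <= 32ull * 1024 * 1024)
		return "RGB565";	/* 16 bpp on RN50 or small boards */
	return "driver default";	/* the NULL format in the real code */
}

int main(void)
{
	printf("%s\n", pick_fbdev_format(16ull << 20, false));		/* RGB565 */
	printf("%s\n", pick_fbdev_format(256ull << 20, false));	/* driver default */
	return 0;
}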
- - return 0; -} - -static int radeon_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - struct radeon_device *rdev = dev->dev_private; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - vga_switcheroo_client_fb_set(rdev->pdev, fb_helper->info); - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup radeon fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs radeon_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = radeon_fbdev_client_unregister, - .restore = radeon_fbdev_client_restore, - .hotplug = radeon_fbdev_client_hotplug, -}; - -void radeon_fbdev_setup(struct radeon_device *rdev) -{ - struct drm_fb_helper *fb_helper; - int bpp_sel = 32; - int ret; - - if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) - bpp_sel = 8; - else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024)) - bpp_sel = 16; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(rdev_to_drm(rdev), fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs); - - ret = drm_client_init(rdev_to_drm(rdev), &fb_helper->client, "radeon-fbdev", - &radeon_fbdev_client_funcs); - if (ret) { - drm_err(rdev_to_drm(rdev), "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} - -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ - if (rdev_to_drm(rdev)->fb_helper) - drm_fb_helper_set_suspend(rdev_to_drm(rdev)->fb_helper, state); -} - bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 421c83fc70dc..4063d3801e81 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -38,6 +38,9 @@ #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct edid; struct drm_edid; struct radeon_bo; @@ -935,14 +938,14 @@ void dce8_program_fmt(struct drm_encoder *encoder); /* fbdev layer */ #if defined(CONFIG_DRM_FBDEV_EMULATION) -void radeon_fbdev_setup(struct radeon_device *rdev); -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); +int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +#define RADEON_FBDEV_DRIVER_OPS \ + .fbdev_probe = radeon_fbdev_driver_fbdev_probe bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); #else -static inline void radeon_fbdev_setup(struct radeon_device *rdev) -{ } -static inline void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ } +#define RADEON_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { return false; diff --git 
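The new RADEON_FBDEV_DRIVER_OPS macro exists so the drm_driver initializer reads the same whether fbdev emulation is compiled in or not; with emulation disabled, .fbdev_probe becomes an explicit NULL rather than a stub function, and the old setup/suspend entry points disappear entirely. The same pattern for a hypothetical driver (all example_* names are placeholders):

struct drm_fb_helper;
struct drm_fb_helper_surface_size;

#if defined(CONFIG_DRM_FBDEV_EMULATION)
int example_fbdev_probe(struct drm_fb_helper *fb_helper,
			struct drm_fb_helper_surface_size *sizes);
#define EXAMPLE_FBDEV_DRIVER_OPS \
	.fbdev_probe = example_fbdev_probe
#else
#define EXAMPLE_FBDEV_DRIVER_OPS \
	.fbdev_probe = NULL
#endif

static const struct drm_driver example_driver = {
	/* ... */
	EXAMPLE_FBDEV_DRIVER_OPS,
};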
a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig index e1f41468a9a6..840305fdeb49 100644 --- a/drivers/gpu/drm/renesas/rcar-du/Kconfig +++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig @@ -4,6 +4,7 @@ config DRM_RCAR_DU depends on DRM && OF depends on ARM || ARM64 || COMPILE_TEST depends on ARCH_RENESAS || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c index fb719d9aff10..4e0bafc86f50 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c @@ -19,6 +19,7 @@ #include <linux/wait.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -606,6 +607,7 @@ static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .dumb_create = rcar_du_dumb_create, .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rcar_du_fops, .name = "rcar-du", .desc = "Renesas R-Car Display Unit", @@ -716,7 +718,7 @@ static int rcar_du_probe(struct platform_device *pdev) drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev)); - drm_fbdev_dma_setup(&rcdu->ddev, 32); + drm_client_setup(&rcdu->ddev, NULL); return 0; diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c index e445fac8e0b4..c546ab0805d6 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c @@ -680,6 +680,12 @@ static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = { .atomic_update = rcar_du_plane_atomic_update, }; +static const struct drm_plane_helper_funcs rcar_du_primary_plane_helper_funcs = { + .atomic_check = rcar_du_plane_atomic_check, + .atomic_update = rcar_du_plane_atomic_update, + .get_scanout_buffer = drm_fb_dma_get_scanout_buffer, +}; + static struct drm_plane_state * rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane) { @@ -812,8 +818,12 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) if (ret < 0) return ret; - drm_plane_helper_add(&plane->plane, - &rcar_du_plane_helper_funcs); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&plane->plane, + &rcar_du_primary_plane_helper_funcs); + else + drm_plane_helper_add(&plane->plane, + &rcar_du_plane_helper_funcs); drm_plane_create_alpha_property(&plane->plane); diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig index 89bdb598e0ae..7c1817240846 100644 --- a/drivers/gpu/drm/renesas/rz-du/Kconfig +++ b/drivers/gpu/drm/renesas/rz-du/Kconfig @@ -4,6 +4,7 @@ config DRM_RZG2L_DU depends on ARCH_RZG2L || COMPILE_TEST depends on DRM && OF depends on VIDEO_RENESAS_VSP1 + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c index bc7c381f92ac..bbd7003335da 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -79,6 
+80,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops); static const struct drm_driver rzg2l_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .dumb_create = rzg2l_du_dumb_create, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rzg2l_du_fops, .name = "rzg2l-du", .desc = "Renesas RZ/G2L Display Unit", @@ -160,7 +162,7 @@ static int rzg2l_du_probe(struct platform_device *pdev) drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev)); - drm_fbdev_dma_setup(&rcdu->ddev, 32); + drm_client_setup(&rcdu->ddev, NULL); return 0; diff --git a/drivers/gpu/drm/renesas/shmobile/Kconfig b/drivers/gpu/drm/renesas/shmobile/Kconfig index c329ab8a7a8b..52e160464001 100644 --- a/drivers/gpu/drm/renesas/shmobile/Kconfig +++ b/drivers/gpu/drm/renesas/shmobile/Kconfig @@ -4,6 +4,7 @@ config DRM_SHMOBILE depends on DRM && PM depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c index ff2883c7fd46..8d3effe3f598 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c @@ -18,8 +18,10 @@ #include <linux/slab.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> @@ -101,6 +103,7 @@ DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops); static const struct drm_driver shmob_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &shmob_drm_fops, .name = "shmob-drm", .desc = "Renesas SH Mobile DRM", @@ -257,7 +260,7 @@ static int shmob_drm_probe(struct platform_device *pdev) if (ret < 0) goto err_modeset_cleanup; - drm_fbdev_dma_setup(ddev, 16); + drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c index 07ad17d24294..9d166ab2af8b 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c @@ -273,6 +273,13 @@ static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = { .atomic_disable = shmob_drm_plane_atomic_disable, }; +static const struct drm_plane_helper_funcs shmob_drm_primary_plane_helper_funcs = { + .atomic_check = shmob_drm_plane_atomic_check, + .atomic_update = shmob_drm_plane_atomic_update, + .atomic_disable = shmob_drm_plane_atomic_disable, + .get_scanout_buffer = drm_fb_dma_get_scanout_buffer, +}; + static const struct drm_plane_funcs shmob_drm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -310,7 +317,12 @@ struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev, splane->index = index; - drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&splane->base, + &shmob_drm_primary_plane_helper_funcs); + else + drm_plane_helper_add(&splane->base, + &shmob_drm_plane_helper_funcs); return &splane->base; } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 23c49e91f1cc..3ac579615749 100644 --- a/drivers/gpu/drm/rockchip/Kconfig 
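In both rcar-du and shmobile the second plane helper-funcs table differs from the first only by .get_scanout_buffer, and it is attached to primary planes alone; that hook is what lets the DRM panic infrastructure locate a DMA-backed framebuffer to draw into, and only the primary plane's scanout is useful for that. The split, condensed, with example_* names standing in for the driver's own:

static const struct drm_plane_helper_funcs example_primary_plane_helper_funcs = {
	.atomic_check = example_plane_atomic_check,
	.atomic_update = example_plane_atomic_update,
	.get_scanout_buffer = drm_fb_dma_get_scanout_buffer,	/* panic output */
};

/* overlay planes keep the table without the extra hook */
if (type == DRM_PLANE_TYPE_PRIMARY)
	drm_plane_helper_add(plane, &example_primary_plane_helper_funcs);
else
	drm_plane_helper_add(plane, &example_plane_helper_funcs);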
+++ b/drivers/gpu/drm/rockchip/Kconfig @@ -2,12 +2,14 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL select VIDEOMODE_HELPERS select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI + select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI @@ -63,6 +65,14 @@ config ROCKCHIP_DW_HDMI enable HDMI on RK3288 or RK3399 based SoC, you should select this option. +config ROCKCHIP_DW_HDMI_QP + bool "Rockchip specific extensions for Synopsys DW HDMI QP" + select DRM_BRIDGE_CONNECTOR + help + This selects support for Rockchip SoC specific extensions + for the Synopsys DesignWare HDMI QP driver. If you want to + enable HDMI on RK3588 based SoC, you should select this option. + config ROCKCHIP_DW_MIPI_DSI bool "Rockchip specific extensions for Synopsys DW MIPI DSI" select GENERIC_PHY_MIPI_DPHY diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 3ff7b21c0414..3eab662a5a1d 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -11,6 +11,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_VOP) += rockchip_drm_vop.o rockchip_vop_reg.o rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o +rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h index 441248b7a79e..c7780ae3272a 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h @@ -77,7 +77,7 @@ #define SOURCE_PIF_PKT_ALLOC_WR_EN 0x30830 #define SOURCE_PIF_SW_RESET 0x30834 -/* bellow registers need access by mailbox */ +/* below registers need access by mailbox */ /* source car addr */ #define SOURCE_HDTX_CAR 0x0900 #define SOURCE_DPTX_CAR 0x0904 diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 240552eb517f..96e1097f993d 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -76,6 +76,7 @@ struct rockchip_hdmi { struct rockchip_encoder encoder; const struct rockchip_hdmi_chip_data *chip_data; const struct dw_hdmi_plat_data *plat_data; + struct clk *hdmiphy_clk; struct clk *ref_clk; struct clk *grf_clk; struct dw_hdmi *hdmi; @@ -91,74 +92,70 @@ static struct rockchip_hdmi *to_rockchip_hdmi(struct drm_encoder *encoder) static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = { { - 27000000, { - { 0x00b3, 0x0000}, - { 0x2153, 0x0000}, - { 0x40f3, 0x0000} + 30666000, { + { 0x00b3, 0x0000 }, + { 0x2153, 0x0000 }, + { 0x40f3, 0x0000 }, }, }, { - 36000000, { - { 0x00b3, 0x0000}, - { 0x2153, 0x0000}, - { 0x40f3, 0x0000} + 36800000, { + { 0x00b3, 0x0000 }, + { 0x2153, 0x0000 }, + { 0x40a2, 0x0001 }, }, }, { - 40000000, { - { 0x00b3, 0x0000}, - { 0x2153, 0x0000}, - { 0x40f3, 0x0000} + 46000000, { + { 0x00b3, 0x0000 }, + { 0x2142, 0x0001 }, + { 0x40a2, 0x0001 }, }, }, { - 54000000, { - { 0x0072, 0x0001}, - { 0x2142, 0x0001}, - { 0x40a2, 
0x0001}, + 61333000, { + { 0x0072, 0x0001 }, + { 0x2142, 0x0001 }, + { 0x40a2, 0x0001 }, }, }, { - 65000000, { - { 0x0072, 0x0001}, - { 0x2142, 0x0001}, - { 0x40a2, 0x0001}, + 73600000, { + { 0x0072, 0x0001 }, + { 0x2142, 0x0001 }, + { 0x4061, 0x0002 }, }, }, { - 66000000, { - { 0x013e, 0x0003}, - { 0x217e, 0x0002}, - { 0x4061, 0x0002} + 92000000, { + { 0x0072, 0x0001 }, + { 0x2145, 0x0002 }, + { 0x4061, 0x0002 }, }, }, { - 74250000, { - { 0x0072, 0x0001}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} + 122666000, { + { 0x0051, 0x0002 }, + { 0x2145, 0x0002 }, + { 0x4061, 0x0002 }, }, }, { - 83500000, { - { 0x0072, 0x0001}, + 147200000, { + { 0x0051, 0x0002 }, + { 0x2145, 0x0002 }, + { 0x4064, 0x0003 }, }, }, { - 108000000, { - { 0x0051, 0x0002}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} + 184000000, { + { 0x0051, 0x0002 }, + { 0x214c, 0x0003 }, + { 0x4064, 0x0003 }, }, }, { - 106500000, { - { 0x0051, 0x0002}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} - }, - }, { - 146250000, { - { 0x0051, 0x0002}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} + 226666000, { + { 0x0040, 0x0003 }, + { 0x214c, 0x0003 }, + { 0x4064, 0x0003 }, }, }, { - 148500000, { - { 0x0051, 0x0003}, - { 0x214c, 0x0003}, - { 0x4064, 0x0003} + 272000000, { + { 0x0040, 0x0003 }, + { 0x214c, 0x0003 }, + { 0x5a64, 0x0003 }, }, }, { 340000000, { @@ -167,10 +164,16 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = { { 0x5a64, 0x0003 }, }, }, { + 600000000, { + { 0x1a40, 0x0003 }, + { 0x3b4c, 0x0003 }, + { 0x5a64, 0x0003 }, + }, + }, { ~0UL, { - { 0x00a0, 0x000a }, - { 0x2001, 0x000f }, - { 0x4002, 0x000f }, + { 0x0000, 0x0000 }, + { 0x0000, 0x0000 }, + { 0x0000, 0x0000 }, }, } }; @@ -178,31 +181,18 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = { static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = { /* pixelclk bpp8 bpp10 bpp12 */ { - 40000000, { 0x0018, 0x0018, 0x0018 }, - }, { - 65000000, { 0x0028, 0x0028, 0x0028 }, - }, { - 66000000, { 0x0038, 0x0038, 0x0038 }, - }, { - 74250000, { 0x0028, 0x0038, 0x0038 }, - }, { - 83500000, { 0x0028, 0x0038, 0x0038 }, - }, { - 146250000, { 0x0038, 0x0038, 0x0038 }, - }, { - 148500000, { 0x0000, 0x0038, 0x0038 }, - }, { 600000000, { 0x0000, 0x0000, 0x0000 }, }, { - ~0UL, { 0x0000, 0x0000, 0x0000}, + ~0UL, { 0x0000, 0x0000, 0x0000 }, } }; static const struct dw_hdmi_phy_config rockchip_phy_config[] = { /*pixelclk symbol term vlev*/ { 74250000, 0x8009, 0x0004, 0x0272}, - { 148500000, 0x802b, 0x0004, 0x028d}, + { 165000000, 0x802b, 0x0004, 0x0209}, { 297000000, 0x8039, 0x0005, 0x028d}, + { 594000000, 0x8039, 0x0000, 0x019d}, { ~0UL, 0x0000, 0x0000, 0x0000} }; @@ -251,10 +241,7 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data, const struct drm_display_mode *mode) { struct rockchip_hdmi *hdmi = data; - const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg; int pclk = mode->clock * 1000; - bool exact_match = hdmi->plat_data->phy_force_vendor; - int i; if (hdmi->chip_data->max_tmds_clock && mode->clock > hdmi->chip_data->max_tmds_clock) @@ -263,26 +250,18 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data, if (hdmi->ref_clk) { int rpclk = clk_round_rate(hdmi->ref_clk, pclk); - if (abs(rpclk - pclk) > pclk / 1000) + if (rpclk < 0 || abs(rpclk - pclk) > pclk / 1000) return MODE_NOCLOCK; } - for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) { - /* - * For vendor specific phys force an exact match of the pixelclock - * to preserve the original behaviour of the driver. 
- */ - if (exact_match && pclk == mpll_cfg[i].mpixelclock) - return MODE_OK; - /* - * The Synopsys phy can work with pixelclocks up to the value given - * in the corresponding mpll_cfg entry. - */ - if (!exact_match && pclk <= mpll_cfg[i].mpixelclock) - return MODE_OK; + if (hdmi->hdmiphy_clk) { + int rpclk = clk_round_rate(hdmi->hdmiphy_clk, pclk); + + if (rpclk < 0 || abs(rpclk - pclk) > pclk / 1000) + return MODE_NOCLOCK; } - return MODE_BAD; + return MODE_OK; } static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder) @@ -502,7 +481,7 @@ static struct rockchip_hdmi_chip_data rk3399_chip_data = { .lcdsel_grf_reg = RK3399_GRF_SOC_CON20, .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL), .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL), - .max_tmds_clock = 340000, + .max_tmds_clock = 594000, }; static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { @@ -516,7 +495,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { static struct rockchip_hdmi_chip_data rk3568_chip_data = { .lcdsel_grf_reg = -1, - .max_tmds_clock = 340000, + .max_tmds_clock = 594000, }; static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = { @@ -607,6 +586,15 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + if (hdmi->phy) { + struct of_phandle_args clkspec; + + clkspec.np = hdmi->phy->dev.of_node; + hdmi->hdmiphy_clk = of_clk_get_from_provider(&clkspec); + if (IS_ERR(hdmi->hdmiphy_clk)) + hdmi->hdmiphy_clk = NULL; + } + if (hdmi->chip_data == &rk3568_chip_data) { regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1, HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK | diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c new file mode 100644 index 000000000000..9c796ee4c303 --- /dev/null +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
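dw_hdmi_rockchip_mode_valid() no longer consults the MPLL table at all: it asks the clock framework what rate the reference clock (and, where present, the HDMI PHY clock) would round to, and rejects the mode if the error exceeds one part per thousand. The tolerance arithmetic in isolation, with clk_round_rate() replaced by canned values:

#include <stdio.h>
#include <stdlib.h>

/* accept only if the rounded rate is within 0.1% of the pixel clock */
static int mode_clock_ok(long rounded, long pclk)
{
	if (rounded < 0)
		return 0;	/* clk_round_rate() reported an error */
	return labs(rounded - pclk) <= pclk / 1000;
}

int main(void)
{
	long pclk = 148500000;	/* 1080p60 pixel clock, in Hz */

	printf("%d\n", mode_clock_ok(148500000, pclk));	/* exact: accepted */
	printf("%d\n", mode_clock_ok(148351648, pclk));	/* ~0.1% off: accepted */
	printf("%d\n", mode_clock_ok(146250000, pclk));	/* ~1.5% off: rejected */
	return 0;
}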
+ * + * Author: Algea Cao <algea.cao@rock-chips.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> + */ + +#include <linux/clk.h> +#include <linux/gpio/consumer.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/workqueue.h> + +#include <drm/bridge/dw_hdmi_qp.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "rockchip_drm_drv.h" + +#define RK3588_GRF_SOC_CON2 0x0308 +#define RK3588_HDMI0_HPD_INT_MSK BIT(13) +#define RK3588_HDMI0_HPD_INT_CLR BIT(12) +#define RK3588_GRF_SOC_CON7 0x031c +#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12) +#define RK3588_GRF_SOC_STATUS1 0x0384 +#define RK3588_HDMI0_LEVEL_INT BIT(16) +#define RK3588_GRF_VO1_CON3 0x000c +#define RK3588_SCLIN_MASK BIT(9) +#define RK3588_SDAIN_MASK BIT(10) +#define RK3588_MODE_MASK BIT(11) +#define RK3588_I2S_SEL_MASK BIT(13) +#define RK3588_GRF_VO1_CON9 0x0024 +#define RK3588_HDMI0_GRANT_SEL BIT(10) + +#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) +#define HOTPLUG_DEBOUNCE_MS 150 + +struct rockchip_hdmi_qp { + struct device *dev; + struct regmap *regmap; + struct regmap *vo_regmap; + struct rockchip_encoder encoder; + struct clk *ref_clk; + struct dw_hdmi_qp *hdmi; + struct phy *phy; + struct gpio_desc *enable_gpio; + struct delayed_work hpd_work; +}; + +static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder) +{ + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); + + return container_of(rkencoder, struct rockchip_hdmi_qp, encoder); +} + +static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder) +{ + struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); + struct drm_crtc *crtc = encoder->crtc; + unsigned long long rate; + + /* Unconditionally switch to TMDS as FRL is not yet supported */ + gpiod_set_value(hdmi->enable_gpio, 1); + + if (crtc && crtc->state) { + rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode, + 8, HDMI_COLORSPACE_RGB); + clk_set_rate(hdmi->ref_clk, rate); + /* + * FIXME: Temporary workaround to pass pixel clock rate + * to the PHY driver until phy_configure_opts_hdmi + * becomes available in the PHY API. 
See also the related + * comment in rk_hdptx_phy_power_on() from + * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c + */ + phy_set_bus_width(hdmi->phy, rate / 100); + } +} + +static int +dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + s->output_type = DRM_MODE_CONNECTOR_HDMIA; + + return 0; +} + +static const struct +drm_encoder_helper_funcs dw_hdmi_qp_rockchip_encoder_helper_funcs = { + .enable = dw_hdmi_qp_rockchip_encoder_enable, + .atomic_check = dw_hdmi_qp_rockchip_encoder_atomic_check, +}; + +static int dw_hdmi_qp_rk3588_phy_init(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + return phy_power_on(hdmi->phy); +} + +static void dw_hdmi_qp_rk3588_phy_disable(struct dw_hdmi_qp *dw_hdmi, + void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + phy_power_off(hdmi->phy); +} + +static enum drm_connector_status +dw_hdmi_qp_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + u32 val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val); + + return val & RK3588_HDMI0_LEVEL_INT ? + connector_status_connected : connector_status_disconnected; +} + +static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + regmap_write(hdmi->regmap, + RK3588_GRF_SOC_CON2, + HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR, + RK3588_HDMI0_HPD_INT_CLR | + RK3588_HDMI0_HPD_INT_MSK)); +} + +static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = { + .init = dw_hdmi_qp_rk3588_phy_init, + .disable = dw_hdmi_qp_rk3588_phy_disable, + .read_hpd = dw_hdmi_qp_rk3588_read_hpd, + .setup_hpd = dw_hdmi_qp_rk3588_setup_hpd, +}; + +static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work) +{ + struct rockchip_hdmi_qp *hdmi = container_of(work, + struct rockchip_hdmi_qp, + hpd_work.work); + struct drm_device *drm = hdmi->encoder.encoder.dev; + bool changed; + + if (drm) { + changed = drm_helper_hpd_irq_event(drm); + if (changed) + drm_dbg(hdmi, "connector status changed\n"); + } +} + +static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id) +{ + struct rockchip_hdmi_qp *hdmi = dev_id; + u32 intr_stat, val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat); + + if (intr_stat) { + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, + RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + return IRQ_WAKE_THREAD; + } + + return IRQ_NONE; +} + +static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id) +{ + struct rockchip_hdmi_qp *hdmi = dev_id; + u32 intr_stat, val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat); + if (!intr_stat) + return IRQ_NONE; + + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR, + RK3588_HDMI0_HPD_INT_CLR); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + mod_delayed_work(system_wq, &hdmi->hpd_work, + msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS)); + + val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + return IRQ_HANDLED; +} + +static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = { + { .compatible = "rockchip,rk3588-dw-hdmi-qp", + .data = &rk3588_hdmi_phy_ops }, + {}, +}; 
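The GRF writes in this driver use Rockchip's high-word-mask register convention: the top 16 bits of the written value select which of the low 16 bits take effect, so no read-modify-write cycle is needed. HIWORD_UPDATE(val, mask) packs both halves. A standalone check of what the HPD clear-and-unmask write above actually puts on the bus:

#include <stdio.h>

#define BIT(n)				(1u << (n))
#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

#define RK3588_HDMI0_HPD_INT_MSK	BIT(13)
#define RK3588_HDMI0_HPD_INT_CLR	BIT(12)

int main(void)
{
	/* enable bits 12 and 13 in the high word, set only bit 12 (clear
	 * the latched interrupt) and leave bit 13 zero (unmask it) */
	unsigned int v = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
				       RK3588_HDMI0_HPD_INT_CLR |
				       RK3588_HDMI0_HPD_INT_MSK);

	printf("0x%08x\n", v);	/* prints 0x30001000 */
	return 0;
}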
+MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids); + +static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, + void *data) +{ + static const char * const clk_names[] = { + "pclk", "earc", "aud", "hdp", "hclk_vo1", + "ref" /* keep "ref" last */ + }; + struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_qp_plat_data plat_data; + struct drm_device *drm = data; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct rockchip_hdmi_qp *hdmi; + struct clk *clk; + int ret, irq, i; + u32 val; + + if (!pdev->dev.of_node) + return -ENODEV; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + plat_data.phy_ops = of_device_get_match_data(dev); + if (!plat_data.phy_ops) + return -ENODEV; + + plat_data.phy_data = hdmi; + hdmi->dev = &pdev->dev; + + encoder = &hdmi->encoder.encoder; + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); + + rockchip_drm_encoder_set_crtc_endpoint_id(&hdmi->encoder, + dev->of_node, 0, 0); + /* + * If we failed to find the CRTC(s) which this encoder is + * supposed to be connected to, it's because the CRTC has + * not been registered yet. Defer probing, and hope that + * the required CRTC is added later. + */ + if (encoder->possible_crtcs == 0) + return -EPROBE_DEFER; + + hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); + if (IS_ERR(hdmi->regmap)) { + drm_err(hdmi, "Unable to get rockchip,grf\n"); + return PTR_ERR(hdmi->regmap); + } + + hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,vo-grf"); + if (IS_ERR(hdmi->vo_regmap)) { + drm_err(hdmi, "Unable to get rockchip,vo-grf\n"); + return PTR_ERR(hdmi->vo_regmap); + } + + for (i = 0; i < ARRAY_SIZE(clk_names); i++) { + clk = devm_clk_get_enabled(hdmi->dev, clk_names[i]); + + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + if (ret != -EPROBE_DEFER) + drm_err(hdmi, "Failed to get %s clock: %d\n", + clk_names[i], ret); + return ret; + } + } + hdmi->ref_clk = clk; + + hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", + GPIOD_OUT_HIGH); + if (IS_ERR(hdmi->enable_gpio)) { + ret = PTR_ERR(hdmi->enable_gpio); + drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret); + return ret; + } + + hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + if (ret != -EPROBE_DEFER) + drm_err(hdmi, "failed to get phy: %d\n", ret); + return ret; + } + + val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | + HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | + HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | + HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val); + + val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, + RK3588_SET_HPD_PATH_MASK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, + RK3588_HDMI0_GRANT_SEL); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work); + + plat_data.main_irq = platform_get_irq_byname(pdev, "main"); + if (plat_data.main_irq < 0) + return plat_data.main_irq; + + irq = platform_get_irq_byname(pdev, "hpd"); + if (irq < 0) + return irq; + + ret = devm_request_threaded_irq(hdmi->dev, irq, + dw_hdmi_qp_rk3588_hardirq, + 
dw_hdmi_qp_rk3588_irq, + IRQF_SHARED, "dw-hdmi-qp-hpd", + hdmi); + if (ret) + return ret; + + drm_encoder_helper_add(encoder, &dw_hdmi_qp_rockchip_encoder_helper_funcs); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); + + platform_set_drvdata(pdev, hdmi); + + hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data); + if (IS_ERR(hdmi->hdmi)) { + ret = PTR_ERR(hdmi->hdmi); + drm_encoder_cleanup(encoder); + return ret; + } + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + drm_err(hdmi, "failed to init bridge connector: %d\n", ret); + return ret; + } + + return drm_connector_attach_encoder(connector, encoder); +} + +static void dw_hdmi_qp_rockchip_unbind(struct device *dev, + struct device *master, + void *data) +{ + struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&hdmi->hpd_work); + + drm_encoder_cleanup(&hdmi->encoder.encoder); +} + +static const struct component_ops dw_hdmi_qp_rockchip_ops = { + .bind = dw_hdmi_qp_rockchip_bind, + .unbind = dw_hdmi_qp_rockchip_unbind, +}; + +static int dw_hdmi_qp_rockchip_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dw_hdmi_qp_rockchip_ops); +} + +static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops); +} + +static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev) +{ + struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); + u32 val; + + val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | + HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | + HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | + HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val); + + val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, + RK3588_SET_HPD_PATH_MASK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, + RK3588_HDMI0_GRANT_SEL); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + + dw_hdmi_qp_resume(dev, hdmi->hdmi); + + if (hdmi->encoder.encoder.dev) + drm_helper_hpd_irq_event(hdmi->encoder.encoder.dev); + + return 0; +} + +static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume) +}; + +struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = { + .probe = dw_hdmi_qp_rockchip_probe, + .remove = dw_hdmi_qp_rockchip_remove, + .driver = { + .name = "dwhdmiqp-rockchip", + .pm = &dw_hdmi_qp_rockchip_pm, + .of_match_table = dw_hdmi_qp_rockchip_dt_ids, + }, +}; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 11e5d10de4d7..585355de696b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -6,6 +6,7 @@ * based on exynos_drm_drv.c */ +#include <linux/aperture.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -16,7 +17,7 @@ #include <linux/console.h> #include <linux/iommu.h> -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -145,7 +146,7 @@ static int rockchip_drm_bind(struct device *dev) int ret; /* Remove existing drivers that may own the framebuffer memory. 
*/ - ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver); + ret = aperture_remove_all_conflicting_devices(rockchip_drm_driver.name); if (ret) { DRM_DEV_ERROR(dev, "Failed to remove existing framebuffers - %d.\n", @@ -195,7 +196,7 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_kms_helper_poll_fini; - drm_fbdev_dma_setup(drm_dev, 0); + drm_client_setup(drm_dev, NULL); return 0; err_kms_helper_poll_fini: @@ -230,6 +231,7 @@ static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = rockchip_gem_dumb_create, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -358,11 +360,34 @@ static void rockchip_drm_match_remove(struct device *dev) device_link_del(link); } +/* list of preferred vop devices */ +static const char *const rockchip_drm_match_preferred[] = { + "rockchip,rk3399-vop-big", + NULL, +}; + static struct component_match *rockchip_drm_match_add(struct device *dev) { struct component_match *match = NULL; + struct device_node *port; int i; + /* add preferred vop device match before adding driver device matches */ + for (i = 0; ; i++) { + port = of_parse_phandle(dev->of_node, "ports", i); + if (!port) + break; + + if (of_device_is_available(port->parent) && + of_device_compatible_match(port->parent, + rockchip_drm_match_preferred)) + drm_of_component_match_add(dev, &match, + component_compare_of, + port->parent); + + of_node_put(port); + } + for (i = 0; i < num_rockchip_sub_drivers; i++) { struct platform_driver *drv = rockchip_sub_drivers[i]; struct device *p = NULL, *d; @@ -507,6 +532,8 @@ static int __init rockchip_drm_init(void) ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP); ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, CONFIG_ROCKCHIP_DW_HDMI); + ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver, + CONFIG_ROCKCHIP_DW_HDMI_QP); ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI); ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 8d566fcd80a2..24b4ce5ceaf1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -88,6 +88,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder, int rockchip_drm_endpoint_is_subdriver(struct device_node *ep); extern struct platform_driver cdn_dp_driver; extern struct platform_driver dw_hdmi_rockchip_pltfm_driver; +extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver; extern struct platform_driver dw_mipi_dsi_rockchip_driver; extern struct platform_driver inno_hdmi_driver; extern struct platform_driver rockchip_dp_driver; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index a75eede8bf8d..69bcf0e99d57 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -51,7 +51,7 @@ * drm_sched_entity_set_priority(). For changing the set of schedulers * @sched_list at runtime see drm_sched_entity_modify_sched(). * - * An entity is cleaned up by callind drm_sched_entity_fini(). See also + * An entity is cleaned up by calling drm_sched_entity_fini(). See also * drm_sched_entity_destroy(). * * Returns 0 on success or a negative error code on failure. 
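The preferred-VOP loop in rockchip_drm_match_add() changes bind order, not membership: component matches added first are bound first, so on RK3399 the "big" VOP registers as the first CRTC regardless of how the device tree happens to order the ports. Growing the preference later only means extending the NULL-terminated list; the commented slot below is hypothetical:

static const char *const rockchip_drm_match_preferred[] = {
	"rockchip,rk3399-vop-big",
	/* further preferred CRTC compatibles would go here, best first */
	NULL,
};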
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, /* We start in an idle state. */ complete_all(&entity->entity_idle); - spin_lock_init(&entity->rq_lock); + spin_lock_init(&entity->lock); spsc_queue_init(&entity->job_queue); atomic_set(&entity->fence_seq, 0); @@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, { WARN_ON(!num_sched_list || !sched_list); - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->sched_list = sched_list; entity->num_sched_list = num_sched_list; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_modify_sched); @@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) if (!entity->rq) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->stopped = true; drm_sched_rq_remove_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); /* Make sure this entity is not used by the scheduler at the moment */ wait_for_completion(&entity->entity_idle); @@ -372,8 +372,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, } /* - * drm_sched_entity_clear_dep - callback to clear the entities dependency and - * wake up scheduler + * drm_sched_entity_wakeup - callback to clear the entity's dependency and + * wake up the scheduler */ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) @@ -391,14 +391,14 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, * @entity: scheduler entity * @priority: scheduler priority * - * Update the priority of runqueus used for the entity. + * Update the priority of runqueues used for the entity. */ void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority) { - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->priority = priority; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_set_priority); @@ -514,8 +514,17 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) struct drm_sched_job *next; next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); - if (next) - drm_sched_rq_update_fifo(entity, next->submit_ts); + if (next) { + struct drm_sched_rq *rq; + + spin_lock(&entity->lock); + rq = entity->rq; + spin_lock(&rq->lock); + drm_sched_rq_update_fifo_locked(entity, rq, + next->submit_ts); + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); + } } /* Jobs and entities might have different lifecycles. Since we're @@ -555,14 +564,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) if (fence && !dma_fence_is_signaled(fence)) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); rq = sched ? sched->sched_rq[entity->priority] : NULL; if (rq != entity->rq) { drm_sched_rq_remove_entity(entity->rq, entity); entity->rq = rq; } - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); if (entity->num_sched_list == 1) entity->sched_list = NULL; @@ -576,8 +585,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) * fence sequence number this function should be called with drm_sched_job_arm() * under common lock for the struct drm_sched_entity that was set up for * @sched_job in drm_sched_job_init(). - * - * Returns 0 for success, negative error code otherwise. 
*/ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) { @@ -603,9 +610,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) struct drm_sched_rq *rq; /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); if (entity->stopped) { - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); DRM_ERROR("Trying to push to a killed entity\n"); return; @@ -614,11 +621,14 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) rq = entity->rq; sched = rq->sched; + spin_lock(&rq->lock); drm_sched_rq_add_entity(rq, entity); - spin_unlock(&entity->rq_lock); if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_update_fifo(entity, submit_ts); + drm_sched_rq_update_fifo_locked(entity, rq, submit_ts); + + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); drm_sched_wakeup(sched); } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index e97c6c60bc96..7ce25281c74c 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -41,7 +41,7 @@ * 4. Entities themselves maintain a queue of jobs that will be scheduled on * the hardware. * - * The jobs in a entity are always scheduled in the order that they were pushed. + * The jobs in an entity are always scheduled in the order in which they were pushed. * * Note that once a job was taken from the entities queue and pushed to the * hardware, i.e. the pending queue, the entity must not be referenced anymore @@ -159,35 +159,33 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting); } -static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity) +static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq) { - struct drm_sched_rq *rq = entity->rq; - if (!RB_EMPTY_NODE(&entity->rb_tree_node)) { rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root); RB_CLEAR_NODE(&entity->rb_tree_node); } } -void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts) +void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + ktime_t ts) { /* * Both locks need to be grabbed, one to protect from entity->rq change * for entity from within concurrent drm_sched_entity_select_rq and the * other to update the rb tree structure. 
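The locking contract after this rework is strict and now machine-checked: entity->lock (the renamed rq_lock) is the outer lock and stabilizes entity->rq, while rq->lock nests inside it and guards the run queue's entity list and FIFO rb-tree; the *_locked helpers assert both with lockdep. The caller-side shape shared by drm_sched_entity_push_job() and drm_sched_entity_pop_job(), condensed:

	spin_lock(&entity->lock);	/* outer: pins entity->rq against select_rq() */
	rq = entity->rq;
	spin_lock(&rq->lock);		/* inner: protects rb-tree and entity list */

	drm_sched_rq_update_fifo_locked(entity, rq, ts);

	spin_unlock(&rq->lock);
	spin_unlock(&entity->lock);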
*/ - spin_lock(&entity->rq_lock); - spin_lock(&entity->rq->lock); + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); entity->oldest_job_waiting = ts; - rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root, + rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root, drm_sched_entity_compare_before); - - spin_unlock(&entity->rq->lock); - spin_unlock(&entity->rq_lock); } /** @@ -219,15 +217,14 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, void drm_sched_rq_add_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); + if (!list_empty(&entity->list)) return; - spin_lock(&rq->lock); - atomic_inc(rq->sched->score); list_add_tail(&entity->list, &rq->entities); - - spin_unlock(&rq->lock); } /** @@ -241,6 +238,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + if (list_empty(&entity->list)) return; @@ -253,7 +252,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, rq->current_entity = NULL; if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); spin_unlock(&rq->lock); } @@ -355,7 +354,6 @@ drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, return ERR_PTR(-ENOSPC); } - rq->current_entity = entity; reinit_completion(&entity->entity_idle); break; } @@ -601,6 +599,9 @@ static void drm_sched_job_timedout(struct work_struct *work) * callers responsibility to release it manually if it's not part of the * pending list any more. * + * This function is typically used for reset recovery (see the docu of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler teardown, i.e., before calling drm_sched_fini(). */ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) { @@ -673,16 +674,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) */ cancel_delayed_work(&sched->work_tdr); } - EXPORT_SYMBOL(drm_sched_stop); /** * drm_sched_start - recover jobs after a reset * * @sched: scheduler instance + * @errno: error to set on the pending fences * + * This function is typically used for reset recovery (see the docu of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler startup. The scheduler itself is fully operational after + * drm_sched_init() succeeded. */ -void drm_sched_start(struct drm_gpu_scheduler *sched) +void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) { struct drm_sched_job *s_job, *tmp; @@ -697,13 +702,13 @@ void drm_sched_start(struct drm_gpu_scheduler *sched) atomic_add(s_job->credits, &sched->credit_count); if (!fence) { - drm_sched_job_done(s_job, -ECANCELED); + drm_sched_job_done(s_job, errno ?: -ECANCELED); continue; } if (dma_fence_add_callback(fence, &s_job->cb, drm_sched_job_done_cb)) - drm_sched_job_done(s_job, fence->error); + drm_sched_job_done(s_job, fence->error ?: errno); } drm_sched_start_timeout_unlocked(sched); @@ -778,6 +783,10 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); * Drivers must make sure drm_sched_job_cleanup() if this function returns * successfully, even when @job is aborted before drm_sched_job_arm() is called. 
* + * Note that this function does not assign a valid value to each struct member + * of struct drm_sched_job. Take a look at that struct's documentation to see + * who sets which struct member with what lifetime. + * * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware * has died, which can mean that there's no valid runqueue for a @entity. * This function returns -ENOENT in this case (which probably should be -EIO as @@ -803,6 +812,14 @@ int drm_sched_job_init(struct drm_sched_job *job, return -EINVAL; } + /* + * We don't know for sure how the user has allocated the job struct. Thus, + * zero it so that disallowed (i.e., too early) usage of pointers that + * this function does not set is guaranteed to lead to a NULL pointer + * exception instead of UB. + */ + memset(job, 0, sizeof(*job)); + job->entity = entity; job->credits = credits; job->s_fence = drm_sched_fence_alloc(entity, owner); @@ -1333,6 +1350,19 @@ EXPORT_SYMBOL(drm_sched_init); * @sched: scheduler instance * * Tears down and cleans up the scheduler. + * + * This stops submission of new jobs to the hardware through + * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job() + * will not be called for the jobs still in drm_gpu_scheduler.pending_list. + * There is no solution for this currently. Thus, it is up to the driver to make + * sure that + * a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job() + * has been called for all submitted jobs, or that + * b) the jobs for which drm_sched_backend_ops.free_job() has not been called + * after drm_sched_fini() ran are freed manually. + * + * FIXME: Take care of the above problem and prevent this function from leaking + * the jobs in drm_gpu_scheduler.pending_list under any circumstances. */ void drm_sched_fini(struct drm_gpu_scheduler *sched) { @@ -1348,7 +1378,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) list_for_each_entry(s_entity, &rq->entities, list) /* * Prevents reinsertion and marks job_queue as idle, - * it will removed from rq in drm_sched_entity_fini + * it will be removed from the rq in drm_sched_entity_fini() * eventually */ s_entity->stopped = true; @@ -1428,8 +1458,10 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready); /** * drm_sched_wqueue_stop - stop scheduler submission - * * @sched: scheduler instance + * + * Stops the scheduler from pulling new jobs from entities. It also stops + * freeing jobs automatically through drm_sched_backend_ops.free_job(). */ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) { @@ -1441,8 +1473,12 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop); /** * drm_sched_wqueue_start - start scheduler submission - * * @sched: scheduler instance + * + * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it. + * + * This function is not necessary for 'conventional' startup. The scheduler is + * fully operational after drm_sched_init() succeeded.
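Two driver-visible consequences follow from these hunks. First, drm_sched_job_init() now zeroes the drm_sched_job, so anything a driver stored in the job before the call is wiped, while fields of a surrounding structure are untouched. Second, the new drm_sched_fini() documentation puts the burden of draining pending jobs on the driver. A sketch of the safe initialization order; struct my_job is hypothetical:

struct my_job {
	struct drm_sched_job base;
	void *payload;		/* driver state outside the zeroed region */
};

static int my_job_init(struct my_job *job, struct drm_sched_entity *entity,
		       u32 credits, void *owner)
{
	int ret;

	/* zeroes job->base only; anything written into job->base before
	 * this call would be lost, so it must come first */
	ret = drm_sched_job_init(&job->base, entity, credits, owner);
	if (ret)
		return ret;

	/* job->base is valid from here on */
	return 0;
}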
*/ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) { diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig index c3ee956c2bb9..400a6cab3a67 100644 --- a/drivers/gpu/drm/solomon/Kconfig +++ b/drivers/gpu/drm/solomon/Kconfig @@ -2,6 +2,7 @@ config DRM_SSD130X tristate "DRM support for Solomon SSD13xx OLED displays" depends on DRM && MMU select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 6f51bcf774e2..29b2f82d81f8 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> @@ -1780,6 +1781,7 @@ DEFINE_DRM_GEM_FOPS(ssd130x_fops); static const struct drm_driver ssd130x_drm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -2029,7 +2031,7 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap) if (ret) return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n")); - drm_fbdev_shmem_setup(drm, 32); + drm_client_setup(drm, NULL); return ssd130x; } diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c index 0b69c140eab3..44a7a579660f 100644 --- a/drivers/gpu/drm/sprd/sprd_dsi.c +++ b/drivers/gpu/drm/sprd/sprd_dsi.c @@ -209,7 +209,7 @@ static int regmap_tst_io_read(void *context, u32 reg, u32 *val) return 0; } -static struct regmap_bus regmap_tst_io = { +static const struct regmap_bus regmap_tst_io = { .reg_write = regmap_tst_io_write, .reg_read = regmap_tst_io_read, }; diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 75c301aadcbc..ec341a4720d4 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -3,6 +3,7 @@ config DRM_STI tristate "DRM Support for STMicroelectronics SoC stiH4xx Series" depends on OF && DRM && (ARCH_STI || COMPILE_TEST) select RESET_CONTROLLER + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index db0a1eb53532..c59fcb4dca32 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -200,6 +200,9 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane, return 0; crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 1799c12babf5..65f180c8e8e2 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -136,6 +137,7 @@ static const struct drm_driver sti_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &sti_driver_fops, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = sti_drm_dbg_init, @@ -203,7 +205,7 @@ static int sti_bind(struct device *dev) drm_mode_config_reset(ddev); - drm_fbdev_dma_setup(ddev, 32); + 
drm_client_setup(ddev, NULL); return 0; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 43c72c2604a0..f046f5f7ad25 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -638,6 +638,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, mixer = to_sti_mixer(crtc); crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index acbf70b95aeb..5793cf2cb897 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1037,6 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, return 0; crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index d7f41a87808e..635be0ac00af 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -3,6 +3,7 @@ config DRM_STM tristate "DRM Support for STMicroelectronics SoC Series" depends on DRM && (ARCH_STM32 || COMPILE_TEST) depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index e1232f74dfa5..478dc129d5c2 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -8,6 +8,7 @@ * Mickael Reulier <mickael.reulier@st.com> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/mod_devicetable.h> @@ -15,11 +16,12 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -66,6 +68,7 @@ static const struct drm_driver drv_driver = { .patchlevel = 0, .fops = &drv_driver_fops, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, }; static int drv_load(struct drm_device *ddev) @@ -188,7 +191,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev) DRM_DEBUG("%s\n", __func__); - ret = drm_aperture_remove_framebuffers(&drv_driver); + ret = aperture_remove_all_conflicting_devices(drv_driver.name); if (ret) return ret; @@ -206,7 +209,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(ddev, 16); + drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 4037e085430e..b56ba00aabca 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -3,6 +3,7 @@ config DRM_SUN4I tristate "DRM Support for Allwinner A10 Display Engine" depends on DRM && COMMON_CLK depends on ARCH_SUNXI || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 35d7a7ffd208..3f880d8a5666 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ 
b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -6,6 +6,7 @@ * Maxime Ripard <maxime.ripard@free-electrons.com> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/kfifo.h> @@ -14,8 +15,8 @@ #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -55,6 +56,7 @@ static const struct drm_driver sun4i_drv_driver = { /* GEM Operations */ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, }; static int sun4i_drv_bind(struct device *dev) @@ -98,7 +100,7 @@ static int sun4i_drv_bind(struct device *dev) goto unbind_all; /* Remove early framebuffers (ie. simplefb) */ - ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver); + ret = aperture_remove_all_conflicting_devices(sun4i_drv_driver.name); if (ret) goto unbind_all; @@ -111,7 +113,7 @@ static int sun4i_drv_bind(struct device *dev) if (ret) goto finish_poll; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); dev_set_drvdata(dev, drm); diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index e688d8104652..8a3b16aac5d6 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -5,6 +5,7 @@ config DRM_TEGRA depends on COMMON_CLK depends on DRM depends on OF + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 34d22ba210b0..bf3421667ecc 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -4,6 +4,7 @@ * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved. */ +#include <linux/aperture.h> #include <linux/bitops.h> #include <linux/host1x.h> #include <linux/idr.h> @@ -12,9 +13,9 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fourcc.h> @@ -892,6 +893,8 @@ static const struct drm_driver tegra_drm_driver = { .dumb_create = tegra_bo_dumb_create, + TEGRA_FBDEV_DRIVER_OPS, + .ioctls = tegra_drm_ioctls, .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), .fops = &tegra_drm_fops, @@ -1255,7 +1258,7 @@ static int host1x_drm_probe(struct host1x_device *dev) * will not expose any modesetting features. */ if (drm->mode_config.num_crtc > 0) { - err = drm_aperture_remove_framebuffers(&tegra_drm_driver); + err = aperture_remove_all_conflicting_devices(tegra_drm_driver.name); if (err < 0) goto hub; } else { @@ -1270,7 +1273,7 @@ static int host1x_drm_probe(struct host1x_device *dev) if (err < 0) goto hub; - tegra_fbdev_setup(drm); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 2f3781e04b0a..0b65e69f3a8a 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -25,6 +25,9 @@ /* XXX move to include/uapi/drm/drm_fourcc.h? 
*/ #define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22) +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct edid; struct reset_control; @@ -190,10 +193,13 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, const struct drm_mode_fb_cmd2 *cmd); #ifdef CONFIG_DRM_FBDEV_EMULATION -void tegra_fbdev_setup(struct drm_device *drm); +int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +#define TEGRA_FBDEV_DRIVER_OPS \ + .fbdev_probe = tegra_fbdev_driver_fbdev_probe #else -static inline void tegra_fbdev_setup(struct drm_device *drm) -{ } +#define TEGRA_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif extern struct platform_driver tegra_display_hub_driver; diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c index db6eaac3d30e..cd9d798f8870 100644 --- a/drivers/gpu/drm/tegra/fbdev.c +++ b/drivers/gpu/drm/tegra/fbdev.c @@ -66,8 +66,11 @@ static const struct fb_ops tegra_fb_ops = { .fb_destroy = tegra_fbdev_fb_destroy, }; -static int tegra_fbdev_probe(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static const struct drm_fb_helper_funcs tegra_fbdev_helper_funcs = { +}; + +int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct tegra_drm *tegra = helper->dev->dev_private; struct drm_device *drm = helper->dev; @@ -112,6 +115,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, return PTR_ERR(fb); } + helper->funcs = &tegra_fbdev_helper_funcs; helper->fb = fb; helper->info = info; @@ -144,93 +148,3 @@ destroy: drm_framebuffer_remove(fb); return err; } - -static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = { - .fb_probe = tegra_fbdev_probe, -}; - -/* - * struct drm_client - */ - -static void tegra_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int tegra_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int tegra_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs tegra_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = tegra_fbdev_client_unregister, - .restore = tegra_fbdev_client_restore, - .hotplug = tegra_fbdev_client_hotplug, -}; - -void tegra_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - helper = kzalloc(sizeof(*helper), GFP_KERNEL); - if (!helper) - return; - drm_fb_helper_prepare(dev, 
helper, 32, &tegra_fb_helper_funcs); - - ret = drm_client_init(dev, &helper->client, "fbdev", &tegra_fbdev_client_funcs); - if (ret) - goto err_drm_client_init; - - drm_client_register(&helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(helper); - kfree(helper); -} diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index b4eb030ea961..d275404ad0e9 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -76,8 +76,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_ /* * Imported buffers need special treatment to satisfy the semantics of DMA-BUF. */ - if (gem->import_attach) { - struct dma_buf *buf = gem->import_attach->dmabuf; + if (obj->dma_buf) { + struct dma_buf *buf = obj->dma_buf; map->attach = dma_buf_attach(buf, dev); if (IS_ERR(map->attach)) { @@ -184,8 +184,8 @@ static void *tegra_bo_mmap(struct host1x_bo *bo) if (obj->vaddr) return obj->vaddr; - if (obj->gem.import_attach) { - ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map); + if (obj->dma_buf) { + ret = dma_buf_vmap_unlocked(obj->dma_buf, &map); if (ret < 0) return ERR_PTR(ret); @@ -208,8 +208,8 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) if (obj->vaddr) return; - if (obj->gem.import_attach) - return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map); + if (obj->dma_buf) + return dma_buf_vunmap_unlocked(obj->dma_buf, &map); vunmap(addr); } @@ -465,27 +465,32 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm, if (IS_ERR(bo)) return bo; - attach = dma_buf_attach(buf, drm->dev); - if (IS_ERR(attach)) { - err = PTR_ERR(attach); - goto free; - } - - get_dma_buf(buf); + /* + * If we need to use IOMMU API to map the dma-buf into the internally managed + * domain, map it first to the DRM device to get an sgt. 
+ */ + if (tegra->domain) { + attach = dma_buf_attach(buf, drm->dev); + if (IS_ERR(attach)) { + err = PTR_ERR(attach); + goto free; + } - bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE); - if (IS_ERR(bo->sgt)) { - err = PTR_ERR(bo->sgt); - goto detach; - } + bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE); + if (IS_ERR(bo->sgt)) { + err = PTR_ERR(bo->sgt); + goto detach; + } - if (tegra->domain) { err = tegra_bo_iommu_map(tegra, bo); if (err < 0) goto detach; + + bo->gem.import_attach = attach; } - bo->gem.import_attach = attach; + get_dma_buf(buf); + bo->dma_buf = buf; return bo; @@ -516,17 +521,21 @@ void tegra_bo_free_object(struct drm_gem_object *gem) dev_name(mapping->dev)); } - if (tegra->domain) + if (tegra->domain) { tegra_bo_iommu_unmap(tegra, bo); - if (gem->import_attach) { - dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, - DMA_TO_DEVICE); - drm_prime_gem_destroy(gem, NULL); - } else { - tegra_bo_free(gem->dev, bo); + if (gem->import_attach) { + dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, + DMA_TO_DEVICE); + dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach); + } } + tegra_bo_free(gem->dev, bo); + + if (bo->dma_buf) + dma_buf_put(bo->dma_buf); + drm_gem_object_release(gem); kfree(bo); } diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index cb5146a67668..bf2cbd48eb3f 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -32,6 +32,26 @@ struct tegra_bo_tiling { enum tegra_bo_sector_layout sector_layout; }; +/* + * How memory is referenced within a tegra_bo: + * + * Buffer source | Mapping API(*) | Fields + * ---------------+-----------------+--------------- + * Allocated here | DMA API | iova (IOVA mapped to drm->dev), vaddr (CPU VA) + * + * Allocated here | IOMMU API | pages/num_pages (Phys. memory), sgt (Mapped to drm->dev), + * | iova/size (Mapped to domain) + * + * Imported | DMA API | dma_buf (Imported dma_buf) + * + * Imported | IOMMU API | dma_buf (Imported dma_buf), + * | gem->import_attach (Attachment on drm->dev), + * | sgt (Mapped to drm->dev) + * | iova/size (Mapped to domain) + * + * (*) If tegra->domain is set, i.e. TegraDRM IOMMU domain is directly managed through IOMMU API, + * this is IOMMU API. Otherwise DMA API. 
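Read as code, the table above reduces to two predicates: dma_buf distinguishes imported from allocated buffers, and import_attach (respectively pages) marks the IOMMU-API rows. A hypothetical debug helper, not part of this patch, that classifies a buffer per the table:

static const char *tegra_bo_backing_desc(const struct tegra_bo *bo)
{
	/* bo->dma_buf is only set on the import paths */
	if (!bo->dma_buf)
		return bo->pages ? "allocated, IOMMU API"
				 : "allocated, DMA API";
	/* gem.import_attach is only kept when tegra->domain is managed here */
	return bo->gem.import_attach ? "imported, IOMMU API"
				     : "imported, DMA API";
}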
+ */ struct tegra_bo { struct drm_gem_object gem; struct host1x_bo base; @@ -39,6 +59,7 @@ struct tegra_bo { struct sg_table *sgt; dma_addr_t iova; void *vaddr; + struct dma_buf *dma_buf; struct drm_mm_node *mm; unsigned long num_pages; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 09987e372e3e..6bf2dae82ca0 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -434,7 +434,7 @@ tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock, static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) { - const unsigned int freqs[] = { + static const unsigned int freqs[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 }; unsigned int i; diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c index 06f03b78c9c4..6ea04cc8f324 100644 --- a/drivers/gpu/drm/tests/drm_framebuffer_test.c +++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c @@ -5,11 +5,15 @@ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net> */ +#include <kunit/device.h> #include <kunit/test.h> #include <drm/drm_device.h> +#include <drm/drm_drv.h> #include <drm/drm_mode.h> +#include <drm/drm_framebuffer.h> #include <drm/drm_fourcc.h> +#include <drm/drm_kunit_helpers.h> #include <drm/drm_print.h> #include "../drm_crtc_internal.h" @@ -19,6 +23,8 @@ #define MIN_HEIGHT 4 #define MAX_HEIGHT 4096 +#define DRM_MODE_FB_INVALID BIT(2) + struct drm_framebuffer_test { int buffer_created; struct drm_mode_fb_cmd2 cmd; @@ -83,6 +89,24 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { .pitches = { 4 * MAX_WIDTH, 0, 0 }, } }, + +/* + * All entries in members that represent per-plane values (@modifier, @handles, + * @pitches and @offsets) must be zero when unused.
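For contrast with the invalid cases that follow, a single-plane command obeying this rule would zero every entry for planes 1 and 2, along these lines (an illustrative sketch reusing this test file's constants, not one of its cases):

struct drm_mode_fb_cmd2 cmd = {
	.width = MAX_WIDTH, .height = MAX_HEIGHT,
	.pixel_format = DRM_FORMAT_XRGB8888,
	.handles = { 1, 0, 0 },			/* planes 1 and 2 unused: zero */
	.pitches = { 4 * MAX_WIDTH, 0, 0 },	/* likewise */
	.offsets = { 0, 0, 0 },
};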
+ */ +{ .buffer_created = 0, .name = "ABGR8888 Buffer offset for inexistent plane", + .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888, + .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, UINT_MAX / 2, 0 }, + .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS, + } +}, + +{ .buffer_created = 0, .name = "ABGR8888 Invalid flag", + .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888, + .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, + .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_INVALID, + } +}, { .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers", .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888, .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, @@ -262,6 +286,13 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) }, } }, +{ .buffer_created = 0, .name = "YUV420_10BIT Invalid modifier(DRM_FORMAT_MOD_LINEAR)", + .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YUV420_10BIT, + .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS, + .modifier = { DRM_FORMAT_MOD_LINEAR, 0, 0 }, + .pitches = { MAX_WIDTH, 0, 0 }, + } +}, { .buffer_created = 1, .name = "X0L2 Normal sizes", .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 } @@ -317,12 +348,26 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { }, }; +/* + * This struct is intended to provide a way for mocked functions to communicate + * with the outer test when that can't be achieved through their return value. In + * this way, the functions that receive the mocked drm_device, for example, can + * grab a reference to this and actually return something to be used in an + * expectation.
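The mechanism this comment describes is the kernel's usual container_of() trick: embed the mocked object in a private struct and recover that struct inside a callback which only receives the embedded pointer. A generic sketch (names are illustrative; fb_create_mock() below is the concrete instance):

struct mock_priv {
	struct drm_device dev;	/* the embedded, mocked object */
	bool callback_ran;	/* out-of-band channel to the test */
};

static int some_mocked_callback(struct drm_device *dev)
{
	struct mock_priv *priv = container_of(dev, struct mock_priv, dev);

	priv->callback_ran = true;	/* report back to the test */
	return -EINVAL;
}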
+ */ +struct drm_framebuffer_test_priv { + struct drm_device dev; + bool buffer_created; + bool buffer_freed; +}; + static struct drm_framebuffer *fb_create_mock(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { - int *buffer_created = dev->dev_private; - *buffer_created = 1; + struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev); + + priv->buffer_created = true; return ERR_PTR(-EINVAL); } @@ -332,42 +377,338 @@ static struct drm_mode_config_funcs mock_config_funcs = { static int drm_framebuffer_test_init(struct kunit *test) { - struct drm_device *mock; + struct device *parent; + struct drm_framebuffer_test_priv *priv; + struct drm_device *dev; + + parent = drm_kunit_helper_alloc_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); - mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock); + priv = drm_kunit_helper_alloc_drm_device(test, parent, typeof(*priv), + dev, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + dev = &priv->dev; - mock->mode_config.min_width = MIN_WIDTH; - mock->mode_config.max_width = MAX_WIDTH; - mock->mode_config.min_height = MIN_HEIGHT; - mock->mode_config.max_height = MAX_HEIGHT; - mock->mode_config.funcs = &mock_config_funcs; + dev->mode_config.min_width = MIN_WIDTH; + dev->mode_config.max_width = MAX_WIDTH; + dev->mode_config.min_height = MIN_HEIGHT; + dev->mode_config.max_height = MAX_HEIGHT; + dev->mode_config.funcs = &mock_config_funcs; - test->priv = mock; + test->priv = priv; return 0; } static void drm_test_framebuffer_create(struct kunit *test) { const struct drm_framebuffer_test *params = test->param_value; - struct drm_device *mock = test->priv; - int buffer_created = 0; + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; - mock->dev_private = &buffer_created; - drm_internal_framebuffer_create(mock, &params->cmd, NULL); - KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created); + priv->buffer_created = false; + drm_internal_framebuffer_create(dev, &params->cmd, NULL); + KUNIT_EXPECT_EQ(test, params->buffer_created, priv->buffer_created); } static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc) { - strcpy(desc, t->name); + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); } KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases, drm_framebuffer_test_to_desc); +/* Tries to create a framebuffer with modifiers on a drm_device that does not support them */ +static void drm_test_framebuffer_modifiers_not_supported(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_framebuffer *fb; + + /* A valid cmd with a modifier */ + struct drm_mode_fb_cmd2 cmd = { + .width = MAX_WIDTH, .height = MAX_HEIGHT, + .pixel_format = DRM_FORMAT_ABGR8888, .handles = { 1, 0, 0 }, + .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 }, + .flags = DRM_MODE_FB_MODIFIERS, + }; + + priv->buffer_created = false; + dev->mode_config.fb_modifiers_not_supported = 1; + + fb = drm_internal_framebuffer_create(dev, &cmd, NULL); + KUNIT_EXPECT_EQ(test, priv->buffer_created, false); + KUNIT_EXPECT_EQ(test, PTR_ERR(fb), -EINVAL); +} + +/* Parameters for testing the drm_framebuffer_check_src_coords() function */ +struct drm_framebuffer_check_src_coords_case { + const char *name; + const int expect; + const unsigned int fb_size; + const uint32_t src_x; + const uint32_t src_y; + + /* Deltas to be applied to the source */ + 
const uint32_t dsrc_w; + const uint32_t dsrc_h; +}; + +static const struct drm_framebuffer_check_src_coords_case +drm_framebuffer_check_src_coords_cases[] = { + { .name = "Success: source fits into fb", + .expect = 0, + }, + { .name = "Fail: overflowing fb with x-axis coordinate", + .expect = -ENOSPC, .src_x = 1, .fb_size = UINT_MAX, + }, + { .name = "Fail: overflowing fb with y-axis coordinate", + .expect = -ENOSPC, .src_y = 1, .fb_size = UINT_MAX, + }, + { .name = "Fail: overflowing fb with source width", + .expect = -ENOSPC, .dsrc_w = 1, .fb_size = UINT_MAX - 1, + }, + { .name = "Fail: overflowing fb with source height", + .expect = -ENOSPC, .dsrc_h = 1, .fb_size = UINT_MAX - 1, + }, +}; + +static void drm_test_framebuffer_check_src_coords(struct kunit *test) +{ + const struct drm_framebuffer_check_src_coords_case *params = test->param_value; + const uint32_t src_x = params->src_x; + const uint32_t src_y = params->src_y; + const uint32_t src_w = (params->fb_size << 16) + params->dsrc_w; + const uint32_t src_h = (params->fb_size << 16) + params->dsrc_h; + const struct drm_framebuffer fb = { + .width = params->fb_size, + .height = params->fb_size + }; + int ret; + + ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, &fb); + KUNIT_EXPECT_EQ(test, ret, params->expect); +} + +static void +check_src_coords_test_to_desc(const struct drm_framebuffer_check_src_coords_case *t, + char *desc) +{ + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(check_src_coords, drm_framebuffer_check_src_coords_cases, + check_src_coords_test_to_desc); + +/* + * Test whether drm_framebuffer_cleanup() really removes the framebuffer object + * from the device's fb_list and decrements the number of framebuffers for that + * device, which is all it does. + */ +static void drm_test_framebuffer_cleanup(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct list_head *fb_list = &dev->mode_config.fb_list; + struct drm_format_info format = { }; + struct drm_framebuffer fb1 = { .dev = dev, .format = &format }; + struct drm_framebuffer fb2 = { .dev = dev, .format = &format }; + + /* This will result in [fb_list] -> fb2 -> fb1 */ + drm_framebuffer_init(dev, &fb1, NULL); + drm_framebuffer_init(dev, &fb2, NULL); + + drm_framebuffer_cleanup(&fb1); + + /* Now fb2 is the only element on fb_list */ + KUNIT_ASSERT_TRUE(test, list_is_singular(&fb2.head)); + KUNIT_ASSERT_EQ(test, dev->mode_config.num_fb, 1); + + drm_framebuffer_cleanup(&fb2); + + /* Now fb_list is empty */ + KUNIT_ASSERT_TRUE(test, list_empty(fb_list)); + KUNIT_ASSERT_EQ(test, dev->mode_config.num_fb, 0); +} + +/* + * Initialize a framebuffer, look up its id and test if the returned reference + * matches.
+ */ +static void drm_test_framebuffer_lookup(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_format_info format = { }; + struct drm_framebuffer expected_fb = { .dev = dev, .format = &format }; + struct drm_framebuffer *returned_fb; + uint32_t id = 0; + int ret; + + ret = drm_framebuffer_init(dev, &expected_fb, NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + id = expected_fb.base.id; + + /* Looking for expected_fb */ + returned_fb = drm_framebuffer_lookup(dev, NULL, id); + KUNIT_EXPECT_PTR_EQ(test, returned_fb, &expected_fb); + drm_framebuffer_put(returned_fb); + + drm_framebuffer_cleanup(&expected_fb); +} + +/* Try to lookup an id that is not linked to a framebuffer */ +static void drm_test_framebuffer_lookup_inexistent(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_framebuffer *fb; + uint32_t id = 0; + + /* Looking for an inexistent framebuffer */ + fb = drm_framebuffer_lookup(dev, NULL, id); + KUNIT_EXPECT_NULL(test, fb); +} + +/* + * Test if drm_framebuffer_init initializes the framebuffer successfully, + * asserting that its modeset object struct and its refcount are correctly + * set and that strictly one framebuffer is initialized. + */ +static void drm_test_framebuffer_init(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_format_info format = { }; + struct drm_framebuffer fb1 = { .dev = dev, .format = &format }; + struct drm_framebuffer_funcs funcs = { }; + int ret; + + ret = drm_framebuffer_init(dev, &fb1, &funcs); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* Check if fb->funcs is actually set to the drm_framebuffer_funcs passed on */ + KUNIT_EXPECT_PTR_EQ(test, fb1.funcs, &funcs); + + /* The fb->comm must be set to the current running process */ + KUNIT_EXPECT_STREQ(test, fb1.comm, current->comm); + + /* The fb->base must be successfully initialized */ + KUNIT_EXPECT_NE(test, fb1.base.id, 0); + KUNIT_EXPECT_EQ(test, fb1.base.type, DRM_MODE_OBJECT_FB); + KUNIT_EXPECT_EQ(test, kref_read(&fb1.base.refcount), 1); + KUNIT_EXPECT_PTR_EQ(test, fb1.base.free_cb, &drm_framebuffer_free); + + /* There must be just that one fb initialized */ + KUNIT_EXPECT_EQ(test, dev->mode_config.num_fb, 1); + KUNIT_EXPECT_PTR_EQ(test, dev->mode_config.fb_list.prev, &fb1.head); + KUNIT_EXPECT_PTR_EQ(test, dev->mode_config.fb_list.next, &fb1.head); + + drm_framebuffer_cleanup(&fb1); +} + +/* Try to init a framebuffer without setting its format */ +static void drm_test_framebuffer_init_bad_format(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_framebuffer fb1 = { .dev = dev, .format = NULL }; + struct drm_framebuffer_funcs funcs = { }; + int ret; + + /* Fails if fb.format isn't set */ + ret = drm_framebuffer_init(dev, &fb1, &funcs); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); +} + +/* + * Test calling drm_framebuffer_init() passing a framebuffer linked to a + * different drm_device parent from the one passed on the first argument, which + * must fail. 
+ */ +static void drm_test_framebuffer_init_dev_mismatch(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *right_dev = &priv->dev; + struct drm_device *wrong_dev; + struct device *wrong_dev_parent; + struct drm_format_info format = { }; + struct drm_framebuffer fb1 = { .dev = right_dev, .format = &format }; + struct drm_framebuffer_funcs funcs = { }; + int ret; + + wrong_dev_parent = kunit_device_register(test, "drm-kunit-wrong-device-mock"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wrong_dev_parent); + + wrong_dev = __drm_kunit_helper_alloc_drm_device(test, wrong_dev_parent, + sizeof(struct drm_device), + 0, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wrong_dev); + + /* Fails if fb->dev doesn't point to the drm_device passed as the first argument */ + ret = drm_framebuffer_init(wrong_dev, &fb1, &funcs); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); +} + +static void destroy_free_mock(struct drm_framebuffer *fb) +{ + struct drm_framebuffer_test_priv *priv = container_of(fb->dev, typeof(*priv), dev); + + priv->buffer_freed = true; +} + +static struct drm_framebuffer_funcs framebuffer_funcs_free_mock = { + .destroy = destroy_free_mock, +}; + +/* + * In summary, the drm_framebuffer_free() function must implicitly call + * fb->funcs->destroy() and guarantee that the framebuffer object is unregistered + * from the drm_device idr pool. + */ +static void drm_test_framebuffer_free(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_mode_object *obj; + struct drm_framebuffer fb = { + .dev = dev, + .funcs = &framebuffer_funcs_free_mock, + }; + int id, ret; + + priv->buffer_freed = false; + + /* + * Mock a framebuffer that was not unregistered at the moment of the + * drm_framebuffer_free() call.
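For context, drm_framebuffer_free() is the kref release callback (the init test above verifies that fb->base.free_cb points at it), so the test below invokes it directly only to simulate the last reference being dropped. In normal operation it is reached through the reference count, roughly:

drm_framebuffer_get(fb);	/* kref_get(&fb->base.refcount) */
drm_framebuffer_put(fb);	/* kref_put(&fb->base.refcount, fb->base.free_cb),
				 * i.e. drm_framebuffer_free() on the last put */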
+ */ + ret = drm_mode_object_add(dev, &fb.base, DRM_MODE_OBJECT_FB); + KUNIT_ASSERT_EQ(test, ret, 0); + id = fb.base.id; + + drm_framebuffer_free(&fb.base.refcount); + + /* The framebuffer object must be unregistered */ + obj = drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_FB); + KUNIT_EXPECT_PTR_EQ(test, obj, NULL); + KUNIT_EXPECT_EQ(test, fb.base.id, 0); + + /* Test if fb->funcs->destroy() was called */ + KUNIT_EXPECT_EQ(test, priv->buffer_freed, true); +} + static struct kunit_case drm_framebuffer_tests[] = { + KUNIT_CASE_PARAM(drm_test_framebuffer_check_src_coords, check_src_coords_gen_params), + KUNIT_CASE(drm_test_framebuffer_cleanup), KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params), + KUNIT_CASE(drm_test_framebuffer_free), + KUNIT_CASE(drm_test_framebuffer_init), + KUNIT_CASE(drm_test_framebuffer_init_bad_format), + KUNIT_CASE(drm_test_framebuffer_init_dev_mismatch), + KUNIT_CASE(drm_test_framebuffer_lookup), + KUNIT_CASE(drm_test_framebuffer_lookup_inexistent), + KUNIT_CASE(drm_test_framebuffer_modifiers_not_supported), { } }; diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig index 2385c56493b9..31ad582b7602 100644 --- a/drivers/gpu/drm/tidss/Kconfig +++ b/drivers/gpu/drm/tidss/Kconfig @@ -2,6 +2,7 @@ config DRM_TIDSS tristate "DRM Support for TI Keystone" depends on DRM && OF depends on ARM || ARM64 || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index d15f836dca95..2428b9aaa003 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -11,6 +11,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -109,6 +110,7 @@ static const struct drm_driver tidss_driver = { .fops = &tidss_fops, .release = tidss_release, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "tidss", .desc = "TI Keystone DSS", .date = "20180215", @@ -186,7 +188,7 @@ static int tidss_probe(struct platform_device *pdev) goto err_irq_uninstall; } - drm_fbdev_dma_setup(ddev, 32); + drm_client_setup(ddev, NULL); dev_dbg(dev, "%s done\n", __func__); diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index d3bd2d7a181e..24f9a245ba59 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -2,6 +2,7 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" depends on DRM && OF && ARM + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_BRIDGE diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index cd5eefa06060..8c9f3705aa6c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -14,6 +14,7 @@ #include <linux/pm_runtime.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -374,7 +375,8 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) goto init_failed; priv->is_registered = true; - drm_fbdev_dma_setup(ddev, bpp); + drm_client_setup_with_color_mode(ddev, bpp); + return 0; init_failed: @@ -472,6 +474,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver tilcdc_driver = { .driver_features = DRIVER_GEM | 
DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, #ifdef CONFIG_DEBUG_FS .debugfs_init = tilcdc_debugfs_init, #endif diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index f6889f649bc1..94cbdb1337c0 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -3,6 +3,7 @@ config DRM_ARCPGU tristate "ARC PGU" depends on DRM && OF + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER help @@ -13,10 +14,9 @@ config DRM_ARCPGU config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER - select DRM_VRAM_HELPER - select DRM_TTM - select DRM_TTM_HELPER help This is a KMS driver for qemu's stdvga output. Choose this option for qemu. @@ -26,6 +26,7 @@ config DRM_BOCHS config DRM_CIRRUS_QEMU tristate "Cirrus driver for QEMU emulated device" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help @@ -45,6 +46,7 @@ config DRM_CIRRUS_QEMU config DRM_GM12U320 tristate "GM12U320 driver for USB projectors" depends on DRM && USB && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help @@ -55,6 +57,7 @@ config DRM_OFDRM tristate "Open Firmware display driver" depends on DRM && MMU && OF && (PPC || COMPILE_TEST) select APERTURE_HELPERS + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help @@ -67,6 +70,7 @@ config DRM_OFDRM config DRM_PANEL_MIPI_DBI tristate "DRM support for MIPI DBI compatible panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -83,6 +87,7 @@ config DRM_SIMPLEDRM tristate "Simple framebuffer driver" depends on DRM && MMU select APERTURE_HELPERS + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help @@ -99,6 +104,7 @@ config DRM_SIMPLEDRM config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -113,6 +119,7 @@ config TINYDRM_ILI9163 tristate "DRM support for ILI9163 display panels" depends on DRM && SPI select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DBI @@ -125,6 +132,7 @@ config TINYDRM_ILI9163 config TINYDRM_ILI9225 tristate "DRM support for ILI9225 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -137,6 +145,7 @@ config TINYDRM_ILI9225 config TINYDRM_ILI9341 tristate "DRM support for ILI9341 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -150,6 +159,7 @@ config TINYDRM_ILI9341 config TINYDRM_ILI9486 tristate "DRM support for ILI9486 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -164,6 +174,7 @@ config TINYDRM_ILI9486 config TINYDRM_MI0283QT tristate "DRM support for MI0283QT" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -175,6 +186,7 @@ config TINYDRM_MI0283QT config TINYDRM_REPAPER tristate "DRM support for Pervasive Displays RePaper panels (V231)" depends on DRM && 
SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER help @@ -186,9 +198,31 @@ config TINYDRM_REPAPER If M is selected the module will be called repaper. +config TINYDRM_SHARP_MEMORY + tristate "DRM support for Sharp Memory LCD panels" + depends on DRM && SPI + select DRM_CLIENT_SELECTION + select DRM_GEM_DMA_HELPER + select DRM_KMS_HELPER + help + DRM Driver for the following Sharp Memory Panels: + * 1.00" Sharp Memory LCD (LS010B7DH04) + * 1.10" Sharp Memory LCD (LS011B7DH03) + * 1.20" Sharp Memory LCD (LS012B7DD01) + * 1.28" Sharp Memory LCD (LS013B7DH03) + * 1.26" Sharp Memory LCD (LS013B7DH05) + * 1.80" Sharp Memory LCD (LS018B7DH02) + * 2.70" Sharp Memory LCD (LS027B7DH01) + * 2.70" Sharp Memory LCD (LS027B7DH01A) + * 3.20" Sharp Memory LCD (LS032B7DD02) + * 4.40" Sharp Memory LCD (LS044Q7DH01) + + If M is selected the module will be called sharp_memory. + config TINYDRM_ST7586 tristate "DRM support for Sitronix ST7586 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -201,6 +235,7 @@ config TINYDRM_ST7586 config TINYDRM_ST7735R tristate "DRM support for Sitronix ST7715R/ST7735R display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 76dde89a044b..4aaf56f8707d 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -14,5 +14,6 @@ obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o +obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o obj-$(CONFIG_TINYDRM_ST7586) += st7586.o obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c index 4f8f3172379e..81abedec435d 100644 --- a/drivers/gpu/drm/tiny/arcpgu.c +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> @@ -371,6 +372,7 @@ static const struct drm_driver arcpgu_drm_driver = { .patchlevel = 0, .fops = &arcpgu_drm_ops, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, #ifdef CONFIG_DEBUG_FS .debugfs_init = arcpgu_debugfs_init, #endif @@ -394,7 +396,7 @@ static int arcpgu_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(&arcpgu->drm, 16); + drm_client_setup_with_fourcc(&arcpgu->drm, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index e738bb858316..6f91ff1dbf7e 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -1,22 +1,26 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include <linux/bug.h> +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fbdev_ttm.h> +#include <drm/drm_fbdev_shmem.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_gem_shmem_helper.h> 
#include <drm/drm_managed.h> #include <drm/drm_module.h> +#include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> #include <video/vga.h> @@ -72,6 +76,8 @@ enum bochs_types { }; struct bochs_device { + struct drm_device dev; + /* hw */ void __iomem *mmio; int ioports; @@ -86,14 +92,19 @@ struct bochs_device { u16 yres_virtual; u32 stride; u32 bpp; - const struct drm_edid *drm_edid; /* drm */ - struct drm_device *dev; - struct drm_simple_display_pipe pipe; + struct drm_plane primary_plane; + struct drm_crtc crtc; + struct drm_encoder encoder; struct drm_connector connector; }; +static struct bochs_device *to_bochs_device(const struct drm_device *dev) +{ + return container_of(dev, struct bochs_device, dev); +} + /* ---------------------------------------------------------------------- */ static __always_inline bool bochs_uses_mmio(struct bochs_device *bochs) @@ -178,12 +189,14 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs) #define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b) #endif -static int bochs_get_edid_block(void *data, u8 *buf, - unsigned int block, size_t len) +static int bochs_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct bochs_device *bochs = data; size_t i, start = block * EDID_LENGTH; + if (!bochs->mmio) + return -1; + if (start + len > 0x400 /* vga register offset */) return -1; @@ -193,43 +206,38 @@ static int bochs_get_edid_block(void *data, u8 *buf, return 0; } -static int bochs_hw_load_edid(struct bochs_device *bochs) +static const struct drm_edid *bochs_hw_read_edid(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct bochs_device *bochs = to_bochs_device(dev); u8 header[8]; - if (!bochs->mmio) - return -1; - /* check header to detect whenever edid support is enabled in qemu */ bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header)); if (drm_edid_header_is_valid(header) != 8) - return -1; + return NULL; - drm_edid_free(bochs->drm_edid); - bochs->drm_edid = drm_edid_read_custom(&bochs->connector, - bochs_get_edid_block, bochs); - if (!bochs->drm_edid) - return -1; + drm_dbg(dev, "Found EDID data blob.\n"); - return 0; + return drm_edid_read_custom(connector, bochs_get_edid_block, bochs); } -static int bochs_hw_init(struct drm_device *dev) +static int bochs_hw_init(struct bochs_device *bochs) { - struct bochs_device *bochs = dev->dev_private; + struct drm_device *dev = &bochs->dev; struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned long addr, size, mem, ioaddr, iosize; u16 id; if (pdev->resource[2].flags & IORESOURCE_MEM) { + ioaddr = pci_resource_start(pdev, 2); + iosize = pci_resource_len(pdev, 2); /* mmio bar with vga and bochs registers present */ - if (pci_request_region(pdev, 2, "bochs-drm") != 0) { + if (!devm_request_mem_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request mmio region\n"); return -EBUSY; } - ioaddr = pci_resource_start(pdev, 2); - iosize = pci_resource_len(pdev, 2); - bochs->mmio = ioremap(ioaddr, iosize); + bochs->mmio = devm_ioremap(&pdev->dev, ioaddr, iosize); if (bochs->mmio == NULL) { DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; @@ -237,7 +245,7 @@ static int bochs_hw_init(struct drm_device *dev) } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) { ioaddr = VBE_DISPI_IOPORT_INDEX; iosize = 2; - if (!request_region(ioaddr, iosize, "bochs-drm")) { + if (!devm_request_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request ioports\n"); return 
-EBUSY; } @@ -267,10 +275,10 @@ static int bochs_hw_init(struct drm_device *dev) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) + if (!devm_request_mem_region(&pdev->dev, addr, size, "bochs-drm")) DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); - bochs->fb_map = ioremap(addr, size); + bochs->fb_map = devm_ioremap_wc(&pdev->dev, addr, size); if (bochs->fb_map == NULL) { DRM_ERROR("Cannot map framebuffer\n"); return -ENOMEM; @@ -299,22 +307,6 @@ noext: return 0; } -static void bochs_hw_fini(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - - /* TODO: shot down existing vram mappings */ - - if (bochs->mmio) - iounmap(bochs->mmio); - if (bochs->ioports) - release_region(VBE_DISPI_IOPORT_INDEX, 2); - if (bochs->fb_map) - iounmap(bochs->fb_map); - pci_release_regions(to_pci_dev(dev->dev)); - drm_edid_free(bochs->drm_edid); -} - static void bochs_hw_blank(struct bochs_device *bochs, bool blank) { DRM_DEBUG_DRIVER("hw_blank %d\n", blank); @@ -330,7 +322,7 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode { int idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; bochs->xres = mode->hdisplay; @@ -366,7 +358,7 @@ static void bochs_hw_setformat(struct bochs_device *bochs, const struct drm_form { int idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; DRM_DEBUG_DRIVER("format %c%c%c%c\n", @@ -397,7 +389,7 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid unsigned long offset; unsigned int vx, vy, vwidth, idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; bochs->stride = stride; @@ -419,83 +411,156 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid /* ---------------------------------------------------------------------- */ -static const uint32_t bochs_formats[] = { +static const uint32_t bochs_primary_plane_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_BGRX8888, }; -static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state) +static int bochs_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { - struct drm_gem_vram_object *gbo; - s64 gpu_addr; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; - if (!state->fb || !bochs->stride) + return 0; +} + +static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct bochs_device *bochs = to_bochs_device(dev); + struct drm_plane_state *plane_state = plane->state; + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_atomic_helper_damage_iter iter; + struct drm_rect damage; + + if (!fb || !bochs->stride) return; - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); - 
gpu_addr = drm_gem_vram_offset(gbo); - if (WARN_ON_ONCE(gpu_addr < 0)) - return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map); + iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, &damage)); + drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage); + } + + /* Always scanout image at VRAM offset 0 */ bochs_hw_setbase(bochs, - state->crtc_x, - state->crtc_y, - state->fb->pitches[0], - state->fb->offsets[0] + gpu_addr); - bochs_hw_setformat(bochs, state->fb->format); + plane_state->crtc_x, + plane_state->crtc_y, + fb->pitches[0], + 0); + bochs_hw_setformat(bochs, fb->format); } -static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) +static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = bochs_primary_plane_helper_atomic_check, + .atomic_update = bochs_primary_plane_helper_atomic_update, +}; + +static const struct drm_plane_funcs bochs_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + DRM_GEM_SHADOW_PLANE_FUNCS +}; + +static void bochs_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct bochs_device *bochs = to_bochs_device(crtc->dev); + struct drm_crtc_state *crtc_state = crtc->state; bochs_hw_setmode(bochs, &crtc_state->mode); - bochs_plane_update(bochs, plane_state); } -static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe) +static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - bochs_hw_blank(bochs, true); + if (!crtc_state->enable) + return 0; + + return drm_atomic_helper_check_crtc_primary_plane(crtc_state); } -static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) +static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; +} - bochs_plane_update(bochs, pipe->plane.state); +static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *crtc_state) +{ + struct bochs_device *bochs = to_bochs_device(crtc->dev); + + bochs_hw_blank(bochs, true); } -static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { - .enable = bochs_pipe_enable, - .disable = bochs_pipe_disable, - .update = bochs_pipe_update, - .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, - .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, +static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = { + .mode_set_nofb = bochs_crtc_helper_mode_set_nofb, + .atomic_check = bochs_crtc_helper_atomic_check, + .atomic_enable = bochs_crtc_helper_atomic_enable, + .atomic_disable = bochs_crtc_helper_atomic_disable, +}; + +static const struct drm_crtc_funcs bochs_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + 
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; -static int bochs_connector_get_modes(struct drm_connector *connector) +static const struct drm_encoder_funcs bochs_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int bochs_connector_helper_get_modes(struct drm_connector *connector) { + const struct drm_edid *edid; int count; - count = drm_edid_connector_add_modes(connector); + edid = bochs_hw_read_edid(connector); - if (!count) { + if (edid) { + drm_edid_connector_update(connector, edid); + count = drm_edid_connector_add_modes(connector); + drm_edid_free(edid); + } else { + drm_edid_connector_update(connector, NULL); count = drm_add_modes_noedid(connector, 8192, 8192); drm_set_preferred_mode(connector, defx, defy); } + return count; } -static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { - .get_modes = bochs_connector_get_modes, +static const struct drm_connector_helper_funcs bochs_connector_helper_funcs = { + .get_modes = bochs_connector_helper_get_modes, }; -static const struct drm_connector_funcs bochs_connector_connector_funcs = { +static const struct drm_connector_funcs bochs_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, @@ -503,68 +568,89 @@ static const struct drm_connector_funcs bochs_connector_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static void bochs_connector_init(struct drm_device *dev) +static enum drm_mode_status bochs_mode_config_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode) { - struct bochs_device *bochs = dev->dev_private; - struct drm_connector *connector = &bochs->connector; - - drm_connector_init(dev, connector, &bochs_connector_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs); - - bochs_hw_load_edid(bochs); - if (bochs->drm_edid) { - DRM_INFO("Found EDID data blob.\n"); - drm_connector_attach_edid_property(connector); - drm_edid_connector_update(&bochs->connector, bochs->drm_edid); - } -} + struct bochs_device *bochs = to_bochs_device(dev); + const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888); + u64 pitch; -static struct drm_framebuffer * -bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && - mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) - return ERR_PTR(-EINVAL); + if (drm_WARN_ON(dev, !format)) + return MODE_ERROR; + + pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay); + if (!pitch) + return MODE_BAD_WIDTH; + if (mode->vdisplay > DIV_ROUND_DOWN_ULL(bochs->fb_size, pitch)) + return MODE_MEM; - return drm_gem_fb_create(dev, file, mode_cmd); + return MODE_OK; } -static const struct drm_mode_config_funcs bochs_mode_funcs = { - .fb_create = bochs_gem_fb_create, - .mode_valid = drm_vram_helper_mode_valid, +static const struct drm_mode_config_funcs bochs_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .mode_valid = bochs_mode_config_mode_valid, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static int bochs_kms_init(struct bochs_device *bochs) { + struct drm_device *dev = &bochs->dev; + struct drm_plane *primary_plane; + struct drm_crtc *crtc; + struct drm_connector 
*connector; + struct drm_encoder *encoder; int ret; - ret = drmm_mode_config_init(bochs->dev); + ret = drmm_mode_config_init(dev); if (ret) return ret; - bochs->dev->mode_config.max_width = 8192; - bochs->dev->mode_config.max_height = 8192; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.quirk_addfb_prefer_host_byte_order = true; + + dev->mode_config.funcs = &bochs_mode_config_funcs; - bochs->dev->mode_config.preferred_depth = 24; - bochs->dev->mode_config.prefer_shadow = 0; - bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; + primary_plane = &bochs->primary_plane; + ret = drm_universal_plane_init(dev, primary_plane, 0, + &bochs_primary_plane_funcs, + bochs_primary_plane_formats, + ARRAY_SIZE(bochs_primary_plane_formats), + NULL, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + drm_plane_helper_add(primary_plane, &bochs_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); + + crtc = &bochs->crtc; + ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, + &bochs_crtc_funcs, NULL); + if (ret) + return ret; + drm_crtc_helper_add(crtc, &bochs_crtc_helper_funcs); - bochs->dev->mode_config.funcs = &bochs_mode_funcs; + encoder = &bochs->encoder; + ret = drm_encoder_init(dev, encoder, &bochs_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); - bochs_connector_init(bochs->dev); - drm_simple_display_pipe_init(bochs->dev, - &bochs->pipe, - &bochs_pipe_funcs, - bochs_formats, - ARRAY_SIZE(bochs_formats), - NULL, - &bochs->connector); + connector = &bochs->connector; + ret = drm_connector_init(dev, connector, &bochs_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) + return ret; + drm_connector_helper_add(connector, &bochs_connector_helper_funcs); + drm_connector_attach_edid_property(connector); + drm_connector_attach_encoder(connector, encoder); - drm_mode_config_reset(bochs->dev); + drm_mode_config_reset(dev); return 0; } @@ -572,34 +658,19 @@ static int bochs_kms_init(struct bochs_device *bochs) /* ---------------------------------------------------------------------- */ /* drm interface */ -static int bochs_load(struct drm_device *dev) +static int bochs_load(struct bochs_device *bochs) { - struct bochs_device *bochs; int ret; - bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); - if (bochs == NULL) - return -ENOMEM; - dev->dev_private = bochs; - bochs->dev = dev; - - ret = bochs_hw_init(dev); + ret = bochs_hw_init(bochs); if (ret) return ret; - ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size); - if (ret) - goto err_hw_fini; - ret = bochs_kms_init(bochs); if (ret) - goto err_hw_fini; + return ret; return 0; - -err_hw_fini: - bochs_hw_fini(dev); - return ret; } DEFINE_DRM_GEM_FOPS(bochs_fops); @@ -612,7 +683,8 @@ static const struct drm_driver bochs_driver = { .date = "20130925", .major = 1, .minor = 0, - DRM_GEM_VRAM_DRIVER, + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; /* ---------------------------------------------------------------------- */ @@ -644,23 +716,18 @@ static const struct dev_pm_ops bochs_pm_ops = { static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct bochs_device *bochs; struct drm_device *dev; - unsigned long fbsize; int ret; - fbsize = pci_resource_len(pdev, 0); - if (fbsize < 4 * 1024 * 1024) { - DRM_ERROR("less than 4 MB video memory, ignoring device\n"); - return -ENOMEM; - 
} - - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, bochs_driver.name); if (ret) return ret; - dev = drm_dev_alloc(&bochs_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + bochs = devm_drm_dev_alloc(&pdev->dev, &bochs_driver, struct bochs_device, dev); + if (IS_ERR(bochs)) + return PTR_ERR(bochs); + dev = &bochs->dev; ret = pcim_enable_device(pdev); if (ret) @@ -668,19 +735,18 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent pci_set_drvdata(pdev, dev); - ret = bochs_load(dev); + ret = bochs_load(bochs); if (ret) goto err_free_dev; ret = drm_dev_register(dev, 0); if (ret) - goto err_hw_fini; + goto err_free_dev; + + drm_client_setup(dev, NULL); - drm_fbdev_ttm_setup(dev, 32); return ret; -err_hw_fini: - bochs_hw_fini(dev); err_free_dev: drm_dev_put(dev); return ret; @@ -692,7 +758,6 @@ static void bochs_pci_remove(struct pci_dev *pdev) drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); - bochs_hw_fini(dev); drm_dev_put(dev); } diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index e31e1df029ab..4d2adcaeaa60 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,6 +16,7 @@ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com> */ +#include <linux/aperture.h> #include <linux/iosys-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -23,10 +24,10 @@ #include <video/cirrus.h> #include <video/vga.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> @@ -664,6 +665,7 @@ static const struct drm_driver cirrus_driver = { .fops = &cirrus_fops, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; static int cirrus_pci_probe(struct pci_dev *pdev, @@ -673,7 +675,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, struct cirrus_device *cirrus; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, cirrus_driver.name); if (ret) return ret; @@ -718,7 +720,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, if (ret) return ret; - drm_fbdev_shmem_setup(dev, 16); + drm_client_setup(dev, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 0bd7707c053e..0c17ae532fb4 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> @@ -632,6 +633,7 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gm12u320_gem_prime_import, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = { @@ -706,7 +708,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface, if (ret) goto err_put_device; - drm_fbdev_shmem_setup(dev, 0); + drm_client_setup(dev, NULL); return 0; diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 2e631282edeb..6b0d1846cfcf 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -17,6 +17,7 @@ #include <linux/spi/spi.h> #include 
<drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -194,6 +195,7 @@ static const struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hx8357d_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "hx8357d", .desc = "HX8357D", @@ -256,7 +258,7 @@ static int hx8357d_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index 86f9d8834901..5eb39ca1a855 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -8,6 +8,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -113,6 +114,7 @@ static struct drm_driver ili9163_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9163_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9163", .desc = "Ilitek ILI9163", @@ -185,7 +187,7 @@ static int ili9163_probe(struct spi_device *spi) if (ret) return ret; - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index b6b7a49147bf..875e2d09729a 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -17,6 +17,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> @@ -360,6 +361,7 @@ static const struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9225_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "ili9225", .desc = "Ilitek ILI9225", .date = "20171106", @@ -426,7 +428,7 @@ static int ili9225_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 8bcada30af71..c1dfdfbbd30c 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -16,6 +16,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -150,6 +151,7 @@ static const struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9341_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", .desc = "Ilitek ILI9341", @@ -218,7 +220,7 @@ static int ili9341_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 70d366260041..7e46a720d5e2 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -15,6 +15,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include 
<drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -172,6 +173,7 @@ static const struct drm_driver ili9486_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9486_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9486", .desc = "Ilitek ILI9486", @@ -247,7 +249,7 @@ static int ili9486_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index cdc5423990ca..f1461c55dba6 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -14,6 +14,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -154,6 +155,7 @@ static const struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &mi0283qt_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", .desc = "Multi-Inno MI0283QT", @@ -226,7 +228,7 @@ static int mi0283qt_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 35996f7eedac..220c1244b3c0 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -1,12 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/aperture.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> @@ -1219,7 +1220,7 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, fb_pgbase = round_down(fb_base, PAGE_SIZE); fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE); - ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize); + ret = devm_aperture_acquire_for_platform_device(pdev, fb_pgbase, fb_pgsize); if (ret) { drm_err(dev, "could not acquire memory range %pr: error %d\n", &res, ret); return ERR_PTR(ret); @@ -1344,6 +1345,7 @@ DEFINE_DRM_GEM_FOPS(ofdrm_fops); static struct drm_driver ofdrm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -1361,7 +1363,6 @@ static int ofdrm_probe(struct platform_device *pdev) { struct ofdrm_device *odev; struct drm_device *dev; - unsigned int color_mode; int ret; odev = ofdrm_device_create(&ofdrm_driver, pdev); @@ -1373,11 +1374,7 @@ static int ofdrm_probe(struct platform_device *pdev) if (ret) return ret; - color_mode = drm_format_info_bpp(odev->format, 0); - if (color_mode == 16) - color_mode = odev->format->depth; // can be 15 or 16 - - drm_fbdev_shmem_setup(dev, color_mode); + drm_client_setup(dev, odev->format); return 0; } diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index f753cdffe6f8..e66729b31bd6 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -15,6 +15,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include 
<drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -264,6 +265,7 @@ static const struct drm_driver panel_mipi_dbi_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &panel_mipi_dbi_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "panel-mipi-dbi", .desc = "MIPI DBI compatible display panel", @@ -388,7 +390,7 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 1f78aa3d26bb..77944eb17b3c 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -22,6 +22,7 @@ #include <linux/thermal.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> @@ -913,6 +914,7 @@ static const struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &repaper_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "repaper", .desc = "Pervasive Displays RePaper e-ink panels", .date = "20170405", @@ -1118,7 +1120,7 @@ static int repaper_probe(struct spi_device *spi) DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c new file mode 100644 index 000000000000..2d2315bd6aef --- /dev/null +++ b/drivers/gpu/drm/tiny/sharp-memory.c @@ -0,0 +1,671 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> +#include <drm/drm_connector.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_dma_helper.h> +#include <drm/drm_fbdev_dma.h> +#include <drm/drm_format_helper.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_modes.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_rect.h> +#include <linux/bitrev.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/kthread.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pwm.h> +#include <linux/spi/spi.h> + +#define SHARP_MODE_PERIOD 8 +#define SHARP_ADDR_PERIOD 8 +#define SHARP_DUMMY_PERIOD 8 + +#define SHARP_MEMORY_DISPLAY_MAINTAIN_MODE 0 +#define SHARP_MEMORY_DISPLAY_UPDATE_MODE 1 +#define SHARP_MEMORY_DISPLAY_CLEAR_MODE 4 + +enum sharp_memory_model { + LS010B7DH04, + LS011B7DH03, + LS012B7DD01, + LS013B7DH03, + LS013B7DH05, + LS018B7DH02, + LS027B7DH01, + LS027B7DH01A, + LS032B7DD02, + LS044Q7DH01, +}; + +enum sharp_memory_vcom_mode { + SHARP_MEMORY_SOFTWARE_VCOM, + SHARP_MEMORY_EXTERNAL_VCOM, + SHARP_MEMORY_PWM_VCOM +}; + +struct sharp_memory_device { + struct drm_device drm; + struct spi_device *spi; + + const struct drm_display_mode *mode; + + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector connector; + + struct gpio_desc *enable_gpio; + + struct task_struct *sw_vcom_signal; + struct pwm_device *pwm_vcom_signal; + + enum sharp_memory_vcom_mode vcom_mode; + 
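The pitch and tx_buffer_size fields declared just below encode the panel's SPI frame layout: one mode/vcom byte up front, then per line an address byte, one bit per pixel, and a trailing dummy byte. A standalone worked example of that arithmetic, assuming the 400x240 LS027B7DH01 (the probe code later computes the same values from smd->mode):

#include <stdio.h>

#define SHARP_MODE_PERIOD  8	/* one mode/vcom byte, in bits */
#define SHARP_ADDR_PERIOD  8	/* one line-address byte per line */
#define SHARP_DUMMY_PERIOD 8	/* one trailing dummy byte per line */

int main(void)
{
	unsigned int hdisplay = 400, vdisplay = 240;	/* LS027B7DH01 */
	/* bytes per line: address byte + 1 bpp pixel data + dummy byte */
	unsigned int pitch =
		(SHARP_ADDR_PERIOD + hdisplay + SHARP_DUMMY_PERIOD) / 8;
	/* whole frame: mode byte + vdisplay full lines */
	unsigned int tx_buffer_size =
		(SHARP_MODE_PERIOD +
		 (SHARP_ADDR_PERIOD + hdisplay + SHARP_DUMMY_PERIOD) *
		 vdisplay) / 8;

	printf("pitch = %u bytes\n", pitch);			/* 52 */
	printf("tx_buffer_size = %u bytes\n", tx_buffer_size);	/* 12481 */
	return 0;
}
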
u8 vcom; + + u32 pitch; + u32 tx_buffer_size; + u8 *tx_buffer; + + /* When vcom_mode == "software" a kthread is used to periodically send a + * 'maintain display' message over spi. This mutex ensures tx_buffer access + * and spi bus usage is synchronized in this case. + */ + struct mutex tx_mutex; +}; + +static inline int sharp_memory_spi_write(struct spi_device *spi, void *buf, size_t len) +{ + /* Reverse the bit order */ + for (u8 *b = buf; b < ((u8 *)buf) + len; ++b) + *b = bitrev8(*b); + + return spi_write(spi, buf, len); +} + +static inline struct sharp_memory_device *drm_to_sharp_memory_device(struct drm_device *drm) +{ + return container_of(drm, struct sharp_memory_device, drm); +} + +DEFINE_DRM_GEM_DMA_FOPS(sharp_memory_fops); + +static const struct drm_driver sharp_memory_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &sharp_memory_fops, + DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, + .name = "sharp_memory_display", + .desc = "Sharp Display Memory LCD", + .date = "20231129", + .major = 1, + .minor = 0, +}; + +static inline void sharp_memory_set_tx_buffer_mode(u8 *buffer, u8 mode, u8 vcom) +{ + *buffer = mode | (vcom << 1); +} + +static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer, + struct drm_rect clip, + u32 pitch) +{ + for (u32 line = 0; line < clip.y2; ++line) + buffer[line * pitch] = line + 1; +} + +static void sharp_memory_set_tx_buffer_data(u8 *buffer, + struct drm_framebuffer *fb, + struct drm_rect clip, + u32 pitch, + struct drm_format_conv_state *fmtcnv_state) +{ + int ret; + struct iosys_map dst, vmap; + struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0); + + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return; + + iosys_map_set_vaddr(&dst, buffer); + iosys_map_set_vaddr(&vmap, dma_obj->vaddr); + + drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state); + + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); +} + +static int sharp_memory_update_display(struct sharp_memory_device *smd, + struct drm_framebuffer *fb, + struct drm_rect clip, + struct drm_format_conv_state *fmtcnv_state) +{ + int ret; + u32 pitch = smd->pitch; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + u32 tx_buffer_size = smd->tx_buffer_size; + + mutex_lock(&smd->tx_mutex); + + /* Populate the transmit buffer with frame data */ + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], + SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom); + sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch); + sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state); + + ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static int sharp_memory_maintain_display(struct sharp_memory_device *smd) +{ + int ret; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + + mutex_lock(&smd->tx_mutex); + + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_MAINTAIN_MODE, vcom); + tx_buffer[1] = 0; /* Write dummy data */ + ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static int sharp_memory_clear_display(struct sharp_memory_device *smd) +{ + int ret; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + + mutex_lock(&smd->tx_mutex); + + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_CLEAR_MODE, vcom); + tx_buffer[1] = 0; /* write dummy data */ + ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2); + + 
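sharp_memory_spi_write() above reverses the bits of every byte before handing the buffer to spi_write(), presumably because the panel latches data LSB-first while the SPI controller shifts MSB-first. The kernel uses bitrev8() from <linux/bitrev.h>; a self-contained equivalent for illustration:

#include <stdint.h>
#include <stdio.h>

/* Reverse the bit order of one byte, like the kernel's bitrev8(). */
static uint8_t bitrev8(uint8_t b)
{
	b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;	/* swap nibbles */
	b = (b & 0xcc) >> 2 | (b & 0x33) << 2;	/* swap bit pairs */
	b = (b & 0xaa) >> 1 | (b & 0x55) << 1;	/* swap adjacent bits */
	return b;
}

int main(void)
{
	printf("%#x -> %#x\n", 0x01, bitrev8(0x01));	/* 0x1 -> 0x80 */
	return 0;
}
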
mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect, + struct drm_format_conv_state *fmtconv_state) +{ + struct drm_rect clip; + struct sharp_memory_device *smd = drm_to_sharp_memory_device(fb->dev); + + /* Always update a full line regardless of what is dirty */ + clip.x1 = 0; + clip.x2 = fb->width; + clip.y1 = rect->y1; + clip.y2 = rect->y2; + + sharp_memory_update_display(smd, fb, clip, fmtconv_state); +} + +static int sharp_memory_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct sharp_memory_device *smd; + struct drm_crtc_state *crtc_state; + + smd = container_of(plane, struct sharp_memory_device, plane); + crtc_state = drm_atomic_get_new_crtc_state(state, &smd->crtc); + + return drm_atomic_helper_check_plane_state(plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} + +static void sharp_memory_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *plane_state = plane->state; + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; + struct sharp_memory_device *smd; + struct drm_rect rect; + + smd = container_of(plane, struct sharp_memory_device, plane); + if (!smd->crtc.state->active) + return; + + if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect)) + sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state); + + drm_format_conv_state_release(&fmtcnv_state); +} + +static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = { + .prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = sharp_memory_plane_atomic_check, + .atomic_update = sharp_memory_plane_atomic_update, +}; + +static bool sharp_memory_format_mod_supported(struct drm_plane *plane, + u32 format, + u64 modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + +static const struct drm_plane_funcs sharp_memory_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = sharp_memory_format_mod_supported, +}; + +static enum drm_mode_status sharp_memory_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, smd->mode); +} + +static int sharp_memory_crtc_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + int ret; + + if (!crtc_state->enable) + goto out; + + ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state); + if (ret) + return ret; + +out: + return drm_atomic_add_affected_planes(state, crtc); +} + +static int sharp_memory_sw_vcom_signal_thread(void *data) +{ + struct sharp_memory_device *smd = data; + + while (!kthread_should_stop()) { + smd->vcom ^= 1; /* Toggle vcom */ + sharp_memory_maintain_display(smd); + msleep(1000); + } + + return 0; +} + +static void sharp_memory_crtc_enable(struct drm_crtc *crtc, + struct drm_atomic_state 
*state) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + sharp_memory_clear_display(smd); + + if (smd->enable_gpio) + gpiod_set_value(smd->enable_gpio, 1); +} + +static void sharp_memory_crtc_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + sharp_memory_clear_display(smd); + + if (smd->enable_gpio) + gpiod_set_value(smd->enable_gpio, 0); +} + +static const struct drm_crtc_helper_funcs sharp_memory_crtc_helper_funcs = { + .mode_valid = sharp_memory_crtc_mode_valid, + .atomic_check = sharp_memory_crtc_check, + .atomic_enable = sharp_memory_crtc_enable, + .atomic_disable = sharp_memory_crtc_disable, +}; + +static const struct drm_crtc_funcs sharp_memory_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_encoder_funcs sharp_memory_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int sharp_memory_connector_get_modes(struct drm_connector *connector) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(connector->dev); + + return drm_connector_helper_get_modes_fixed(connector, smd->mode); +} + +static const struct drm_connector_helper_funcs sharp_memory_connector_hfuncs = { + .get_modes = sharp_memory_connector_get_modes, +}; + +static const struct drm_connector_funcs sharp_memory_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + +}; + +static const struct drm_mode_config_funcs sharp_memory_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const struct drm_display_mode sharp_memory_ls010b7dh04_mode = { + DRM_SIMPLE_MODE(128, 128, 18, 18), +}; + +static const struct drm_display_mode sharp_memory_ls011b7dh03_mode = { + DRM_SIMPLE_MODE(160, 68, 25, 10), +}; + +static const struct drm_display_mode sharp_memory_ls012b7dd01_mode = { + DRM_SIMPLE_MODE(184, 38, 29, 6), +}; + +static const struct drm_display_mode sharp_memory_ls013b7dh03_mode = { + DRM_SIMPLE_MODE(128, 128, 23, 23), +}; + +static const struct drm_display_mode sharp_memory_ls013b7dh05_mode = { + DRM_SIMPLE_MODE(144, 168, 20, 24), +}; + +static const struct drm_display_mode sharp_memory_ls018b7dh02_mode = { + DRM_SIMPLE_MODE(230, 303, 27, 36), +}; + +static const struct drm_display_mode sharp_memory_ls027b7dh01_mode = { + DRM_SIMPLE_MODE(400, 240, 58, 35), +}; + +static const struct drm_display_mode sharp_memory_ls032b7dd02_mode = { + DRM_SIMPLE_MODE(336, 536, 42, 68), +}; + +static const struct drm_display_mode sharp_memory_ls044q7dh01_mode = { + DRM_SIMPLE_MODE(320, 240, 89, 67), +}; + +static const struct spi_device_id sharp_memory_ids[] = { + {"ls010b7dh04", (kernel_ulong_t)&sharp_memory_ls010b7dh04_mode}, + {"ls011b7dh03", (kernel_ulong_t)&sharp_memory_ls011b7dh03_mode}, + {"ls012b7dd01", (kernel_ulong_t)&sharp_memory_ls012b7dd01_mode}, + {"ls013b7dh03", (kernel_ulong_t)&sharp_memory_ls013b7dh03_mode}, + {"ls013b7dh05", 
(kernel_ulong_t)&sharp_memory_ls013b7dh05_mode}, + {"ls018b7dh02", (kernel_ulong_t)&sharp_memory_ls018b7dh02_mode}, + {"ls027b7dh01", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode}, + {"ls027b7dh01a", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode}, + {"ls032b7dd02", (kernel_ulong_t)&sharp_memory_ls032b7dd02_mode}, + {"ls044q7dh01", (kernel_ulong_t)&sharp_memory_ls044q7dh01_mode}, + {}, +}; +MODULE_DEVICE_TABLE(spi, sharp_memory_ids); + +static const struct of_device_id sharp_memory_of_match[] = { + {.compatible = "sharp,ls010b7dh04", &sharp_memory_ls010b7dh04_mode}, + {.compatible = "sharp,ls011b7dh03", &sharp_memory_ls011b7dh03_mode}, + {.compatible = "sharp,ls012b7dd01", &sharp_memory_ls012b7dd01_mode}, + {.compatible = "sharp,ls013b7dh03", &sharp_memory_ls013b7dh03_mode}, + {.compatible = "sharp,ls013b7dh05", &sharp_memory_ls013b7dh05_mode}, + {.compatible = "sharp,ls018b7dh02", &sharp_memory_ls018b7dh02_mode}, + {.compatible = "sharp,ls027b7dh01", &sharp_memory_ls027b7dh01_mode}, + {.compatible = "sharp,ls027b7dh01a", &sharp_memory_ls027b7dh01_mode}, + {.compatible = "sharp,ls032b7dd02", &sharp_memory_ls032b7dd02_mode}, + {.compatible = "sharp,ls044q7dh01", &sharp_memory_ls044q7dh01_mode}, + {}, +}; +MODULE_DEVICE_TABLE(of, sharp_memory_of_match); + +static const u32 sharp_memory_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static int sharp_memory_pipe_init(struct drm_device *dev, + struct sharp_memory_device *smd, + const u32 *formats, unsigned int format_count, + const u64 *format_modifiers) +{ + int ret; + struct drm_encoder *encoder = &smd->encoder; + struct drm_plane *plane = &smd->plane; + struct drm_crtc *crtc = &smd->crtc; + struct drm_connector *connector = &smd->connector; + + drm_plane_helper_add(plane, &sharp_memory_plane_helper_funcs); + ret = drm_universal_plane_init(dev, plane, 0, + &sharp_memory_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &sharp_memory_crtc_helper_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &sharp_memory_crtc_funcs, NULL); + if (ret) + return ret; + + encoder->possible_crtcs = drm_crtc_mask(crtc); + ret = drm_encoder_init(dev, encoder, &sharp_memory_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + + ret = drm_connector_init(&smd->drm, &smd->connector, + &sharp_memory_connector_funcs, + DRM_MODE_CONNECTOR_SPI); + if (ret) + return ret; + + drm_connector_helper_add(&smd->connector, + &sharp_memory_connector_hfuncs); + + return drm_connector_attach_encoder(connector, encoder); +} + +static int sharp_memory_init_pwm_vcom_signal(struct sharp_memory_device *smd) +{ + int ret; + struct device *dev = &smd->spi->dev; + struct pwm_state pwm_state; + + smd->pwm_vcom_signal = devm_pwm_get(dev, NULL); + if (IS_ERR(smd->pwm_vcom_signal)) + return dev_err_probe(dev, PTR_ERR(smd->pwm_vcom_signal), + "Could not get pwm device\n"); + + pwm_init_state(smd->pwm_vcom_signal, &pwm_state); + pwm_set_relative_duty_cycle(&pwm_state, 1, 10); + pwm_state.enabled = true; + ret = pwm_apply_might_sleep(smd->pwm_vcom_signal, &pwm_state); + if (ret) + return dev_err_probe(dev, -EINVAL, "Could not apply pwm state\n"); + + return 0; +} + +static int sharp_memory_probe(struct spi_device *spi) +{ + int ret; + struct device *dev; + struct sharp_memory_device *smd; + struct drm_device *drm; + const char *vcom_mode_str; + + dev = &spi->dev; + + ret = spi_setup(spi); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to setup spi 
device\n"); + + if (!dev->coherent_dma_mask) { + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return dev_err_probe(dev, ret, "Failed to set dma mask\n"); + } + + smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver, + struct sharp_memory_device, drm); + if (IS_ERR(smd)) + return PTR_ERR(smd); + + spi_set_drvdata(spi, smd); + + smd->spi = spi; + drm = &smd->drm; + ret = drmm_mode_config_init(drm); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize drm config\n"); + + smd->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); + if (!smd->enable_gpio) + dev_warn(dev, "Enable gpio not defined\n"); + + drm->mode_config.funcs = &sharp_memory_mode_config_funcs; + smd->mode = spi_get_device_match_data(spi); + + smd->pitch = (SHARP_ADDR_PERIOD + smd->mode->hdisplay + SHARP_DUMMY_PERIOD) / 8; + smd->tx_buffer_size = (SHARP_MODE_PERIOD + + (SHARP_ADDR_PERIOD + (smd->mode->hdisplay) + SHARP_DUMMY_PERIOD) * + smd->mode->vdisplay) / 8; + + smd->tx_buffer = devm_kzalloc(dev, smd->tx_buffer_size, GFP_KERNEL); + if (!smd->tx_buffer) + return -ENOMEM; + + mutex_init(&smd->tx_mutex); + + /* + * VCOM is a signal that prevents DC bias from being built up in + * the panel resulting in pixels being forever stuck in one state. + * + * This driver supports three different methods to generate this + * signal depending on EXTMODE pin: + * + * software (EXTMODE = L) - This mode uses a kthread to + * periodically send a "maintain display" message to the display, + * toggling the vcom bit on and off with each message + * + * external (EXTMODE = H) - This mode relies on an external + * clock to generate the signal on the EXTCOMM pin + * + * pwm (EXTMODE = H) - This mode uses a pwm device to generate + * the signal on the EXTCOMM pin + * + */ + if (device_property_read_string(dev, "sharp,vcom-mode", &vcom_mode_str)) + return dev_err_probe(dev, -EINVAL, + "Unable to find sharp,vcom-mode node in device tree\n"); + + if (!strcmp("software", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_SOFTWARE_VCOM; + smd->sw_vcom_signal = kthread_run(sharp_memory_sw_vcom_signal_thread, + smd, "sw_vcom_signal"); + + } else if (!strcmp("external", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_EXTERNAL_VCOM; + + } else if (!strcmp("pwm", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_PWM_VCOM; + ret = sharp_memory_init_pwm_vcom_signal(smd); + if (ret) + return ret; + } else { + return dev_err_probe(dev, -EINVAL, "Invalid value set for vcom-mode\n"); + } + + drm->mode_config.min_width = smd->mode->hdisplay; + drm->mode_config.max_width = smd->mode->hdisplay; + drm->mode_config.min_height = smd->mode->vdisplay; + drm->mode_config.max_height = smd->mode->vdisplay; + + ret = sharp_memory_pipe_init(drm, smd, sharp_memory_formats, + ARRAY_SIZE(sharp_memory_formats), + NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize display pipeline.\n"); + + drm_plane_enable_fb_damage_clips(&smd->plane); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return dev_err_probe(dev, ret, "Failed to register drm device.\n"); + + drm_client_setup(drm, NULL); + + return 0; +} + +static void sharp_memory_remove(struct spi_device *spi) +{ + struct sharp_memory_device *smd = spi_get_drvdata(spi); + + drm_dev_unplug(&smd->drm); + drm_atomic_helper_shutdown(&smd->drm); + + switch (smd->vcom_mode) { + case SHARP_MEMORY_SOFTWARE_VCOM: + kthread_stop(smd->sw_vcom_signal); + break; + + case SHARP_MEMORY_EXTERNAL_VCOM: + break; + + case SHARP_MEMORY_PWM_VCOM: +
pwm_disable(smd->pwm_vcom_signal); + break; + } +} + +static struct spi_driver sharp_memory_spi_driver = { + .driver = { + .name = "sharp_memory", + .of_match_table = sharp_memory_of_match, + }, + .probe = sharp_memory_probe, + .remove = sharp_memory_remove, + .id_table = sharp_memory_ids, +}; +module_spi_driver(sharp_memory_spi_driver); + +MODULE_AUTHOR("Alex Lanzano <lanzano.alex@gmail.com>"); +MODULE_DESCRIPTION("SPI Protocol driver for the sharp_memory display"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index d19e10289428..3182d32f1b8f 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/of_clk.h> #include <linux/minmax.h> @@ -9,9 +10,9 @@ #include <linux/pm_domain.h> #include <linux/regulator/consumer.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> @@ -882,7 +883,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (mem) { void *screen_base; - ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem)); + ret = devm_aperture_acquire_for_platform_device(pdev, mem->start, + resource_size(mem)); if (ret) { drm_err(dev, "could not acquire memory range %pr: %d\n", mem, ret); return ERR_PTR(ret); @@ -902,7 +904,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (!res) return ERR_PTR(-EINVAL); - ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res)); + ret = devm_aperture_acquire_for_platform_device(pdev, res->start, + resource_size(res)); if (ret) { drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret); return ERR_PTR(ret); @@ -1009,6 +1012,7 @@ DEFINE_DRM_GEM_FOPS(simpledrm_fops); static struct drm_driver simpledrm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -1026,7 +1030,6 @@ static int simpledrm_probe(struct platform_device *pdev) { struct simpledrm_device *sdev; struct drm_device *dev; - unsigned int color_mode; int ret; sdev = simpledrm_device_create(&simpledrm_driver, pdev); @@ -1038,11 +1041,7 @@ static int simpledrm_probe(struct platform_device *pdev) if (ret) return ret; - color_mode = drm_format_info_bpp(sdev->format, 0); - if (color_mode == 16) - color_mode = sdev->format->depth; // can be 15 or 16 - - drm_fbdev_shmem_setup(dev, color_mode); + drm_client_setup(dev, sdev->format); return 0; } diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index b9c6ed352182..97013685c62f 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -13,6 +13,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> @@ -290,6 +291,7 @@ static const struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7586_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", .desc = "Sitronix ST7586", @@ -371,7 +373,7 @@ static int st7586_probe(struct spi_device *spi) spi_set_drvdata(spi, 
drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index 1676da00883d..0747ebd999cc 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -17,6 +17,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -155,6 +156,7 @@ static const struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7735r_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", .desc = "Sitronix ST7735R", @@ -241,7 +243,7 @@ static int st7735r_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index f0a7eb62116c..3139fd9128d8 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -308,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test) err = ttm_resource_alloc(bo, place, &res2); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_ASSERT_EQ(test, - list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1); + list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1); ttm_bo_unreserve(bo); KUNIT_ASSERT_EQ(test, - list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1); + list_is_last(&res1->lru.link, &priv->ttm_dev->unevictable), 1); ttm_resource_free(bo, &res1); ttm_resource_free(bo, &res2); diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c index 22260e7aea58..a9f4b81921c3 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c @@ -164,18 +164,18 @@ static void ttm_resource_init_pinned(struct kunit *test) res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, res); - KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned)); + KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable)); dma_resv_lock(bo->base.resv, NULL); ttm_bo_pin(bo); ttm_resource_init(bo, place, res); - KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned)); + KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->unevictable)); ttm_bo_unpin(bo); ttm_resource_fini(man, res); dma_resv_unlock(bo->base.resv); - KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned)); + KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable)); } static void ttm_resource_fini_basic(struct kunit *test) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 320592435252..48c5365efca1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -139,7 +139,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + ret = ttm_bo_populate(bo, ctx); if (ret) goto out_err; } @@ -594,7 +594,8 @@ void ttm_bo_pin(struct ttm_buffer_object *bo) spin_lock(&bo->bdev->lru_lock); if (bo->resource) ttm_resource_del_bulk_move(bo->resource, bo); - ++bo->pin_count; + if (!bo->pin_count++ && bo->resource) + ttm_resource_move_to_lru_tail(bo->resource); spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_pin); @@ -613,9 +614,10 @@ void ttm_bo_unpin(struct ttm_buffer_object 
*bo) return; spin_lock(&bo->bdev->lru_lock); - --bo->pin_count; - if (bo->resource) + if (!--bo->pin_count && bo->resource) { ttm_resource_add_bulk_move(bo->resource, bo); + ttm_resource_move_to_lru_tail(bo->resource); + } spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_unpin); @@ -1128,9 +1130,20 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo) if (bo->bdev->funcs->swap_notify) bo->bdev->funcs->swap_notify(bo); - if (ttm_tt_is_populated(bo->ttm)) + if (ttm_tt_is_populated(bo->ttm)) { + spin_lock(&bo->bdev->lru_lock); + ttm_resource_del_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); + ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags); + spin_lock(&bo->bdev->lru_lock); + if (ret) + ttm_resource_add_bulk_move(bo->resource, bo); + ttm_resource_move_to_lru_tail(bo->resource); + spin_unlock(&bo->bdev->lru_lock); + } + out: /* Consider -ENOMEM and -ENOSPC non-fatal. */ if (ret == -ENOMEM || ret == -ENOSPC) @@ -1180,3 +1193,47 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->bdev, bo->ttm); bo->ttm = NULL; } + +/** + * ttm_bo_populate() - Ensure that a buffer object has backing pages + * @bo: The buffer object + * @ctx: The ttm_operation_ctx governing the operation. + * + * For buffer objects in a memory type whose manager uses + * struct ttm_tt for backing pages, ensure those backing pages + * are present and with valid content. The bo's resource is also + * placed on the correct LRU list if it was previously swapped + * out. + * + * Return: 0 if successful, negative error code on failure. + * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible + * is set to true. + */ +int ttm_bo_populate(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx) +{ + struct ttm_tt *tt = bo->ttm; + bool swapped; + int ret; + + dma_resv_assert_held(bo->base.resv); + + if (!tt) + return 0; + + swapped = ttm_tt_is_swapped(tt); + ret = ttm_tt_populate(bo->bdev, tt, ctx); + if (ret) + return ret; + + if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count && + bo->resource) { + spin_lock(&bo->bdev->lru_lock); + ttm_resource_add_bulk_move(bo->resource, bo); + ttm_resource_move_to_lru_tail(bo->resource); + spin_unlock(&bo->bdev->lru_lock); + } + + return 0; +} +EXPORT_SYMBOL(ttm_bo_populate); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3c07f4712d5c..d939925efa81 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -163,7 +163,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, src_man = ttm_manager_type(bdev, src_mem->mem_type); if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || dst_man->use_tt)) { - ret = ttm_tt_populate(bdev, ttm, ctx); + ret = ttm_bo_populate(bo, ctx); if (ret) return ret; } @@ -350,7 +350,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, BUG_ON(!ttm); - ret = ttm_tt_populate(bo->bdev, ttm, &ctx); + ret = ttm_bo_populate(bo, &ctx); if (ret) return ret; @@ -507,7 +507,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map) pgprot_t prot; void *vaddr; - ret = ttm_tt_populate(bo->bdev, ttm, &ctx); + ret = ttm_bo_populate(bo, &ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4212b8c91dd4..2c699ed1963a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -224,7 +224,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, }; ttm = bo->ttm; - err = ttm_tt_populate(bdev, 
bo->ttm, &ctx); + err = ttm_bo_populate(bo, &ctx); if (err) { if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN) diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index e7cc4954c1bc..02e797fd1891 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -216,7 +216,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func bdev->vma_manager = vma_manager; spin_lock_init(&bdev->lru_lock); - INIT_LIST_HEAD(&bdev->pinned); + INIT_LIST_HEAD(&bdev->unevictable); bdev->dev_mapping = mapping; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); @@ -283,7 +283,7 @@ void ttm_device_clear_dma_mappings(struct ttm_device *bdev) struct ttm_resource_manager *man; unsigned int i, j; - ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned); + ttm_device_clear_lru_dma_mappings(bdev, &bdev->unevictable); for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) { man = ttm_manager_type(bdev, i); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 6d764ba88aab..a87665eb28a6 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -30,6 +30,7 @@ #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_tt.h> #include <drm/drm_util.h> @@ -235,11 +236,26 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, } } +static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo) +{ + /* + * Take care when creating a new resource for a bo, that it is not considered + * swapped if it's not the current resource for the bo and is thus logically + * associated with the ttm_tt. Think a VRAM resource created to move a + * swapped-out bo to VRAM. 
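This comment is the crux of the pinned-to-unevictable rename in this series: the per-device list now collects everything eviction and swap must skip, both pinned BOs and BOs whose ttm_tt is currently swapped out, and a resource only counts as swapped when it is the BO's current resource. The placement rule, condensed into a standalone model (illustrative only; the real checks live in ttm_resource_init() and ttm_resource_move_to_lru_tail()):

#include <stdbool.h>
#include <stdio.h>

enum lru_list { LRU_EVICTABLE, LRU_UNEVICTABLE };

/* Models which list a resource lands on after this series. */
static enum lru_list pick_lru(unsigned int pin_count,
			      bool is_current_resource, bool tt_swapped)
{
	/* A resource only counts as swapped if it is the BO's current one. */
	bool swapped = is_current_resource && tt_swapped;

	return (pin_count || swapped) ? LRU_UNEVICTABLE : LRU_EVICTABLE;
}

int main(void)
{
	printf("%d\n", pick_lru(0, true, true));   /* 1: unevictable, swapped */
	printf("%d\n", pick_lru(1, true, false));  /* 1: unevictable, pinned */
	printf("%d\n", pick_lru(0, false, true));  /* 0: evictable, new resource */
	return 0;
}
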
+ */ + if (bo->resource != res || !bo->ttm) + return false; + + dma_resv_assert_held(bo->base.resv); + return ttm_tt_is_swapped(bo->ttm); +} + /* Add the resource to a bulk move if the BO is configured for it */ void ttm_resource_add_bulk_move(struct ttm_resource *res, struct ttm_buffer_object *bo) { - if (bo->bulk_move && !bo->pin_count) + if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo)) ttm_lru_bulk_move_add(bo->bulk_move, res); } @@ -247,7 +263,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res, void ttm_resource_del_bulk_move(struct ttm_resource *res, struct ttm_buffer_object *bo) { - if (bo->bulk_move && !bo->pin_count) + if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo)) ttm_lru_bulk_move_del(bo->bulk_move, res); } @@ -259,8 +275,8 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res) lockdep_assert_held(&bo->bdev->lru_lock); - if (bo->pin_count) { - list_move_tail(&res->lru.link, &bdev->pinned); + if (bo->pin_count || ttm_resource_is_swapped(res, bo)) { + list_move_tail(&res->lru.link, &bdev->unevictable); } else if (bo->bulk_move) { struct ttm_lru_bulk_move_pos *pos = @@ -301,8 +317,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo, man = ttm_manager_type(bo->bdev, place->mem_type); spin_lock(&bo->bdev->lru_lock); - if (bo->pin_count) - list_add_tail(&res->lru.link, &bo->bdev->pinned); + if (bo->pin_count || ttm_resource_is_swapped(res, bo)) + list_add_tail(&res->lru.link, &bo->bdev->unevictable); else list_add_tail(&res->lru.link, &man->lru[bo->priority]); man->usage += res->size; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 4b51b9023126..3baf215eca23 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -367,7 +367,10 @@ error: } return ret; } + +#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST) EXPORT_SYMBOL(ttm_tt_populate); +#endif void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig index 5121fed571a5..a9d6fe535d88 100644 --- a/drivers/gpu/drm/tve200/Kconfig +++ b/drivers/gpu/drm/tve200/Kconfig @@ -6,6 +6,7 @@ config DRM_TVE200 depends on ARM || COMPILE_TEST depends on OF select DRM_BRIDGE + select DRM_CLIENT_SELECTION select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index acce210e2554..b30340a2141d 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -39,8 +39,10 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -149,6 +151,7 @@ static const struct drm_driver tve200_drm_driver = { .minor = 0, .patchlevel = 0, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, }; static int tve200_probe(struct platform_device *pdev) @@ -221,11 +224,7 @@ static int tve200_probe(struct platform_device *pdev) if (ret < 0) goto clk_disable; - /* - * Passing in 16 here will make the RGB565 mode the default - * Passing in 32 will use XRGB8888 mode - */ - drm_fbdev_dma_setup(drm, 16); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index c744175c6992..d7a6abef7d78 100644 --- 
a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig @@ -5,6 +5,7 @@ config DRM_UDL depends on USB depends on USB_ARCH_HAS_HCD depends on MMU + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 280a09a6e2ad..8d8ae40f945c 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <drm/drm_drv.h> +#include <drm/drm_client_setup.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> #include <drm/drm_gem_shmem_helper.h> @@ -73,6 +74,7 @@ static const struct drm_driver driver = { .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = udl_driver_gem_prime_import, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -117,7 +119,7 @@ static int udl_usb_probe(struct usb_interface *interface, DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); - drm_fbdev_shmem_setup(&udl->drm, 0); + drm_client_setup(&udl->drm, NULL); return 0; } diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index b7d673f1153b..fcf710926057 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -13,7 +13,8 @@ v3d-y := \ v3d_trace_points.o \ v3d_sched.o \ v3d_sysfs.o \ - v3d_submit.o + v3d_submit.o \ + v3d_gemfs.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index ebe52bef4ffb..73ab7dd31b17 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -107,6 +107,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); struct sg_table *sgt; + u64 align; int ret; /* So far we pin the BO in the MMU for its lifetime, so use @@ -116,6 +117,15 @@ v3d_bo_create_finish(struct drm_gem_object *obj) if (IS_ERR(sgt)) return PTR_ERR(sgt); + if (!v3d->gemfs) + align = SZ_4K; + else if (obj->size >= SZ_1M) + align = SZ_1M; + else if (obj->size >= SZ_64K) + align = SZ_64K; + else + align = SZ_4K; + spin_lock(&v3d->mm_lock); /* Allocate the object's space in the GPU's page tables. * Inserting PTEs will happen later, but the offset is for the @@ -123,7 +133,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) */ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, obj->size >> V3D_MMU_PAGE_SHIFT, - GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0); + align >> V3D_MMU_PAGE_SHIFT, 0, 0); spin_unlock(&v3d->mm_lock); if (ret) return ret; @@ -143,10 +153,12 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t unaligned_size) { struct drm_gem_shmem_object *shmem_obj; + struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_bo *bo; int ret; - shmem_obj = drm_gem_shmem_create(dev, unaligned_size); + shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, + v3d->gemfs); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index d7ff1f5fa481..fb35c5c3f1a7 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -36,6 +36,13 @@ #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 +/* Only expose the `super_pages` modparam if THP is enabled. 
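The alignment logic added to v3d_bo_create_finish() above is the allocation-side half of super-pages support: without the THP-backed gemfs mount every GPU-VA node stays 4 KiB aligned, otherwise the node is aligned so that 64 KiB big pages or 1 MiB super pages can actually be mapped later. Restated as a standalone helper (a sketch, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

#define SZ_4K  0x1000u
#define SZ_64K 0x10000u
#define SZ_1M  0x100000u

/* Mirrors the alignment choice added to v3d_bo_create_finish(). */
static unsigned int v3d_va_align(bool have_gemfs, unsigned long obj_size)
{
	if (!have_gemfs)
		return SZ_4K;	/* no THP-backed gemfs: huge pages can't happen */
	if (obj_size >= SZ_1M)
		return SZ_1M;
	if (obj_size >= SZ_64K)
		return SZ_64K;
	return SZ_4K;
}

int main(void)
{
	printf("%#x\n", v3d_va_align(true, 3ul << 20));		/* 0x100000 */
	printf("%#x\n", v3d_va_align(false, 3ul << 20));	/* 0x1000 */
	return 0;
}
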
*/ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool super_pages = true; +module_param_named(super_pages, super_pages, bool, 0400); +MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support."); +#endif + static int v3d_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -97,6 +104,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_MAX_PERF_COUNTERS: args->value = v3d->perfmon_info.max_counters; return 0; + case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES: + args->value = !!v3d->gemfs; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index cf4b23369dc4..de73eefff9ac 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -19,9 +19,8 @@ struct clk; struct platform_device; struct reset_control; -#define GMP_GRANULARITY (128 * 1024) - #define V3D_MMU_PAGE_SHIFT 12 +#define V3D_PAGE_FACTOR (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) #define V3D_MAX_QUEUES (V3D_CPU + 1) @@ -137,6 +136,11 @@ struct v3d_dev { struct drm_mm mm; spinlock_t mm_lock; + /* + * tmpfs instance used for shmem backed objects + */ + struct vfsmount *gemfs; + struct work_struct overflow_mem_work; struct v3d_bin_job *bin_job; @@ -534,6 +538,11 @@ void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_clean_caches(struct v3d_dev *v3d); +/* v3d_gemfs.c */ +extern bool super_pages; +void v3d_gemfs_init(struct v3d_dev *v3d); +void v3d_gemfs_fini(struct v3d_dev *v3d); + /* v3d_submit.c */ void v3d_job_cleanup(struct v3d_job *job); void v3d_job_put(struct v3d_job *job); @@ -553,6 +562,7 @@ void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); /* v3d_mmu.c */ +int v3d_mmu_flush_all(struct v3d_dev *v3d); int v3d_mmu_set_page_table(struct v3d_dev *v3d); void v3d_mmu_insert_ptes(struct v3d_bo *bo); void v3d_mmu_remove_ptes(struct v3d_bo *bo); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index da8faf3b9011..b1e681630ded 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -288,11 +288,14 @@ v3d_gem_init(struct drm_device *dev) v3d_init_hw_state(v3d); v3d_mmu_set_page_table(v3d); + v3d_gemfs_init(v3d); + ret = v3d_sched_init(v3d); if (ret) { drm_mm_takedown(&v3d->mm); - dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, + dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt, v3d->pt_paddr); + return ret; } return 0; @@ -304,6 +307,7 @@ v3d_gem_destroy(struct drm_device *dev) struct v3d_dev *v3d = to_v3d_dev(dev); v3d_sched_fini(v3d); + v3d_gemfs_fini(v3d); /* Waiting for jobs to finish would need to be done before * unregistering V3D. diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c new file mode 100644 index 000000000000..4c5e18590a5c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_gemfs.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2024 Raspberry Pi */ + +#include <linux/fs.h> +#include <linux/mount.h> + +#include "v3d_drv.h" + +void v3d_gemfs_init(struct v3d_dev *v3d) +{ + char huge_opt[] = "huge=within_size"; + struct file_system_type *type; + struct vfsmount *gemfs; + + /* + * By creating our own shmemfs mountpoint, we can pass in + * mount flags that better match our usecase. However, we + * only do so on platforms which benefit from it. 
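The mapping-side half follows in v3d_mmu_insert_ptes() below: a DMA segment may be mapped with V3D_PTE_SUPERPAGE (1 MiB, bit 31) or V3D_PTE_BIGPAGE (64 KiB, bit 30) only while enough contiguous length remains and both the GPU page index and the physical page-frame number share the alignment. A compact model of that page-size decision (illustrative; constants mirror the hunk below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define V3D_MMU_PAGE_SHIFT 12
#define SZ_64K 0x10000u
#define SZ_1M  0x100000u

/* Both the virtual page index and the PFN must share the alignment. */
static bool aligned_to(uint32_t page, uint32_t pfn, uint32_t size)
{
	uint32_t pages = size >> V3D_MMU_PAGE_SHIFT;

	return (page % pages) == 0 && (pfn % pages) == 0;
}

/* Pick the largest page size a segment of 'len' bytes permits here. */
static uint32_t pick_page_size(uint32_t page, uint32_t pfn, uint32_t len)
{
	if (len >= SZ_1M && aligned_to(page, pfn, SZ_1M))
		return SZ_1M;	/* would set V3D_PTE_SUPERPAGE */
	if (len >= SZ_64K && aligned_to(page, pfn, SZ_64K))
		return SZ_64K;	/* would set V3D_PTE_BIGPAGE */
	return 1u << V3D_MMU_PAGE_SHIFT;
}

int main(void)
{
	/* page 256 and PFN 0x40000 are both 1 MiB aligned */
	printf("%#x\n", pick_page_size(256, 0x40000, SZ_1M));	/* 0x100000 */
	return 0;
}
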
+ */ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + goto err; + + /* The user doesn't want to enable Super Pages */ + if (!super_pages) + goto err; + + type = get_fs_type("tmpfs"); + if (!type) + goto err; + + gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); + if (IS_ERR(gemfs)) + goto err; + + v3d->gemfs = gemfs; + drm_info(&v3d->drm, "Using Transparent Hugepages\n"); + + return; + +err: + v3d->gemfs = NULL; + drm_notice(&v3d->drm, + "Transparent Hugepage support is recommended for optimal performance on this platform!\n"); +} + +void v3d_gemfs_fini(struct v3d_dev *v3d) +{ + if (v3d->gemfs) + kern_unmount(v3d->gemfs); +} diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index d469bda52c1a..20bf33702c3c 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -70,6 +70,8 @@ v3d_overflow_mem_work(struct work_struct *work) list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); + v3d_mmu_flush_all(v3d); + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT); V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 14f3af40d6f6..0f564fd7160c 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -25,39 +25,37 @@ * superpage bit set. */ #define V3D_PTE_SUPERPAGE BIT(31) +#define V3D_PTE_BIGPAGE BIT(30) #define V3D_PTE_WRITEABLE BIT(29) #define V3D_PTE_VALID BIT(28) -static int v3d_mmu_flush_all(struct v3d_dev *v3d) +static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment) { - int ret; - - /* Make sure that another flush isn't already running when we - * start this one. - */ - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); - if (ret) - dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); + return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) && + IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT); +} - V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | - V3D_MMU_CTL_TLB_CLEAR); +int v3d_mmu_flush_all(struct v3d_dev *v3d) +{ + int ret; - V3D_WRITE(V3D_MMUC_CONTROL, - V3D_MMUC_CONTROL_FLUSH | + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH | V3D_MMUC_CONTROL_ENABLE); - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); if (ret) { - dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); + dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); return ret; } - ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & - V3D_MMUC_CONTROL_FLUSHING), 100); + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | + V3D_MMU_CTL_TLB_CLEAR); + + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); if (ret) - dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); + dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n"); return ret; } @@ -87,19 +85,40 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) struct drm_gem_shmem_object *shmem_obj = &bo->base; struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; - u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; - struct sg_dma_page_iter dma_iter; - - for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { - dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); - u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; - u32 pte = page_prot | page_address; - u32 i; - - BUG_ON(page_address + (PAGE_SIZE >> 
V3D_MMU_PAGE_SHIFT) >= - BIT(24)); - for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) - v3d->pt[page++] = pte + i; + struct scatterlist *sgl; + unsigned int count; + + for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, count) { + dma_addr_t dma_addr = sg_dma_address(sgl); + u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT; + unsigned int len = sg_dma_len(sgl); + + while (len > 0) { + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; + u32 page_address = page_prot | pfn; + unsigned int i, page_size; + + BUG_ON(pfn + V3D_PAGE_FACTOR >= BIT(24)); + + if (len >= SZ_1M && + v3d_mmu_is_aligned(page, page_address, SZ_1M)) { + page_size = SZ_1M; + page_address |= V3D_PTE_SUPERPAGE; + } else if (len >= SZ_64K && + v3d_mmu_is_aligned(page, page_address, SZ_64K)) { + page_size = SZ_64K; + page_address |= V3D_PTE_BIGPAGE; + } else { + page_size = SZ_4K; + } + + for (i = 0; i < page_size >> V3D_MMU_PAGE_SHIFT; i++) { + v3d->pt[page++] = page_address + i; + pfn++; + } + + len -= page_size; + } } WARN_ON_ONCE(page - bo->node.start != diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 00cd081d7873..156be13ab2ef 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -409,11 +409,7 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, if (req->pad != 0) return -EINVAL; - mutex_lock(&v3d_priv->perfmon.lock); - perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); - v3d_perfmon_get(perfmon); - mutex_unlock(&v3d_priv->perfmon.lock); - + perfmon = v3d_perfmon_find(v3d_priv, req->id); if (!perfmon) return -EINVAL; diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 08d2a2739582..99ac4995b5a1 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -135,8 +135,31 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) struct v3d_stats *global_stats = &v3d->queue[queue].stats; struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); - - preempt_disable(); + unsigned long flags; + + /* + * We only need to disable local interrupts to appease lockdep who + * otherwise would think v3d_job_start_stats vs v3d_stats_update has an + * unsafe in-irq vs no-irq-off usage problem. This is a false positive + * because all the locks are per queue and stats type, and all jobs are + * completely one at a time serialised. More specifically: + * + * 1. Locks for GPU queues are updated from interrupt handlers under a + * spin lock and started here with preemption disabled. + * + * 2. Locks for CPU queues are updated from the worker with preemption + * disabled and equally started here with preemption disabled. + * + * Therefore both are consistent. + * + * 3. Because next job can only be queued after the previous one has + * been signaled, and locks are per queue, there is also no scope for + * the start part to race with the update part. 
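Stepping back to the v3d_mmu_insert_ptes() rework above: the loop now walks whole DMA segments and picks the largest page size whose alignment holds for both the virtual page index and the PFN. A stand-alone model of that selection, with the DMA run invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PAGE_SHIFT 12
    #define SZ_4K  0x1000u
    #define SZ_64K 0x10000u
    #define SZ_1M  0x100000u

    /* both the GPU page index and the CPU-side PFN must be aligned */
    static int is_aligned(uint32_t page, uint32_t pfn, uint32_t alignment)
    {
        uint32_t pages = alignment >> MMU_PAGE_SHIFT;

        return !(page & (pages - 1)) && !(pfn & (pages - 1));
    }

    int main(void)
    {
        uint32_t page = 256;                          /* virtual page index */
        uint32_t pfn = 0x40000000u >> MMU_PAGE_SHIFT; /* made up, 1M aligned */
        uint32_t len = SZ_1M + SZ_64K + SZ_4K;        /* made-up segment length */

        while (len > 0) {
            uint32_t page_size;

            if (len >= SZ_1M && is_aligned(page, pfn, SZ_1M))
                page_size = SZ_1M;          /* V3D_PTE_SUPERPAGE */
            else if (len >= SZ_64K && is_aligned(page, pfn, SZ_64K))
                page_size = SZ_64K;         /* V3D_PTE_BIGPAGE */
            else
                page_size = SZ_4K;          /* plain PTE */

            printf("page %u pfn 0x%x -> %u KiB entry\n",
                   page, pfn, page_size / 1024);

            page += page_size >> MMU_PAGE_SHIFT;
            pfn += page_size >> MMU_PAGE_SHIFT;
            len -= page_size;
        }
        return 0;
    }

The huge=within_size gemfs mount earlier is what makes 64 KiB and 1 MiB aligned segments likely to show up in the sg table in the first place.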
+ */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_save(flags); + else + preempt_disable(); write_seqcount_begin(&local_stats->lock); local_stats->start_ns = now; @@ -146,7 +169,10 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) global_stats->start_ns = now; write_seqcount_end(&global_stats->lock); - preempt_enable(); + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_restore(flags); + else + preempt_enable(); } static void @@ -167,11 +193,21 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) struct v3d_stats *global_stats = &v3d->queue[queue].stats; struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); + unsigned long flags; + + /* See comment in v3d_job_start_stats() */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_save(flags); + else + preempt_disable(); - preempt_disable(); v3d_stats_update(local_stats, now); v3d_stats_update(global_stats, now); - preempt_enable(); + + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_restore(flags); + else + preempt_enable(); } static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) @@ -667,7 +703,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) /* Unblock schedulers and restart their jobs. */ for (q = 0; q < V3D_MAX_QUEUES; q++) { - drm_sched_start(&v3d->queue[q].sched); + drm_sched_start(&v3d->queue[q].sched, 0); } mutex_unlock(&v3d->reset_lock); diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig index 45fe135d6e43..180e30b82ab9 100644 --- a/drivers/gpu/drm/vboxvideo/Kconfig +++ b/drivers/gpu/drm/vboxvideo/Kconfig @@ -2,6 +2,7 @@ config DRM_VBOXVIDEO tristate "Virtual Box Graphics Card" depends on DRM && X86 && PCI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index ef36834c8673..a536c467e2b2 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -7,12 +7,14 @@ * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ + +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vt_kern.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_file.h> @@ -44,7 +46,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) return ret; @@ -80,7 +82,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_irq_fini; - drm_fbdev_ttm_setup(&vbox->ddev, 32); + drm_client_setup(&vbox->ddev, NULL); return 0; @@ -193,6 +195,7 @@ static const struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, DRM_GEM_VRAM_DRIVER, + DRM_FBDEV_TTM_DRIVER_OPS, }; drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset); diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 269b5f26b2ea..c5f30b317698 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -9,6 +9,7 @@ config DRM_VC4 depends on SND && SND_SOC depends on COMMON_CLK depends on PM + select DRM_CLIENT_SELECTION select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HDMI_STATE_HELPER select 
DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c index 0731a7d85d7a..6527fb1db71e 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c @@ -155,11 +155,11 @@ KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister, drm_dev_unregister, struct drm_device *); -static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) +static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen) { struct drm_device *drm; - const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver; - const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock; + const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver; + const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? &vc5_mock : &vc4_mock; struct vc4_dev *vc4; struct device *dev; int ret; @@ -173,9 +173,9 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); vc4->dev = dev; - vc4->is_vc5 = is_vc5; + vc4->gen = gen; - vc4->hvs = __vc4_hvs_alloc(vc4, NULL); + vc4->hvs = __vc4_hvs_alloc(vc4, NULL, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs); drm = &vc4->base; @@ -198,10 +198,10 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) struct vc4_dev *vc4_mock_device(struct kunit *test) { - return __mock_device(test, false); + return __mock_device(test, VC4_GEN_4); } struct vc4_dev *vc5_mock_device(struct kunit *test) { - return __mock_device(test, true); + return __mock_device(test, VC4_GEN_5); } diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 3f72be7490d5..fb450b6a4d44 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->purgeable.lock); @@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* list_del_init() is used here because the caller might release @@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); bo = kzalloc(sizeof(*bo), GFP_KERNEL); @@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, struct drm_gem_dma_object *dma_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); if (size == 0) @@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *file_priv, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; ret = vc4_dumb_fixup_args(args); @@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo) struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* Fast path: if the BO is already retained by someone, no need to @@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if 
(WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* Fast path: if the BO is still retained by someone, no need to test @@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; ret = vc4_grab_bin_bo(vc4, vc4file); @@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_vc4_mmap_bo *args = data; struct drm_gem_object *gem_obj; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; gem_obj = drm_gem_object_lookup(file_priv, args->handle); @@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->size == 0) @@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo; bool t_format; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->flags != 0) @@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->flags != 0 || args->modifier != 0) @@ -1007,7 +1007,7 @@ int vc4_bo_cache_init(struct drm_device *dev) int ret; int i; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* Create the initial set of BO labels that the kernel will @@ -1071,7 +1071,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; int ret = 0, label; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!args->len) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 8b5a7e5eb146..575900ee67a5 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -105,6 +105,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, struct vc4_hvs *hvs = vc4->hvs; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int channel = vc4_crtc_state->assigned_channel; unsigned int cob_size; u32 val; int fifo_lines; @@ -121,7 +122,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, * Read vertical scanline which is currently composed for our * pixelvalve by the HVS, and also the scaler status. */ - val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel)); + val = HVS_READ(SCALER_DISPSTATX(channel)); /* Get optional system timestamp after query. */ if (etime) @@ -137,11 +138,11 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, *vpos /= 2; /* Use hpos to correct for field offset in interlaced mode. */ - if (vc4_hvs_get_fifo_frame_count(hvs, vc4_crtc_state->assigned_channel) % 2) + if (vc4_hvs_get_fifo_frame_count(hvs, channel) % 2) *hpos += mode->crtc_htotal / 2; } - cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel); + cob_size = vc4_crtc_get_cob_allocation(vc4, channel); /* This is the offset we need for translating hvs -> pv scanout pos. */ fifo_lines = cob_size / mode->crtc_hdisplay; @@ -263,7 +264,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) * Removing 1 from the FIFO full level however * seems to completely remove that issue. 
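A note on the v3d_sched.c stats hunks earlier: the per-queue locks there are seqcounts, so the writer flips the sequence to odd for the duration of the update and back to even when done, while readers retry until they see a stable even value. That is also why the write side must at least disable preemption: a reader scheduled onto the same CPU mid-update would retry forever. A minimal single-threaded model of the protocol (memory barriers and READ_ONCE(), which a real seqcount needs, are deliberately elided):

    #include <stdint.h>
    #include <stdio.h>

    struct stats {
        unsigned int seq;       /* odd while an update is in flight */
        uint64_t enabled_ns;
    };

    static void write_begin(struct stats *s) { s->seq++; }
    static void write_end(struct stats *s)   { s->seq++; }

    static uint64_t read_stable(const struct stats *s)
    {
        unsigned int seq;
        uint64_t val;

        do {
            seq = s->seq;           /* snapshot the sequence */
            val = s->enabled_ns;
        } while ((seq & 1) || seq != s->seq);   /* retry on torn read */

        return val;
    }

    int main(void)
    {
        struct stats s = { 0, 0 };

        write_begin(&s);
        s.enabled_ns += 500;
        write_end(&s);

        printf("enabled_ns=%llu seq=%u\n",
               (unsigned long long)read_stable(&s), s.seq);
        return 0;
    }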
*/ - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; @@ -428,7 +429,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) CRTC_WRITE(PV_MUX_CFG, VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); @@ -735,10 +736,17 @@ int vc4_crtc_atomic_check(struct drm_crtc *crtc, if (conn_state->crtc != crtc) continue; - vc4_state->margins.left = conn_state->tv.margins.left; - vc4_state->margins.right = conn_state->tv.margins.right; - vc4_state->margins.top = conn_state->tv.margins.top; - vc4_state->margins.bottom = conn_state->tv.margins.bottom; + if (memcmp(&vc4_state->margins, &conn_state->tv.margins, + sizeof(vc4_state->margins))) { + memcpy(&vc4_state->margins, &conn_state->tv.margins, + sizeof(vc4_state->margins)); + + /* + * Need to force the dlist entries for all planes to be + * updated so that the dest rectangles are changed. + */ + crtc_state->zpos_changed = true; + } break; } @@ -913,7 +921,7 @@ static int vc4_async_set_fence_cb(struct drm_device *dev, struct dma_fence *fence; int ret; - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { struct vc4_bo *bo = to_vc4_bo(&dma_bo->base); return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno, @@ -1000,7 +1008,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, struct vc4_bo *bo = to_vc4_bo(&dma_bo->base); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* @@ -1043,7 +1051,7 @@ int vc4_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - if (vc4->is_vc5) + if (vc4->gen > VC4_GEN_4) return vc5_async_page_flip(crtc, fb, event, flags); else return vc4_async_page_flip(crtc, fb, event, flags); @@ -1338,9 +1346,8 @@ int __vc4_crtc_init(struct drm_device *drm, drm_crtc_helper_add(crtc, crtc_helper_funcs); - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); - drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); /* We support CTM, but only for one CRTC at a time. It's therefore diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index c133e96b8aca..d47e5967592f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -20,6 +20,7 @@ * driver. 
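On the margins hunk above: switching from four field assignments to a memcmp()/memcpy() pair also tells the driver whether anything actually changed, so the dlist rewrite (via zpos_changed) is only forced when the margins really moved. The pattern in isolation, with the structures reduced to a sketch:

    #include <stdio.h>
    #include <string.h>

    struct tv_margins {
        unsigned int left, right, top, bottom;
    };

    /* Returns 1 when the new margins differ and the planes must be redone. */
    static int update_margins(struct tv_margins *cur,
                              const struct tv_margins *next)
    {
        if (!memcmp(cur, next, sizeof(*cur)))
            return 0;       /* unchanged: leave the dlist alone */

        memcpy(cur, next, sizeof(*cur));
        return 1;           /* changed: force zpos_changed */
    }

    int main(void)
    {
        struct tv_margins state = { 48, 48, 32, 32 };
        struct tv_margins conn = { 0, 0, 0, 0 };

        printf("first: %d\n", update_margins(&state, &conn));
        printf("second: %d\n", update_margins(&state, &conn));
        return 0;
    }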
*/ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/device.h> @@ -30,10 +31,11 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> @@ -98,7 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) @@ -147,7 +149,7 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL); @@ -165,7 +167,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file->driver_priv; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (vc4file->bin_bo_used) @@ -212,6 +214,7 @@ const struct drm_driver vc4_drm_driver = { .gem_create_object = vc4_create_object, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .ioctls = vc4_drm_ioctls, .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls), @@ -235,6 +238,7 @@ const struct drm_driver vc5_drm_driver = { #endif DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &vc4_drm_fops, @@ -291,13 +295,17 @@ static int vc4_drm_bind(struct device *dev) struct vc4_dev *vc4; struct device_node *node; struct drm_crtc *crtc; - bool is_vc5; + enum vc4_gen gen; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); - is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"); - if (is_vc5) + if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5")) + gen = VC4_GEN_5; + else + gen = VC4_GEN_4; + + if (gen > VC4_GEN_4) driver = &vc5_drm_driver; else driver = &vc4_drm_driver; @@ -315,13 +323,13 @@ static int vc4_drm_bind(struct device *dev) vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base); if (IS_ERR(vc4)) return PTR_ERR(vc4); - vc4->is_vc5 = is_vc5; + vc4->gen = gen; vc4->dev = dev; drm = &vc4->base; platform_set_drvdata(pdev, drm); - if (!is_vc5) { + if (gen == VC4_GEN_4) { ret = drmm_mutex_init(drm, &vc4->bin_bo_lock); if (ret) goto err; @@ -335,7 +343,7 @@ static int vc4_drm_bind(struct device *dev) if (ret) goto err; - if (!is_vc5) { + if (gen == VC4_GEN_4) { ret = vc4_gem_init(drm); if (ret) goto err; @@ -352,7 +360,7 @@ static int vc4_drm_bind(struct device *dev) } } - ret = drm_aperture_remove_framebuffers(driver); + ret = aperture_remove_all_conflicting_devices(driver->name); if (ret) goto err; @@ -389,7 +397,7 @@ static int vc4_drm_bind(struct device *dev) if (ret < 0) goto err; - drm_fbdev_dma_setup(drm, 16); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 08e29fa82563..c6be1997f1c7 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -15,6 +15,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_encoder.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mm.h> @@ 
-80,11 +81,16 @@ struct vc4_perfmon { u64 counters[] __counted_by(ncounters); }; +enum vc4_gen { + VC4_GEN_4, + VC4_GEN_5, +}; + struct vc4_dev { struct drm_device base; struct device *dev; - bool is_vc5; + enum vc4_gen gen; unsigned int irq; @@ -315,6 +321,7 @@ struct vc4_hvs { struct platform_device *pdev; void __iomem *regs; u32 __iomem *dlist; + unsigned int dlist_mem_size; struct clk *core_clk; @@ -394,7 +401,7 @@ struct vc4_plane_state { */ u32 pos0_offset; u32 pos2_offset; - u32 ptr0_offset; + u32 ptr0_offset[DRM_FORMAT_MAX_PLANES]; u32 lbm_offset; /* Offset where the plane's dlist was last stored in the @@ -404,7 +411,7 @@ struct vc4_plane_state { /* Clipped coordinates of the plane on the display. */ int crtc_x, crtc_y, crtc_w, crtc_h; - /* Clipped area being scanned from in the FB. */ + /* Clipped area being scanned from in the FB in u16.16 format */ u32 src_x, src_y; u32 src_w[2], src_h[2]; @@ -414,11 +421,6 @@ struct vc4_plane_state { bool is_unity; bool is_yuv; - /* Offset to start scanning out from the start of the plane's - * BO. - */ - u32 offsets[3]; - /* Our allocation in LBM for temporary storage during scaling. */ struct drm_mm_node lbm; @@ -598,12 +600,7 @@ struct vc4_crtc_state { bool txp_armed; unsigned int assigned_channel; - struct { - unsigned int left; - unsigned int right; - unsigned int top; - unsigned int bottom; - } margins; + struct drm_connector_tv_margins margins; unsigned long hvs_load; @@ -1002,7 +999,9 @@ void vc4_irq_reset(struct drm_device *dev); /* vc4_hvs.c */ extern struct platform_driver vc4_hvs_driver; -struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev); +struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, + void __iomem *regs, + struct platform_device *pdev); void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output); int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output); u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 24fb1b57e1dd..22bccd69eb62 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, u32 i; int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, unsigned long timeout_expire; DEFINE_WAIT(wait); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (vc4->finished_seqno >= seqno) @@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; again: @@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_device *dev) if (!exec) return; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* A previous RCL may have written to one of our textures, and @@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); bool was_empty = list_empty(&vc4->render_job_list); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; list_move_tail(&exec->head, &vc4->render_job_list); @@ -970,7 +970,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4) unsigned long irqflags; struct vc4_seqno_cb *cb, 
*cb_temp; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; spin_lock_irqsave(&vc4->job_lock, irqflags); @@ -1009,7 +1009,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; cb->func = func; @@ -1065,7 +1065,7 @@ vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_wait_seqno *args = data; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, @@ -1082,7 +1082,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->pad != 0) @@ -1131,7 +1131,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, args->shader_rec_size, args->bo_handle_count); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -1267,7 +1267,7 @@ int vc4_gem_init(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; vc4->dma_fence_context = dma_fence_context_alloc(1); @@ -1326,7 +1326,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; switch (args->madv) { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 6611ab7c26a6..62b82b1eeb36 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -147,6 +147,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) if (!drm_dev_enter(drm, &idx)) return -ENODEV; + WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); + drm_print_regset32(&p, &vc4_hdmi->hdmi_regset); drm_print_regset32(&p, &vc4_hdmi->hd_regset); drm_print_regset32(&p, &vc4_hdmi->cec_regset); @@ -156,6 +158,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) drm_print_regset32(&p, &vc4_hdmi->ram_regset); drm_print_regset32(&p, &vc4_hdmi->rm_regset); + pm_runtime_put(&vc4_hdmi->pdev->dev); + drm_dev_exit(idx); return 0; @@ -1594,6 +1598,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_UNDERFLOW_ENABLE | VC4_HD_VID_CTL_FRAME_COUNTER_RESET | + VC4_HD_VID_CTL_BLANK_INSERT_EN | (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | (hsync_pos ? 
0 : VC4_HD_VID_CTL_HSYNC_LOW)); @@ -1920,7 +1925,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data) } if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { - ret = -ENODEV; + ret = -ENOTSUPP; goto out_dev_exit; } @@ -2047,6 +2052,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_device *drm = vc4_hdmi->connector.dev; struct drm_connector *connector = &vc4_hdmi->connector; + struct vc4_dev *vc4 = to_vc4_dev(drm); unsigned int sample_rate = params->sample_rate; unsigned int channels = params->channels; unsigned long flags; @@ -2104,11 +2110,18 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, VC4_HDMI_AUDIO_PACKET_CEA_MASK); /* Set the MAI threshold */ - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW)); + if (vc4->gen >= VC4_GEN_5) + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW)); + else + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW)); HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index b04b2fc8d831..68455ce513e7 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -498,8 +498,11 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, field = &variant->registers[reg]; base = __vc4_hdmi_get_field_base(hdmi, field->reg); - if (!base) + if (!base) { + dev_warn(&hdmi->pdev->dev, + "Unknown register ID %u\n", reg); return; + } writel(value, base + field->offset); } diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2a835a5cff9d..1edf6e3fa7e6 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -33,7 +33,7 @@ #include "vc4_drv.h" #include "vc4_regs.h" -static const struct debugfs_reg32 hvs_regs[] = { +static const struct debugfs_reg32 vc4_hvs_regs[] = { VC4_REG32(SCALER_DISPCTRL), VC4_REG32(SCALER_DISPSTAT), VC4_REG32(SCALER_DISPID), @@ -110,7 +110,8 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; struct drm_printer p = drm_seq_file_printer(m); - unsigned int next_entry_start = 0; + unsigned int dlist_mem_size = hvs->dlist_mem_size; + unsigned int next_entry_start; unsigned int i, j; u32 dlist_word, dispstat; @@ -124,8 +125,9 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) } drm_printf(&p, "HVS chan %u:\n", i); + next_entry_start = 0; - for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) { + for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) { dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j); drm_printf(&p, "dlist: %02d: 0x%08x\n", j, dlist_word); @@ -222,6 +224,9 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs, if (!drm_dev_enter(drm, &idx)) return; + if (hvs->vc4->gen != VC4_GEN_4) + goto exit; + /* The LUT memory is laid out with each HVS channel in order, * each of which takes 256 writes for R, 256 for G, then 256 * for B. 
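The per-generation MAI threshold hunk above packs four small values into one register write via VC4_SET_FIELD(). A stand-alone model of the packing; the field masks below are invented for illustration and are not the real VC4_HD_MAI_THR_* layout:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical field positions, one byte per field */
    #define F_PANICHIGH 0xff000000u
    #define F_PANICLOW  0x00ff0000u
    #define F_DREQHIGH  0x0000ff00u
    #define F_DREQLOW   0x000000ffu

    /* shift the value to the mask's lowest set bit, like FIELD_PREP() */
    static uint32_t set_field(uint32_t value, uint32_t mask)
    {
        return (value << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
        /* VC4 thresholds from the hunk */
        uint32_t thr4 = set_field(0x8, F_PANICHIGH) |
                        set_field(0x8, F_PANICLOW) |
                        set_field(0x6, F_DREQHIGH) |
                        set_field(0x8, F_DREQLOW);
        /* deeper VC5 thresholds */
        uint32_t thr5 = set_field(0x10, F_PANICHIGH) |
                        set_field(0x10, F_PANICLOW) |
                        set_field(0x1c, F_DREQHIGH) |
                        set_field(0x1c, F_DREQLOW);

        printf("MAI_THR vc4=0x%08x vc5=0x%08x\n", thr4, thr5);
        return 0;
    }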
@@ -237,6 +242,7 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs, for (i = 0; i < crtc->gamma_size; i++) HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); +exit: drm_dev_exit(idx); } @@ -291,53 +297,60 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output) u32 reg; int ret; - if (!vc4->is_vc5) + switch (vc4->gen) { + case VC4_GEN_4: return output; - /* - * NOTE: We should probably use drm_dev_enter()/drm_dev_exit() - * here, but this function is only used during the DRM device - * initialization, so we should be fine. - */ + case VC4_GEN_5: + /* + * NOTE: We should probably use + * drm_dev_enter()/drm_dev_exit() here, but this + * function is only used during the DRM device + * initialization, so we should be fine. + */ - switch (output) { - case 0: - return 0; + switch (output) { + case 0: + return 0; - case 1: - return 1; + case 1: + return 1; - case 2: - reg = HVS_READ(SCALER_DISPECTRL); - ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); - if (ret == 0) - return 2; + case 2: + reg = HVS_READ(SCALER_DISPECTRL); + ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); + if (ret == 0) + return 2; - return 0; + return 0; - case 3: - reg = HVS_READ(SCALER_DISPCTRL); - ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 3: + reg = HVS_READ(SCALER_DISPCTRL); + ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; - case 4: - reg = HVS_READ(SCALER_DISPEOLN); - ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 4: + reg = HVS_READ(SCALER_DISPEOLN); + ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; - case 5: - reg = HVS_READ(SCALER_DISPDITHER); - ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 5: + reg = HVS_READ(SCALER_DISPDITHER); + ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; + + default: + return -EPIPE; + } default: return -EPIPE; @@ -372,7 +385,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, dispctrl = SCALER_DISPCTRLX_ENABLE; dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan)); - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, @@ -394,7 +407,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE; HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | - ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) | + ((vc4->gen == VC4_GEN_4) ? SCALER_DISPBKGND_GAMMA : 0) | (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); /* Reload the LUT, since the SRAMs would have been disabled if @@ -415,13 +428,11 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan) if (!drm_dev_enter(drm, &idx)) return; - if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE) + if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)) goto out; - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); + HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); /* Once we leave, the scaler should be disabled and its fifo empty. 
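The reshuffled vc4_hvs_get_fifo_from_output() above reads more easily once the VC4_GEN_5 mux rule is stated on its own: outputs 0 and 1 are wired straight through, output 2 goes to FIFO 2 only when its SCALER_DISPECTRL mux field is 0, and outputs 3 to 5 each carry a mux field where the value 3 means unconnected (-EPIPE). A reduced model, with the FIELD_GET() register reads replaced by a plain parameter:

    #include <stdio.h>

    #define UNCONNECTED -1  /* stands in for -EPIPE */

    static int fifo_from_output(unsigned int output, unsigned int mux)
    {
        switch (output) {
        case 0:
        case 1:
            return output;                  /* fixed 1:1 wiring */
        case 2:
            return mux == 0 ? 2 : 0;        /* DSP2 mux */
        case 3:
        case 4:
        case 5:
            return mux == 3 ? UNCONNECTED : (int)mux;
        default:
            return UNCONNECTED;
        }
    }

    int main(void)
    {
        unsigned int out;

        for (out = 0; out <= 5; out++)
            printf("output %u, mux 3 -> fifo %d\n",
                   out, fifo_from_output(out, 3));
        return 0;
    }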
*/ WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); @@ -456,17 +467,29 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) if (hweight32(crtc_state->connector_mask) > 1) return -EINVAL; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) - dlist_count += vc4_plane_dlist_size(plane_state); + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + u32 plane_dlist_count = vc4_plane_dlist_size(plane_state); + + drm_dbg_driver(dev, "[CRTC:%d:%s] Found [PLANE:%d:%s] with DLIST size: %u\n", + crtc->base.id, crtc->name, + plane->base.id, plane->name, + plane_dlist_count); + + dlist_count += plane_dlist_count; + } dlist_count++; /* Account for SCALER_CTL0_END. */ + drm_dbg_driver(dev, "[CRTC:%d:%s] Allocating DLIST block with size: %u\n", + crtc->base.id, crtc->name, dlist_count); spin_lock_irqsave(&vc4->hvs->mm_lock, flags); ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm, dlist_count); spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); - if (ret) + if (ret) { + drm_err(dev, "Failed to allocate DLIST entry: %d\n", ret); return ret; + } return 0; } @@ -580,7 +603,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, } if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) - return; + goto exit; if (debug_dump_regs) { DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc)); @@ -663,12 +686,14 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, vc4_hvs_dump_state(hvs); } +exit: drm_dev_exit(idx); } void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) { - struct drm_device *drm = &hvs->vc4->base; + struct vc4_dev *vc4 = hvs->vc4; + struct drm_device *drm = &vc4->base; u32 dispctrl; int idx; @@ -676,8 +701,9 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) return; dispctrl = HVS_READ(SCALER_DISPCTRL); - dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel)); + dispctrl &= ~((vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel)); HVS_WRITE(SCALER_DISPCTRL, dispctrl); @@ -686,7 +712,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel) { - struct drm_device *drm = &hvs->vc4->base; + struct vc4_dev *vc4 = hvs->vc4; + struct drm_device *drm = &vc4->base; u32 dispctrl; int idx; @@ -694,8 +721,9 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel) return; dispctrl = HVS_READ(SCALER_DISPCTRL); - dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel)); + dispctrl |= ((vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel)); HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_EUFLOW(channel)); @@ -738,8 +766,10 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data) control = HVS_READ(SCALER_DISPCTRL); for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) { - dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel); + dspeislur = (vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel); + /* Interrupt masking is not always honored, so check it here. 
*/ if (status & SCALER_DISPSTAT_EUFLOW(channel) && control & dspeislur) { @@ -767,7 +797,7 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor) if (!vc4->hvs) return -ENODEV; - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, minor->debugfs_root, &vc4->load_tracker_enabled); @@ -781,7 +811,9 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor) return 0; } -struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev) +struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, + void __iomem *regs, + struct platform_device *pdev) { struct drm_device *drm = &vc4->base; struct vc4_hvs *hvs; @@ -791,6 +823,7 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde return ERR_PTR(-ENOMEM); hvs->vc4 = vc4; + hvs->regs = regs; hvs->pdev = pdev; spin_lock_init(&hvs->mm_lock); @@ -800,16 +833,17 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde * our 16K), since we don't want to scramble the screen when * transitioning from the firmware's boot setup to runtime. */ + hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END; drm_mm_init(&hvs->dlist_mm, HVS_BOOTLOADER_DLIST_END, - (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); + hvs->dlist_mem_size); /* Set up the HVS LBM memory manager. We could have some more * complicated data structure that allowed reuse of LBM areas * between planes when they don't overlap on the screen, but * for now we just allocate globally. */ - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) /* 48k words of 2x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else @@ -821,79 +855,14 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde return hvs; } -static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) +static int vc4_hvs_hw_init(struct vc4_hvs *hvs) { - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); - struct vc4_hvs *hvs = NULL; - int ret; - u32 dispctrl; - u32 reg, top; - - hvs = __vc4_hvs_alloc(vc4, NULL); - if (IS_ERR(hvs)) - return PTR_ERR(hvs); - - hvs->regs = vc4_ioremap_regs(pdev, 0); - if (IS_ERR(hvs->regs)) - return PTR_ERR(hvs->regs); - - hvs->regset.base = hvs->regs; - hvs->regset.regs = hvs_regs; - hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - - if (vc4->is_vc5) { - struct rpi_firmware *firmware; - struct device_node *node; - unsigned int max_rate; - - node = rpi_firmware_find_node(); - if (!node) - return -EINVAL; - - firmware = rpi_firmware_get(node); - of_node_put(node); - if (!firmware) - return -EPROBE_DEFER; - - hvs->core_clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(hvs->core_clk)) { - dev_err(&pdev->dev, "Couldn't get core clock\n"); - return PTR_ERR(hvs->core_clk); - } - - max_rate = rpi_firmware_clk_get_max_rate(firmware, - RPI_FIRMWARE_CORE_CLK_ID); - rpi_firmware_put(firmware); - if (max_rate >= 550000000) - hvs->vc5_hdmi_enable_hdmi_20 = true; - - if (max_rate >= 600000000) - hvs->vc5_hdmi_enable_4096by2160 = true; - - hvs->max_core_rate = max_rate; - - ret = clk_prepare_enable(hvs->core_clk); - if (ret) { - dev_err(&pdev->dev, "Couldn't enable the core clock\n"); - return ret; - } - } - - if (!vc4->is_vc5) - hvs->dlist = hvs->regs + SCALER_DLIST_START; - else - hvs->dlist = hvs->regs + SCALER5_DLIST_START; + struct vc4_dev *vc4 = hvs->vc4; + u32 dispctrl, reg; - /* Upload filter kernels. 
We only have the one for now, so we - * keep it around for the lifetime of the driver. - */ - ret = vc4_hvs_upload_linear_kernel(hvs, - &hvs->mitchell_netravali_filter, - mitchell_netravali_1_3_1_3_kernel); - if (ret) - return ret; + dispctrl = HVS_READ(SCALER_DISPCTRL); + dispctrl |= SCALER_DISPCTRL_ENABLE; + HVS_WRITE(SCALER_DISPCTRL, dispctrl); reg = HVS_READ(SCALER_DISPECTRL); reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK; @@ -916,13 +885,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX)); dispctrl = HVS_READ(SCALER_DISPCTRL); - - dispctrl |= SCALER_DISPCTRL_ENABLE; dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | SCALER_DISPCTRL_DISPEIRQ(1) | SCALER_DISPCTRL_DISPEIRQ(2); - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER_DISPCTRL_SLVWREIRQ | SCALER_DISPCTRL_SLVRDEIRQ | @@ -962,11 +929,33 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + /* Set AXI panic mode. + * VC4 panics when < 2 lines in FIFO. + * VC5 panics when less than 1 line in the FIFO. + */ + dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK | + SCALER_DISPCTRL_PANIC1_MASK | + SCALER_DISPCTRL_PANIC2_MASK); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + HVS_WRITE(SCALER_DISPCTRL, dispctrl); - /* Recompute Composite Output Buffer (COB) allocations for the displays + return 0; +} + +static int vc4_hvs_cob_init(struct vc4_hvs *hvs) +{ + struct vc4_dev *vc4 = hvs->vc4; + u32 reg, top; + + /* + * Recompute Composite Output Buffer (COB) allocations for the + * displays */ - if (!vc4->is_vc5) { + switch (vc4->gen) { + case VC4_GEN_4: /* The COB is 20736 pixels, or just over 10 lines at 2048 wide. * The bottom 2048 pixels are full 32bpp RGBA (intended for the * TXP composing RGBA to memory), whilst the remainder are only @@ -990,7 +979,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) top = VC4_COB_SIZE; reg |= (top - 1) << 16; HVS_WRITE(SCALER_DISPBASE0, reg); - } else { + break; + + case VC4_GEN_5: /* The COB is 44416 pixels, or 10.8 lines at 4096 wide. 
* The bottom 4096 pixels are full RGBA (intended for the TXP * composing RGBA to memory), whilst the remainder are only @@ -1016,8 +1007,96 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) top = VC5_COB_SIZE; reg |= top << 16; HVS_WRITE(SCALER_DISPBASE0, reg); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hvs *hvs = NULL; + void __iomem *regs; + int ret; + + regs = vc4_ioremap_regs(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + hvs = __vc4_hvs_alloc(vc4, regs, pdev); + if (IS_ERR(hvs)) + return PTR_ERR(hvs); + + hvs->regset.base = hvs->regs; + hvs->regset.regs = vc4_hvs_regs; + hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs); + + if (vc4->gen == VC4_GEN_5) { + struct rpi_firmware *firmware; + struct device_node *node; + unsigned int max_rate; + + node = rpi_firmware_find_node(); + if (!node) + return -EINVAL; + + firmware = rpi_firmware_get(node); + of_node_put(node); + if (!firmware) + return -EPROBE_DEFER; + + hvs->core_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hvs->core_clk)) { + dev_err(&pdev->dev, "Couldn't get core clock\n"); + return PTR_ERR(hvs->core_clk); + } + + max_rate = rpi_firmware_clk_get_max_rate(firmware, + RPI_FIRMWARE_CORE_CLK_ID); + rpi_firmware_put(firmware); + if (max_rate >= 550000000) + hvs->vc5_hdmi_enable_hdmi_20 = true; + + if (max_rate >= 600000000) + hvs->vc5_hdmi_enable_4096by2160 = true; + + hvs->max_core_rate = max_rate; + + ret = clk_prepare_enable(hvs->core_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the core clock\n"); + return ret; + } } + if (vc4->gen == VC4_GEN_4) + hvs->dlist = hvs->regs + SCALER_DLIST_START; + else + hvs->dlist = hvs->regs + SCALER5_DLIST_START; + + ret = vc4_hvs_hw_init(hvs); + if (ret) + return ret; + + /* Upload filter kernels. We only have the one for now, so we + * keep it around for the lifetime of the driver. 
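The two COB comments above are easy to sanity-check against the DISPBASE0 writes: 20736 pixels at 2048 wide is 10.125 lines on VC4, 44416 at 4096 wide is about 10.8 lines on VC5, and channel 0's top is written inclusively ((top - 1) << 16) on VC4 but exclusively (top << 16) on VC5. A throwaway check of just that arithmetic (the base halves of the registers and the other channels are left out, as they are in the excerpt):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int vc4_cob = 20736, vc4_width = 2048;
        const unsigned int vc5_cob = 44416, vc5_width = 4096;
        unsigned int reg4 = (vc4_cob - 1) << 16;    /* inclusive top */
        unsigned int reg5 = vc5_cob << 16;          /* exclusive top */

        printf("VC4 COB: %.3f lines, DISPBASE0 top field 0x%08x\n",
               (double)vc4_cob / vc4_width, reg4);
        printf("VC5 COB: %.3f lines, DISPBASE0 top field 0x%08x\n",
               (double)vc5_cob / vc5_width, reg5);
        return 0;
    }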
+ */ + ret = vc4_hvs_upload_linear_kernel(hvs, + &hvs->mitchell_netravali_filter, + mitchell_netravali_1_3_1_3_kernel); + if (ret) + return ret; + + ret = vc4_hvs_cob_init(hvs); + if (ret) + return ret; + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), vc4_hvs_irq_handler, 0, "vc4 hvs", drm); if (ret) diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index ef93d8e22a35..69b399f3b802 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -263,7 +263,7 @@ vc4_irq_enable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (!vc4->v3d) @@ -280,7 +280,7 @@ vc4_irq_disable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (!vc4->v3d) @@ -303,7 +303,7 @@ int vc4_irq_install(struct drm_device *dev, int irq) struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (irq == IRQ_NOTCONNECTED) @@ -324,7 +324,7 @@ void vc4_irq_uninstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; vc4_irq_disable(dev); @@ -337,7 +337,7 @@ void vc4_irq_reset(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* Acknowledge any stale IRQs. */ diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 5495f2a94fa9..58bbb9efc2df 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -369,7 +369,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) old_hvs_state->fifo_state[channel].pending_commit = NULL; } - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { unsigned long state_rate = max(old_hvs_state->core_clock_rate, new_hvs_state->core_clock_rate); unsigned long core_rate = clamp_t(unsigned long, state_rate, @@ -388,7 +388,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) vc5_hvs_pv_muxing_commit(vc4, state); else vc4_hvs_pv_muxing_commit(vc4, state); @@ -406,7 +406,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { unsigned long core_rate = min_t(unsigned long, hvs->max_core_rate, new_hvs_state->core_clock_rate); @@ -461,7 +461,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_mode_fb_cmd2 mode_cmd_local; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); /* If the user didn't specify a modifier, use the @@ -1040,7 +1040,7 @@ int vc4_kms_load(struct drm_device *dev) * the BCM2711, but the load tracker computations are used for * the core clock rate calculation. */ - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. 
*/ @@ -1056,7 +1056,7 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { dev->mode_config.max_width = 7680; dev->mode_config.max_height = 7680; } else { @@ -1064,7 +1064,7 @@ int vc4_kms_load(struct drm_device *dev) dev->mode_config.max_height = 2048; } - dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs; + dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs; dev->mode_config.helper_private = &vc4_mode_config_helpers; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index c00a5cc2316d..f1342f917cf7 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon) return; vc4 = perfmon->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; refcount_inc(&perfmon->refcnt); @@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon *perfmon) return; vc4 = perfmon->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (refcount_dec_and_test(&perfmon->refcnt)) @@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon) unsigned int i; u32 mask; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon)) @@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, { unsigned int i; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (WARN_ON_ONCE(!vc4->active_perfmon || @@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) struct vc4_dev *vc4 = vc4file->dev; struct vc4_perfmon *perfmon; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; mutex_lock(&vc4file->perfmon.lock); @@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_init(&vc4file->perfmon.lock); @@ -131,7 +131,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4file->perfmon.lock); @@ -151,7 +151,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, unsigned int i; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -205,7 +205,7 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vc4_perfmon_destroy *req = data; struct vc4_perfmon *perfmon; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -233,7 +233,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct vc4_perfmon *perfmon; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -241,11 +241,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, return -ENODEV; } - mutex_lock(&vc4file->perfmon.lock); - perfmon = idr_find(&vc4file->perfmon.idr, req->id); - vc4_perfmon_get(perfmon); - mutex_unlock(&vc4file->perfmon.lock); - + perfmon = vc4_perfmon_find(vc4file, req->id); if (!perfmon) return -EINVAL; diff --git 
a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 07caf2a47c6c..ba6e86d62a77 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -110,6 +110,18 @@ static const struct hvs_format { .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB, }, { + .drm = DRM_FORMAT_YUV444, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .pixel_order = HVS_PIXEL_ORDER_XYCBCR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR, + }, + { + .drm = DRM_FORMAT_YVU444, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .pixel_order = HVS_PIXEL_ORDER_XYCRCB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB, + }, + { .drm = DRM_FORMAT_YUV420, .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE, .pixel_order = HVS_PIXEL_ORDER_XYCBCR, @@ -251,9 +263,9 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst) { - if (dst == src) + if (dst == src >> 16) return VC4_SCALING_NONE; - if (3 * dst >= 2 * src) + if (3 * dst >= 2 * (src >> 16)) return VC4_SCALING_PPF; else return VC4_SCALING_TPZ; @@ -438,12 +450,11 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct drm_framebuffer *fb = state->fb; - struct drm_gem_dma_object *bo; int num_planes = fb->format->num_planes; struct drm_crtc_state *crtc_state; u32 h_subsample = fb->format->hsub; u32 v_subsample = fb->format->vsub; - int i, ret; + int ret; crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); @@ -457,20 +468,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) if (ret) return ret; - for (i = 0; i < num_planes; i++) { - bo = drm_fb_dma_get_gem_obj(fb, i); - vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i]; - } - - /* - * We don't support subpixel source positioning for scaling, - * but fractional coordinates can be generated by clipping - * so just round for now - */ - vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16); - vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16); - vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x; - vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y; + vc4_state->src_x = state->src.x1; + vc4_state->src_y = state->src.y1; + vc4_state->src_w[0] = state->src.x2 - vc4_state->src_x; + vc4_state->src_h[0] = state->src.y2 - vc4_state->src_y; vc4_state->crtc_x = state->dst.x1; vc4_state->crtc_y = state->dst.y1; @@ -510,6 +511,12 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) */ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE) vc4_state->x_scaling[1] = VC4_SCALING_PPF; + + /* Similarly UV needs vertical scaling to be enabled. + * Without this a 1:1 scaled YUV422 plane isn't rendered. + */ + if (vc4_state->y_scaling[1] == VC4_SCALING_NONE) + vc4_state->y_scaling[1] = VC4_SCALING_PPF; } else { vc4_state->is_yuv = false; vc4_state->x_scaling[1] = VC4_SCALING_NONE; @@ -523,7 +530,7 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst) { u32 scale, recip; - scale = (1 << 16) * src / dst; + scale = src / dst; /* The specs note that while the reciprocal would be defined * as (1<<32)/scale, ~0 is close enough. 
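The u16.16 switch runs through all of these vc4_plane.c hunks: vc4_get_scaling_mode() now shifts the fraction away before classifying, and vc4_write_tpz() derives its 16.16 scale factor from a plain src / dst because src already carries sixteen fractional bits. A stand-alone rendering of that arithmetic, with the sample sizes invented:

    #include <stdint.h>
    #include <stdio.h>

    enum scaling { NONE, PPF, TPZ };

    /* src is u16.16, dst is whole pixels, as in the reworked helpers */
    static enum scaling scaling_mode(uint32_t src, uint32_t dst)
    {
        if (dst == src >> 16)
            return NONE;
        if (3 * dst >= 2 * (src >> 16))
            return PPF;     /* mild scaling: polyphase filter */
        return TPZ;         /* heavy downscale: trapezoidal */
    }

    int main(void)
    {
        uint32_t src = 1920u << 16;     /* 1920.0 source pixels */
        uint32_t dst = 1280;            /* 1.5x downscale */
        uint32_t scale = src / dst;     /* u16.16; no (1 << 16) * needed now */

        printf("mode=%d scale=0x%05x (%.3f)\n",
               scaling_mode(src, dst), scale, scale / 65536.0);
        return 0;
    }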
@@ -537,14 +544,61 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst) VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP)); } -static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) +/* phase magnitude bits */ +#define PHASE_BITS 6 + +static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst, + u32 xy, int channel) { - u32 scale = (1 << 16) * src / dst; + u32 scale = src / dst; + s32 offset, offset2; + s32 phase; + + /* + * Start the phase at 1/2 pixel from the 1st pixel at src_x. + * 1/4 pixel for YUV. + */ + if (channel) { + /* + * The phase is relative to scale_src->x, so shift it for + * display list's x value + */ + offset = (xy & 0x1ffff) >> (16 - PHASE_BITS) >> 1; + offset += -(1 << PHASE_BITS >> 2); + } else { + /* + * The phase is relative to scale_src->x, so shift it for + * display list's x value + */ + offset = (xy & 0xffff) >> (16 - PHASE_BITS); + offset += -(1 << PHASE_BITS >> 1); + + /* + * This is a kludge to make sure the scaling factors are + * consistent with YUV's luma scaling. We lose 1-bit precision + * because of this. + */ + scale &= ~1; + } + + /* + * There may also be a small error introduced by the precision of scale. + * Add half of that as a compromise. + */ + offset2 = src - dst * scale; + offset2 >>= 16 - PHASE_BITS; + phase = offset + (offset2 >> 1); + + /* Ensure +ve values don't touch the sign bit, then truncate negative values */ + if (phase >= 1 << PHASE_BITS) + phase = (1 << PHASE_BITS) - 1; + + phase &= SCALER_PPF_IPHASE_MASK; vc4_dlist_write(vc4_state, SCALER_PPF_AGC | VC4_SET_FIELD(scale, SCALER_PPF_SCALE) | - VC4_SET_FIELD(0, SCALER_PPF_IPHASE)); + VC4_SET_FIELD(phase, SCALER_PPF_IPHASE)); } static u32 vc4_lbm_size(struct drm_plane_state *state) @@ -569,7 +623,7 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ) pix_per_line = vc4_state->crtc_w; else - pix_per_line = vc4_state->src_w[0]; + pix_per_line = vc4_state->src_w[0] >> 16; if (!vc4_state->is_yuv) { if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) @@ -587,10 +641,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) } /* Align it to 64 or 128 (hvs5) bytes */ - lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64); + lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64); /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ - lbm /= vc4->is_vc5 ? 4 : 2; + lbm /= vc4->gen == VC4_GEN_5 ?
4 : 2; return lbm; } @@ -602,27 +656,27 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state, /* Ch0 H-PPF Word 0: Scaling Parameters */ if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) { - vc4_write_ppf(vc4_state, - vc4_state->src_w[channel], vc4_state->crtc_w); + vc4_write_ppf(vc4_state, vc4_state->src_w[channel], + vc4_state->crtc_w, vc4_state->src_x, channel); } /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */ if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) { - vc4_write_ppf(vc4_state, - vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_write_ppf(vc4_state, vc4_state->src_h[channel], + vc4_state->crtc_h, vc4_state->src_y, channel); vc4_dlist_write(vc4_state, 0xc0c0c0c0); } /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */ if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) { - vc4_write_tpz(vc4_state, - vc4_state->src_w[channel], vc4_state->crtc_w); + vc4_write_tpz(vc4_state, vc4_state->src_w[channel], + vc4_state->crtc_w); } /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */ if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) { - vc4_write_tpz(vc4_state, - vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_write_tpz(vc4_state, vc4_state->src_h[channel], + vc4_state->crtc_h); vc4_dlist_write(vc4_state, 0xc0c0c0c0); } } @@ -660,7 +714,8 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) for (i = 0; i < fb->format->num_planes; i++) { /* Even if the bandwidth/plane required for a single frame is * - * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh + * (vc4_state->src_w[i] >> 16) * (vc4_state->src_h[i] >> 16) * + * cpp * vrefresh * * when downscaling, we have to read more pixels per line in * the time frame reserved for a single line, so the bandwidth @@ -669,11 +724,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) * load by this number. We're likely over-estimating the read * demand, but that's better than under-estimating it. */ - vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i], + vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i] >> 16, vc4_state->crtc_h); - vc4_state->membus_load += vc4_state->src_w[i] * - vc4_state->src_h[i] * vscale_factor * - fb->format->cpp[i]; + vc4_state->membus_load += (vc4_state->src_w[i] >> 16) * + (vc4_state->src_h[i] >> 16) * + vscale_factor * fb->format->cpp[i]; vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w; } @@ -684,7 +739,9 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) static int vc4_plane_allocate_lbm(struct drm_plane_state *state) { - struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); + struct drm_device *drm = state->plane->dev; + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct drm_plane *plane = state->plane; struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); unsigned long irqflags; u32 lbm_size; @@ -693,6 +750,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) if (!lbm_size) return 0; + if (vc4->gen == VC4_GEN_5) + lbm_size = ALIGN(lbm_size, 64); + else if (vc4->gen == VC4_GEN_4) + lbm_size = ALIGN(lbm_size, 32); + + drm_dbg_driver(drm, "[PLANE:%d:%s] LBM Allocation Size: %u\n", + plane->base.id, plane->name, lbm_size); + if (WARN_ON(!vc4_state->lbm_offset)) return -EINVAL; @@ -705,13 +770,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, - lbm_size, - vc4->is_vc5 ? 
64 : 32, + lbm_size, 1, 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); - if (ret) + if (ret) { + drm_err(drm, "Failed to allocate LBM entry: %d\n", ret); return ret; + } } else { WARN_ON_ONCE(lbm_size != vc4_state->lbm.size); } @@ -826,9 +892,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane, bool mix_plane_alpha; bool covers_screen; u32 scl0, scl1, pitch0; - u32 tiling, src_y; + u32 tiling, src_x, src_y; + u32 width, height; u32 hvs_format = format->hvs; unsigned int rotation; + u32 offsets[3] = { 0 }; int ret, i; if (vc4_state->dlist_initialized) @@ -838,6 +906,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane, if (ret) return ret; + width = vc4_state->src_w[0] >> 16; + height = vc4_state->src_h[0] >> 16; + /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB * and 4:4:4, scl1 should be set to scl0 so both channels of * the scaler do the same thing. For YUV, the Y plane needs @@ -858,9 +929,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane, DRM_MODE_REFLECT_Y); /* We must point to the last line when Y reflection is enabled. */ - src_y = vc4_state->src_y; + src_y = vc4_state->src_y >> 16; if (rotation & DRM_MODE_REFLECT_Y) - src_y += vc4_state->src_h[0] - 1; + src_y += height - 1; + + src_x = vc4_state->src_x >> 16; switch (base_format_mod) { case DRM_FORMAT_MOD_LINEAR: @@ -871,13 +944,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * out. */ for (i = 0; i < num_planes; i++) { - vc4_state->offsets[i] += src_y / - (i ? v_subsample : 1) * - fb->pitches[i]; - - vc4_state->offsets[i] += vc4_state->src_x / - (i ? h_subsample : 1) * - fb->format->cpp[i]; + offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i]; + offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i]; } break; @@ -898,7 +966,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * pitch * tile_h == tile_size * tiles_per_row */ u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift); - u32 tiles_l = vc4_state->src_x >> tile_w_shift; + u32 tiles_l = src_x >> tile_w_shift; u32 tiles_r = tiles_w - tiles_l; u32 tiles_t = src_y >> tile_h_shift; /* Intra-tile offsets, which modify the base address (the @@ -908,7 +976,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, u32 tile_y = (src_y >> 4) & 1; u32 subtile_y = (src_y >> 2) & 3; u32 utile_y = src_y & 3; - u32 x_off = vc4_state->src_x & tile_w_mask; + u32 x_off = src_x & tile_w_mask; u32 y_off = src_y & tile_h_mask; /* When Y reflection is requested we must set the @@ -932,19 +1000,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane, VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) | VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) | VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R)); - vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift); - vc4_state->offsets[0] += subtile_y << 8; - vc4_state->offsets[0] += utile_y << 4; + offsets[0] += tiles_t * (tiles_w << tile_size_shift); + offsets[0] += subtile_y << 8; + offsets[0] += utile_y << 4; /* Rows of tiles alternate left-to-right and right-to-left. 
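	 * For example (illustrative numbers, not taken from the hardware
	 * spec): with tiles_w == 20 and a plane whose left edge lands in
	 * tile column tiles_l == 3, an even row advances
	 * tiles_l << tile_size_shift bytes from the start of the row, while
	 * an odd row, scanned right-to-left, advances
	 * (tiles_w - tiles_l) << tile_size_shift bytes instead, which is
	 * why the two branches below differ.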
*/ if (tiles_t & 1) { pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR; - vc4_state->offsets[0] += (tiles_w - tiles_l) << - tile_size_shift; - vc4_state->offsets[0] -= (1 + !tile_y) << 10; + offsets[0] += (tiles_w - tiles_l) << tile_size_shift; + offsets[0] -= (1 + !tile_y) << 10; } else { - vc4_state->offsets[0] += tiles_l << tile_size_shift; - vc4_state->offsets[0] += tile_y << 10; + offsets[0] += tiles_l << tile_size_shift; + offsets[0] += tile_y << 10; } break; @@ -1004,7 +1071,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * of the 12-pixels in that 128-bit word is the * first pixel to be used */ - u32 remaining_pixels = vc4_state->src_x % 96; + u32 remaining_pixels = src_x % 96; u32 aligned = remaining_pixels / 12; u32 last_bits = remaining_pixels % 12; @@ -1026,18 +1093,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } pix_per_tile = tile_w / fb->format->cpp[0]; - x_off = (vc4_state->src_x % pix_per_tile) / + x_off = (src_x % pix_per_tile) / (i ? h_subsample : 1) * fb->format->cpp[i]; } - tile = vc4_state->src_x / pix_per_tile; + tile = src_x / pix_per_tile; - vc4_state->offsets[i] += param * tile_w * tile; - vc4_state->offsets[i] += src_y / - (i ? v_subsample : 1) * - tile_w; - vc4_state->offsets[i] += x_off & ~(i ? 1 : 0); + offsets[i] += param * tile_w * tile; + offsets[i] += src_y / (i ? v_subsample : 1) * tile_w; + offsets[i] += x_off & ~(i ? 1 : 0); } pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT); @@ -1050,6 +1115,30 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } + /* fetch an extra pixel if we don't actually line up with the left edge. */ + if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16)) + width++; + + /* same for the right side */ + if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) && + vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16)) + width++; + + /* now for the top */ + if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16)) + height++; + + /* and the bottom */ + if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) && + vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16)) + height++; + + /* For YUV444 the hardware wants double the width, otherwise it doesn't + * fetch full width of chroma + */ + if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444) + width <<= 1; + /* Don't waste cycles mixing with plane alpha if the set alpha * is opaque or there is no per-pixel alpha information. * In any case we use the alpha property value as the fixed alpha. @@ -1057,7 +1146,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { /* Control word */ vc4_dlist_write(vc4_state, SCALER_CTL0_VALID | @@ -1092,10 +1181,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, vc4_dlist_write(vc4_state, (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | vc4_hvs4_get_alpha_blend_mode(state) | - VC4_SET_FIELD(vc4_state->src_w[0], - SCALER_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], - SCALER_POS2_HEIGHT)); + VC4_SET_FIELD(width, SCALER_POS2_WIDTH) | + VC4_SET_FIELD(height, SCALER_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. 
*/ vc4_dlist_write(vc4_state, 0xc0c0c0c0); @@ -1148,10 +1235,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Position Word 2: Source Image Size */ vc4_state->pos2_offset = vc4_state->dlist_count; vc4_dlist_write(vc4_state, - VC4_SET_FIELD(vc4_state->src_w[0], - SCALER5_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], - SCALER5_POS2_HEIGHT)); + VC4_SET_FIELD(width, SCALER5_POS2_WIDTH) | + VC4_SET_FIELD(height, SCALER5_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. */ vc4_dlist_write(vc4_state, 0xc0c0c0c0); @@ -1162,9 +1247,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * * The pointers may be any byte address. */ - vc4_state->ptr0_offset = vc4_state->dlist_count; - for (i = 0; i < num_planes; i++) - vc4_dlist_write(vc4_state, vc4_state->offsets[i]); + vc4_state->ptr0_offset[0] = vc4_state->dlist_count; + + for (i = 0; i < num_planes; i++) { + struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i); + + vc4_dlist_write(vc4_state, bo->dma_addr + fb->offsets[i] + offsets[i]); + } /* Pointer Context Word 0/1/2: Written by the HVS */ for (i = 0; i < num_planes; i++) @@ -1298,7 +1387,11 @@ static int vc4_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - return vc4_plane_allocate_lbm(new_plane_state); + ret = vc4_plane_allocate_lbm(new_plane_state); + if (ret) + return ret; + + return 0; } static void vc4_plane_atomic_update(struct drm_plane *plane, @@ -1362,13 +1455,13 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) * scanout will start from this address as soon as the FIFO * needs to refill with pixels. */ - writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]); /* Also update the CPU-side dlist copy, so that any later * atomic updates that don't do a new modeset on our plane * also use our updated address. */ - vc4_state->dlist[vc4_state->ptr0_offset] = addr; + vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr; drm_dev_exit(idx); } @@ -1423,8 +1516,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, sizeof(vc4_state->y_scaling)); vc4_state->is_unity = new_vc4_state->is_unity; vc4_state->is_yuv = new_vc4_state->is_yuv; - memcpy(vc4_state->offsets, new_vc4_state->offsets, - sizeof(vc4_state->offsets)); vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill; /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. 
*/ @@ -1432,8 +1523,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, new_vc4_state->dlist[vc4_state->pos0_offset]; vc4_state->dlist[vc4_state->pos2_offset] = new_vc4_state->dlist[vc4_state->pos2_offset]; - vc4_state->dlist[vc4_state->ptr0_offset] = - new_vc4_state->dlist[vc4_state->ptr0_offset]; + vc4_state->dlist[vc4_state->ptr0_offset[0]] = + new_vc4_state->dlist[vc4_state->ptr0_offset[0]]; /* Note that we can't just call vc4_plane_write_dlist() * because that would smash the context data that the HVS is @@ -1443,8 +1534,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, &vc4_state->hw_dlist[vc4_state->pos0_offset]); writel(vc4_state->dlist[vc4_state->pos2_offset], &vc4_state->hw_dlist[vc4_state->pos2_offset]); - writel(vc4_state->dlist[vc4_state->ptr0_offset], - &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + writel(vc4_state->dlist[vc4_state->ptr0_offset[0]], + &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]); drm_dev_exit(idx); } @@ -1471,7 +1562,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || - old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset || + old_vc4_state->ptr0_offset[0] != new_vc4_state->ptr0_offset[0] || vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state)) return -EINVAL; @@ -1481,7 +1572,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, for (i = 0; i < new_vc4_state->dlist_count; i++) { if (i == new_vc4_state->pos0_offset || i == new_vc4_state->pos2_offset || - i == new_vc4_state->ptr0_offset || + i == new_vc4_state->ptr0_offset[0] || (new_vc4_state->lbm_offset && i == new_vc4_state->lbm_offset)) continue; @@ -1632,7 +1723,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, }; for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { - if (!hvs_formats[i].hvs5_only || vc4->is_vc5) { + if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) { formats[num_formats] = hvs_formats[i].drm; num_formats++; } @@ -1647,7 +1738,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, return ERR_CAST(vc4_plane); plane = &vc4_plane->base; - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) drm_plane_helper_add(plane, &vc5_plane_helper_funcs); else drm_plane_helper_add(plane, &vc4_plane_helper_funcs); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 8ac9515554f8..c55dec383929 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -777,6 +777,7 @@ enum { # define VC4_HD_VID_CTL_CLRSYNC BIT(24) # define VC4_HD_VID_CTL_CLRRGB BIT(23) # define VC4_HD_VID_CTL_BLANKPIX BIT(18) +# define VC4_HD_VID_CTL_BLANK_INSERT_EN BIT(16) # define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5) # define VC4_HD_CSC_CTL_ORDER_SHIFT 5 diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 1bda5010f15a..14079853338e 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) bool has_bin = args->bin_cl_size != 0; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->min_x_tile > args->max_x_tile || diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index bf5c4e36c94e..2423826c89eb 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ 
b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) int vc4_v3d_pm_get(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; mutex_lock(&vc4->power_lock); @@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4) void vc4_v3d_pm_put(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->power_lock); @@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) uint64_t seqno = 0; struct vc4_exec_info *exec; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; try_again: @@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used) { int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; mutex_lock(&vc4->bin_bo_lock); @@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *ref) void vc4_v3d_bin_bo_put(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->bin_bo_lock); diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 0c17284bf6f5..5bf134968ade 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) struct drm_gem_dma_object *obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; if (hindex >= exec->bo_count) { @@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo, uint32_t utile_w = utile_width(cpp); uint32_t utile_h = utile_height(cpp); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return false; /* The shaded vertex format stores signed 12.4 fixed point @@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *dev, uint32_t dst_offset = 0; uint32_t src_offset = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; while (src_offset < len) { @@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_device *dev, uint32_t i; int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; for (i = 0; i < exec->shader_state_count; i++) { diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 9745f8810eca..2d74e786914c 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_object *shader_obj) struct vc4_validated_shader_info *validated_shader = NULL; struct vc4_shader_validation_state validation_state; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; memset(&validation_state, 0, sizeof(validation_state)); diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index ea06ff2aa4b4..fc884fb57b7e 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU tristate "Virtio GPU driver" depends on DRM && VIRTIO_MENU && MMU select VIRTIO + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select VIRTIO_DMA_SHARED_BUFFER diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index e5a2665e50ea..ffca6e2e1c9a 100644 --- 
a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -26,14 +26,15 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/wait.h> #include <drm/drm.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> @@ -58,7 +59,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev) vga ? "virtio-vga" : "virtio-gpu-pci", pname); if (vga) { - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) return ret; } @@ -103,7 +104,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev) if (ret) goto err_deinit; - drm_fbdev_shmem_setup(vdev->priv, 32); + drm_client_setup(vdev->priv, NULL); + return 0; err_deinit: @@ -184,6 +186,8 @@ static const struct drm_driver driver = { .dumb_create = virtio_gpu_mode_dumb_create, .dumb_map_offset = virtio_gpu_mode_dumb_mmap, + DRM_FBDEV_SHMEM_DRIVER_OPS, + #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig index b9ecdebecb0b..9def079f685b 100644 --- a/drivers/gpu/drm/vkms/Kconfig +++ b/drivers/gpu/drm/vkms/Kconfig @@ -3,6 +3,7 @@ config DRM_VKMS tristate "Virtual KMS (EXPERIMENTAL)" depends on DRM && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select CRC32 diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index e7441b227b3c..3f0977d746be 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -187,8 +187,15 @@ static void blend(struct vkms_writeback_job *wb, const struct pixel_argb_u16 background_color = { .a = 0xffff }; - size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay; + size_t crtc_y_limit = crtc_state->base.mode.vdisplay; + /* + * The planes are composed line-by-line to avoid heavy memory usage. It is a necessary + * complexity to avoid poor blending performance. + * + * The function vkms_compose_row() is used to read a line, pixel-by-pixel, into the staging + * buffer. 
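+	 *
+	 * Roughly, each iteration of the loop below does (a sketch; only
+	 * fill_background(), vkms_compose_row() and vkms_writeback_row() are
+	 * named in this patch):
+	 *  1. fill the output line with the background color;
+	 *  2. for every active plane, convert the plane's pixels for this
+	 *     line into the staging buffer and blend them over the output
+	 *     line;
+	 *  3. hand the finished line to CRC computation and, when requested,
+	 *     to the writeback job.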
+ */ for (size_t y = 0; y < crtc_y_limit; y++) { fill_background(&background_color, output_buffer); @@ -263,7 +270,7 @@ static int compose_active_planes(struct vkms_writeback_job *active_wb, if (WARN_ON(check_format_funcs(crtc_state, active_wb))) return -EINVAL; - line_width = crtc_state->base.crtc->mode.hdisplay; + line_width = crtc_state->base.mode.hdisplay; stage_buffer.n_pixels = line_width; output_buffer.n_pixels = line_width; diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 40b4d084e3ce..bbf080d32d2c 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -64,8 +64,6 @@ static int vkms_enable_vblank(struct drm_crtc *crtc) struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct vkms_output *out = drm_crtc_to_vkms_output(crtc); - drm_calc_timestamping_constants(crtc, &crtc->mode); - hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); out->vblank_hrtimer.function = &vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); @@ -232,6 +230,7 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) + __acquires(&vkms_output->lock) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); @@ -243,6 +242,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) + __releases(&vkms_output->lock) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); @@ -287,7 +287,12 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs); - drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE); + ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE); + if (ret) { + DRM_ERROR("Failed to set gamma size\n"); + return ret; + } + drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE); spin_lock_init(&vkms_out->lock); diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 0c1a713b7b7b..2d1e95cb66e5 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -16,6 +16,7 @@ #include <drm/drm_gem.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> @@ -112,6 +113,7 @@ static const struct drm_driver vkms_driver = { .release = vkms_release, .fops = &vkms_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -225,7 +227,7 @@ static int vkms_create(struct vkms_config *config) if (ret) goto out_devres; - drm_fbdev_shmem_setup(&vkms_device->drm, 0); + drm_client_setup(&vkms_device->drm, NULL); return 0; diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 5e46ea5b96dc..672fe191e239 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -25,6 +25,17 @@ #define VKMS_LUT_SIZE 256 +/** + * struct vkms_frame_info - Structure to store the state of a frame + * + * @fb: backing drm framebuffer + * @src: source rectangle of this frame in the source framebuffer, stored in 16.16 fixed-point form + * @dst: destination rectangle in the crtc buffer, stored in whole pixel units + * @map: see @drm_shadow_plane_state.data + * @rotation: rotation applied to the source. + * + * @src and @dst should have the same size modulo the rotation. 
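+ *
+ * For example, since @src is stored in 16.16 fixed-point form, a
+ * drm_rect_width(&src) of 0x00500000 describes a source width of
+ * 80 whole pixels (0x00500000 >> 16 == 80).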
+ */
 struct vkms_frame_info {
 	struct drm_framebuffer *fb;
 	struct drm_rect src, dst;
@@ -52,9 +63,11 @@ struct vkms_writeback_job {
 };
 
 /**
- * vkms_plane_state - Driver specific plane state
+ * struct vkms_plane_state - Driver specific plane state
  * @base: base plane state
  * @frame_info: data required for composing computation
+ * @pixel_read: function to read a pixel in this plane. The creator of a struct vkms_plane_state
+ *		must ensure that this pointer is valid
  */
 struct vkms_plane_state {
 	struct drm_shadow_plane_state base;
@@ -73,29 +86,56 @@ struct vkms_color_lut {
 };
 
 /**
- * vkms_crtc_state - Driver specific CRTC state
+ * struct vkms_crtc_state - Driver specific CRTC state
+ *
  * @base: base CRTC state
  * @composer_work: work struct to compose and add CRC entries
- * @n_frame_start: start frame number for computed CRC
- * @n_frame_end: end frame number for computed CRC
+ *
+ * @num_active_planes: Number of active planes
+ * @active_planes: List containing all the active planes (counted by
+ *		   @num_active_planes). They should be stored in z-order.
+ * @active_writeback: Current active writeback job
+ * @gamma_lut: Lookup table for gamma used in this CRTC
+ * @crc_pending: Protected by @vkms_output.composer_lock, true when the frame CRC is not computed
+ *		 yet. Used by vblank to detect if the composer is too slow.
+ * @wb_pending: Protected by @vkms_output.composer_lock, true when a writeback frame is requested.
+ * @frame_start: Protected by @vkms_output.composer_lock, saves the frame number before the start
+ *		 of the composition process.
+ * @frame_end: Protected by @vkms_output.composer_lock, saves the last requested frame number.
+ *	       This is used to generate enough CRC entries when the composition worker is too slow.
  */
 struct vkms_crtc_state {
 	struct drm_crtc_state base;
 	struct work_struct composer_work;
 
 	int num_active_planes;
-	/* stack of active planes for crc computation, should be in z order */
 	struct vkms_plane_state **active_planes;
 	struct vkms_writeback_job *active_writeback;
 	struct vkms_color_lut gamma_lut;
 
-	/* below four are protected by vkms_output.composer_lock */
 	bool crc_pending;
 	bool wb_pending;
 	u64 frame_start;
 	u64 frame_end;
 };
 
+/**
+ * struct vkms_output - Internal representation of all output components in VKMS
+ *
+ * @crtc: Base CRTC in DRM
+ * @encoder: DRM encoder used for this output
+ * @connector: DRM connector used for this output
+ * @wb_connector: DRM writeback connector used for this output
+ * @vblank_hrtimer: Timer used to trigger the vblank
+ * @period_ns: vblank period, in nanoseconds, used to configure @vblank_hrtimer and to compute
+ *	       vblank timestamps
+ * @composer_workq: Ordered workqueue for @composer_state.composer_work.
+ * @lock: Lock used to protect concurrent access to the composer
+ * @composer_enabled: Protected by @lock, true when the VKMS composer is active (crc needed or
+ *		      writeback)
+ * @composer_state: Protected by @lock, current state of this VKMS output
+ * @composer_lock: Lock used internally to protect @composer_state members
+ */
 struct vkms_output {
 	struct drm_crtc crtc;
 	struct drm_encoder encoder;
@@ -103,28 +143,38 @@ struct vkms_output {
 	struct drm_writeback_connector wb_connector;
 	struct hrtimer vblank_hrtimer;
 	ktime_t period_ns;
-	/* ordered wq for composer_work */
 	struct workqueue_struct *composer_workq;
-	/* protects concurrent access to composer */
 	spinlock_t lock;
 
-	/* protected by @lock */
 	bool composer_enabled;
 	struct vkms_crtc_state *composer_state;
 
 	spinlock_t composer_lock;
 };
 
-struct vkms_device;
-
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @cursor: If true, a cursor plane is created in the VKMS device
+ * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
 struct vkms_config {
 	bool writeback;
 	bool cursor;
 	bool overlay;
-	/* only set when instantiated */
 	struct vkms_device *dev;
 };
 
+/**
+ * struct vkms_device - Description of a VKMS device
+ *
+ * @drm: Base device in DRM
+ * @platform: Associated platform device
+ * @output: Configuration and sub-components of the VKMS device
+ * @config: Configuration used in this VKMS device
+ */
 struct vkms_device {
 	struct drm_device drm;
 	struct platform_device *platform;
@@ -132,6 +182,10 @@ struct vkms_device {
 	const struct vkms_config *config;
 };
 
+/*
+ * The following helpers convert a pointer to an embedded member back into a
+ * pointer to its parent structure.
+ */
+
 #define drm_crtc_to_vkms_output(target) \
 	container_of(target, struct vkms_output, crtc)
 
@@ -144,12 +198,33 @@ struct vkms_device {
 #define to_vkms_plane_state(target)\
 	container_of(target, struct vkms_plane_state, base.base)
 
-/* CRTC */
+/**
+ * vkms_crtc_init() - Initialize a CRTC for VKMS
+ * @dev: DRM device associated with the VKMS buffer
+ * @crtc: uninitialized CRTC device
+ * @primary: primary plane to attach to the CRTC
+ * @cursor: plane to attach to the CRTC
+ */
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 		   struct drm_plane *primary, struct drm_plane *cursor);
 
+/**
+ * vkms_output_init() - Initialize all sub-components needed for a VKMS device.
+ *
+ * @vkmsdev: VKMS device to initialize
+ * @index: index of the CRTC which can be attached to the planes. The caller must ensure that
+ *	   @index is positive and less than or equal to 31.
+ */
 int vkms_output_init(struct vkms_device *vkmsdev, int index);
 
+/**
+ * vkms_plane_init() - Initialize a plane
+ *
+ * @vkmsdev: VKMS device containing the plane
+ * @type: type of plane to initialize
+ * @index: index of the CRTC which can be attached to the plane. The caller must ensure that
+ *	   @index is positive and less than or equal to 31.
+ */
 struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
				   enum drm_plane_type type, int index);
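The "less than or equal to 31" constraint documented above follows from how the index is consumed: it becomes a bit position in 32-bit possible_crtcs masks, exactly as vkms_output.c below does with BIT(0) for the encoder. A minimal sketch, with a hypothetical helper name that is not in the VKMS sources:

#include <linux/bits.h>
#include <linux/types.h>

/*
 * Hypothetical helper: the mask a plane or encoder would use to bind
 * itself to the CRTC created for @index. BIT(index) is only
 * well-defined for index < 32, hence the limit in the kerneldoc above.
 */
static inline u32 vkms_crtc_mask_for_index(int index)
{
	return BIT(index);
}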
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 040b7f113a3b..e8a5cc235ebb 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -9,24 +9,40 @@
 
 #include "vkms_formats.h"
 
+/**
+ * pixel_offset() - Get the offset of the pixel at coordinates x/y in the first plane
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x coordinate of the wanted pixel in the buffer
+ * @y: The y coordinate of the wanted pixel in the buffer
+ *
+ * The caller must ensure that the framebuffer associated with this request uses a pixel format
+ * where block_h == block_w == 1.
+ * If this requirement is not fulfilled, the resulting offset can point to another pixel or
+ * outside the buffer.
+ */
 static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
 {
 	return frame_info->offset + (y * frame_info->pitch) + (x * frame_info->cpp);
 }
 
-/*
- * packed_pixels_addr - Get the pointer to pixel of a given pair of coordinates
+/**
+ * packed_pixels_addr() - Get the pointer to the block containing the pixel at the given
+ * coordinates
  *
  * @frame_info: Buffer metadata
- * @x: The x(width) coordinate of the 2D buffer
- * @y: The y(Heigth) coordinate of the 2D buffer
+ * @x: The x (width) coordinate inside the plane
+ * @y: The y (height) coordinate inside the plane
  *
  * Takes the information stored in the frame_info, a pair of coordinates, and
  * returns the address of the first color channel.
  * This function assumes the channels are packed together, i.e. a color channel
  * comes immediately after another in the memory. And therefore, this function
  * doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21).
+ *
+ * The caller must ensure that the framebuffer associated with this request uses a pixel format
+ * where block_h == block_w == 1, otherwise the returned pointer can be outside the buffer.
  */
 static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
 				int x, int y)
@@ -51,6 +67,13 @@ static int get_x_position(const struct vkms_frame_info *frame_info, int limit, i
 	return x;
 }
 
+/*
+ * The following functions take pixel data from the buffer and convert it to the
+ * ARGB16161616 format in @out_pixel.
+ *
+ * They are used in the vkms_compose_row() function to handle multiple formats.
+ */
+
 static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
 {
 	/*
@@ -143,12 +166,11 @@ void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state
 }
 
 /*
- * The following functions take an line of argb_u16 pixels from the
- * src_buffer, convert them to a specific format, and store them in the
- * destination.
+ * The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
+ * The result is stored in @dst_pixels.
  *
- * They are used in the `compose_active_planes` to convert and store a line
- * from the src_buffer to the writeback buffer.
+ * They are used in vkms_writeback_row() to convert and store a pixel from the src_buffer to
+ * the writeback buffer.
  */
 static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 {
@@ -214,6 +236,14 @@ static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 	*pixels = cpu_to_le16(r << 11 | g << 5 | b);
 }
 
+/**
+ * vkms_writeback_row() - Generic loop for all supported writeback formats.
+ * It is executed just after the blending to write a line in the writeback buffer.
+ *
+ * @wb: Job where to insert the final image
+ * @src_buffer: Line to write
+ * @y: Row to write in the writeback buffer
+ */
 void vkms_writeback_row(struct vkms_writeback_job *wb,
 			const struct line_buffer *src_buffer, int y)
 {
@@ -227,6 +257,13 @@ void vkms_writeback_row(struct vkms_writeback_job *wb,
 		wb->pixel_write(dst_pixels, &in_pixels[x]);
 }
 
+/**
+ * get_pixel_conversion_function() - Retrieve the correct read_pixel function for a specific
+ * format. The returned pointer is NULL for unsupported pixel formats. The caller must ensure that
+ * the pointer is valid before using it in a vkms_plane_state.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
+ */
 void *get_pixel_conversion_function(u32 format)
 {
 	switch (format) {
@@ -245,6 +282,13 @@ void *get_pixel_conversion_function(u32 format)
 	}
 }
 
+/**
+ * get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format.
+ * The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the
+ * pointer is valid before using it in a vkms_writeback_job.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
+ */
 void *get_pixel_write_function(u32 format)
 {
 	switch (format) {

diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 5ce70dd946aa..25a99fde126c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -21,6 +21,7 @@ static int vkms_conn_get_modes(struct drm_connector *connector)
 {
 	int count;
 
+	/* Use the default modes list from DRM */
 	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
 	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
 
@@ -58,6 +59,12 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 	int writeback;
 	unsigned int n;
 
+	/*
+	 * Initialize the planes. One primary plane is required to perform the composition.
+	 *
+	 * The overlay and cursor planes are not mandatory, but can be used to perform complex
+	 * composition.
+	 */
 	primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
 	if (IS_ERR(primary))
 		return PTR_ERR(primary);
@@ -76,6 +83,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 			return PTR_ERR(cursor);
 	}
 
+	/* [1]: Allocation of a CRTC, its index will be BIT(0) = 1 */
 	ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
 	if (ret)
 		return ret;
@@ -84,7 +92,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 			      DRM_MODE_CONNECTOR_VIRTUAL);
 	if (ret) {
 		DRM_ERROR("Failed to init connector\n");
-		goto err_connector;
+		return ret;
 	}
 
 	drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
@@ -95,7 +103,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 		DRM_ERROR("Failed to init encoder\n");
 		goto err_encoder;
 	}
-	encoder->possible_crtcs = 1;
+	/*
+	 * This is a hardcoded value to select the CRTC for the encoder.
+	 * BIT(0) here designates the first registered CRTC, the one allocated in [1].
+	 */
+	encoder->possible_crtcs = BIT(0);
 
 	ret = drm_connector_attach_encoder(connector, encoder);
 	if (ret) {
@@ -119,8 +131,5 @@ err_attach:
 
 err_encoder:
 	drm_connector_cleanup(connector);
 
-err_connector:
-	drm_crtc_cleanup(crtc);
-
 	return ret;
 }

diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index bc724cbd5e3a..999d5c01ea81 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -131,8 +131,8 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
 	struct drm_connector_state *conn_state = wb_conn->base.state;
 	struct vkms_crtc_state *crtc_state = output->composer_state;
 	struct drm_framebuffer *fb = connector_state->writeback_job->fb;
-	u16 crtc_height = crtc_state->base.crtc->mode.vdisplay;
-	u16 crtc_width = crtc_state->base.crtc->mode.hdisplay;
+	u16 crtc_height = crtc_state->base.mode.vdisplay;
+	u16 crtc_width = crtc_state->base.mode.hdisplay;
 	struct vkms_writeback_job *active_wb;
 	struct vkms_frame_info *wb_frame_info;
 	u32 wb_format = fb->format->format;

diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6f1ac940cbae..6c3c2922ae8b 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -3,6 +3,7 @@ config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
 	depends on DRM && PCI && MMU
 	depends on (X86 && HYPERVISOR_GUEST) || ARM64
+	select DRM_CLIENT_SELECTION
 	select DRM_TTM
 	select DRM_TTM_HELPER
 	select MAPPING_DIRTY_HELPERS

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2825dd3149ed..2c46897876dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -35,7 +35,7 @@
 #include "vmwgfx_vkms.h"
 #include "ttm_object.h"
 
-#include <drm/drm_aperture.h>
+#include <drm/drm_client_setup.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fbdev_ttm.h>
 #include <drm/drm_gem_ttm_helper.h>
@@ -49,6 +49,8 @@
 #ifdef CONFIG_X86
 #include <asm/hypervisor.h>
 #endif
+
+#include <linux/aperture.h>
 #include <linux/cc_platform.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
@@ -859,8 +861,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
 	bool refuse_dma = false;
 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 
-	dev_priv->drm.dev_private = dev_priv;
-
 	vmw_sw_context_init(dev_priv);
 
 	mutex_init(&dev_priv->cmdbuf_mutex);
@@ -1629,6 +1629,8 @@ static const struct drm_driver driver = {
 	.prime_handle_to_fd = vmw_prime_handle_to_fd,
 	.gem_prime_import_sg_table = vmw_prime_import_sg_table,
 
+	DRM_FBDEV_TTM_DRIVER_OPS,
+
 	.fops = &vmwgfx_driver_fops,
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
@@ -1653,7 +1655,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct vmw_private *vmw;
 	int ret;
 
-	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+	ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
 	if (ret)
 		goto out_error;
 
@@ -1680,7 +1682,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	vmw_fifo_resource_inc(vmw);
 	vmw_svga_enable(vmw);
 
-	drm_fbdev_ttm_setup(&vmw->drm, 0);
+	drm_client_setup(&vmw->drm, NULL);
 
 	vmw_debugfs_gem_init(vmw);
 	vmw_debugfs_resource_managers_init(vmw);

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4e2807f5f94c..b21831ef214a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++
b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -639,7 +639,7 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) static inline struct vmw_private *vmw_priv(struct drm_device *dev) { - return (struct vmw_private *)dev->dev_private; + return container_of(dev, struct vmw_private, drm); } static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 10d596cb4b40..8db38927729b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -280,7 +280,7 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, struct vmw_plane_state *vps) { - struct vmw_private *dev_priv = vcp->base.dev->dev_private; + struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); u32 i; u32 cursor_max_dim, mob_max_size; @@ -519,7 +519,7 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane) struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); u32 i; - vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); + vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0); for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 116f58774135..b51a2bde73e2 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -8,12 +8,14 @@ config DRM_XE select SHMEM select TMPFS select DRM_BUDDY + select DRM_CLIENT_SELECTION select DRM_EXEC select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n select DRM_PANEL select DRM_SUBALLOC_HELPER select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug index bc177368af6c..2de0de41b8dd 100644 --- a/drivers/gpu/drm/xe/Kconfig.debug +++ b/drivers/gpu/drm/xe/Kconfig.debug @@ -40,9 +40,21 @@ config DRM_XE_DEBUG_VM If in doubt, say "N". +config DRM_XE_DEBUG_MEMIRQ + bool "Enable extra memirq debugging" + default n + help + Choose this option to enable additional debugging info for + memory based interrupts. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_XE_DEBUG_SRIOV bool "Enable extra SR-IOV debugging" default n + select DRM_XE_DEBUG_MEMIRQ help Enable extra SR-IOV debugging info. 
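The vmw_priv() change above swaps the deprecated dev_private cast for container_of(), relying on struct drm_device being embedded in the driver's private structure. A minimal sketch of the pattern with illustrative names (demo_private is not a real vmwgfx type):

#include <linux/container_of.h>
#include <drm/drm_device.h>

struct demo_private {
	int feature_flags;	/* arbitrary driver state */
	struct drm_device drm;	/* embedded, not pointed-to */
};

/* Recover the private struct from the drm_device the DRM core hands back. */
static inline struct demo_private *demo_priv(struct drm_device *dev)
{
	return container_of(dev, struct demo_private, drm);
}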
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index edfd812e0f41..bc7a04ce69fd 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -56,6 +56,7 @@ xe-y += xe_bb.o \ xe_gt_topology.o \ xe_guc.o \ xe_guc_ads.o \ + xe_guc_capture.o \ xe_guc_ct.o \ xe_guc_db_mgr.o \ xe_guc_hwconfig.o \ @@ -129,6 +130,7 @@ xe-$(CONFIG_PCI_IOV) += \ xe_gt_sriov_pf.o \ xe_gt_sriov_pf_config.o \ xe_gt_sriov_pf_control.o \ + xe_gt_sriov_pf_migration.o \ xe_gt_sriov_pf_monitor.o \ xe_gt_sriov_pf_policy.o \ xe_gt_sriov_pf_service.o \ @@ -148,7 +150,6 @@ subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \ -I$(src)/display/ext \ -I$(src)/compat-i915-headers \ -I$(srctree)/drivers/gpu/drm/i915/display/ \ - -Ddrm_i915_gem_object=xe_bo \ -Ddrm_i915_private=xe_device # Rule to build SOC code shared with i915 @@ -165,6 +166,7 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/ext/i915_irq.o \ display/ext/i915_utils.o \ + display/intel_bo.o \ display/intel_fb_bo.o \ display/intel_fbdev_fb.o \ display/xe_display.o \ @@ -180,7 +182,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ # SOC code shared with i915 xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-soc/intel_dram.o \ - i915-soc/intel_pch.o + i915-soc/intel_pch.o \ + i915-soc/intel_rom.o # Display code shared with i915 xe-$(CONFIG_DRM_XE_DISPLAY) += \ @@ -220,6 +223,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_dp_hdcp.o \ i915-display/intel_dp_link_training.o \ i915-display/intel_dp_mst.o \ + i915-display/intel_dp_test.o \ i915-display/intel_dpll.o \ i915-display/intel_dpll_mgr.o \ i915-display/intel_dpt_common.o \ @@ -248,6 +252,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_modeset_setup.o \ i915-display/intel_modeset_verify.o \ i915-display/intel_panel.o \ + i915-display/intel_pfit.o \ i915-display/intel_pmdemand.o \ i915-display/intel_pps.o \ i915-display/intel_psr.o \ diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index 43ad4652c2b2..b54fe40fc5a9 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -176,6 +176,14 @@ enum xe_guc_sleep_state_status { #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) #define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) +enum xe_guc_state_capture_event_status { + XE_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0, + XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1, +}; + +#define XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF +#define XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN 1 + #define XE_GUC_TLB_INVAL_TYPE_SHIFT 0 #define XE_GUC_TLB_INVAL_MODE_SHIFT 8 /* Flush PPC or SMRO caches along with TLB invalidation request */ diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h index 181180f5945c..b6a1852749dd 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h @@ -557,4 +557,65 @@ #define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn #define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96 GUC_HXG_REQUEST_MSG_n_DATAn +/** + * DOC: PF2GUC_SAVE_RESTORE_VF + * + * This message is used by the PF to migrate VF info state maintained by the GuC. + * + * This message must be sent as `CTB HXG Message`_. 
+ *
+ * Available since GuC version 70.25.0
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * |   | Bits  | Description                                                  |
+ * +===+=======+==============================================================+
+ * | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_                                 |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 27:16 | DATA0 = **OPCODE** - operation to take:                      |
+ * |   |       |                                                              |
+ * |   |       |   - _`GUC_PF_OPCODE_VF_SAVE` = 0                             |
+ * |   |       |   - _`GUC_PF_OPCODE_VF_RESTORE` = 1                          |
+ * |   +-------+--------------------------------------------------------------+
+ * |   |  15:0 | ACTION = _`GUC_ACTION_PF2GUC_SAVE_RESTORE_VF` = 0x550B       |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 |  31:0 | **VFID** - VF identifier                                     |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 |  31:0 | **ADDR_LO** - lower 32-bits of GGTT offset to the buffer     |
+ * |   |       | where the VF info will be saved to or restored from.         |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 |  31:0 | **ADDR_HI** - upper 32-bits of GGTT offset to the buffer     |
+ * |   |       | where the VF info will be saved to or restored from.         |
+ * +---+-------+--------------------------------------------------------------+
+ * | 4 |  27:0 | **SIZE** - size of the buffer (in dwords)                    |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 31:28 | MBZ                                                          |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * |   | Bits  | Description                                                  |
+ * +===+=======+==============================================================+
+ * | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_GUC_                                 |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
+ * |   +-------+--------------------------------------------------------------+
+ * |   |  27:0 | DATA0 = **USED** - size of used buffer space (in dwords)     |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_SAVE_RESTORE_VF		0x550Bu
+
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN		(GUC_HXG_EVENT_MSG_MIN_LEN + 4u)
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE	GUC_HXG_EVENT_MSG_0_DATA0
+#define   GUC_PF_OPCODE_VF_SAVE				0u
+#define   GUC_PF_OPCODE_VF_RESTORE			1u
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID	GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO	GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI	GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE	(0xfffffffu << 0)
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_MBZ	(0xfu << 28)
+
+#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_LEN		GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_0_USED	GUC_HXG_RESPONSE_MSG_0_DATA0
+
 #endif
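As a sketch of how the table above maps onto code, here is one way a save request could be packed with these defines. The GUC_HXG_* origin/type/action helpers are assumed to come from the adjacent guc_messages_abi.h, FIELD_PREP() from linux/bitfield.h; the function name and its values are hypothetical:

#include <linux/bitfield.h>
#include <linux/kernel.h>

static void pack_pf2guc_vf_save_example(u32 *msg, u32 vfid, u64 ggtt_addr,
					u32 size_dw)
{
	/* Word 0: ORIGIN/TYPE, OPCODE in DATA0 (27:16), ACTION in 15:0 */
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE,
			    GUC_PF_OPCODE_VF_SAVE) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_ACTION_PF2GUC_SAVE_RESTORE_VF);
	msg[1] = vfid;				/* Word 1: VFID */
	msg[2] = lower_32_bits(ggtt_addr);	/* Word 2: ADDR_LO */
	msg[3] = upper_32_bits(ggtt_addr);	/* Word 3: ADDR_HI */
	msg[4] = size_dw;			/* Word 4: SIZE; bits 31:28 MBZ */
}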
diff --git a/drivers/gpu/drm/xe/abi/guc_capture_abi.h b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
new file mode 100644
index 000000000000..e7898edc6236
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_CAPTURE_ABI_H
+#define _ABI_GUC_CAPTURE_ABI_H
+
+#include <linux/types.h>
+
+/* Capture List Index */
+enum guc_capture_list_index_type {
+	GUC_CAPTURE_LIST_INDEX_PF = 0,
+	GUC_CAPTURE_LIST_INDEX_VF = 1,
+};
+
+#define GUC_CAPTURE_LIST_INDEX_MAX	(GUC_CAPTURE_LIST_INDEX_VF + 1)
+
+/* Register-types of GuC capture register lists */
+enum guc_state_capture_type {
+	GUC_STATE_CAPTURE_TYPE_GLOBAL = 0,
+	GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+	GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE
+};
+
+#define GUC_STATE_CAPTURE_TYPE_MAX	(GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1)
+
+/* Class indices for capture_class and capture_instance arrays */
+enum guc_capture_list_class_type {
+	GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
+	GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
+	GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2,
+	GUC_CAPTURE_LIST_CLASS_BLITTER = 3,
+	GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4,
+};
+
+#define GUC_CAPTURE_LIST_CLASS_MAX	(GUC_CAPTURE_LIST_CLASS_GSC_OTHER + 1)
+
+/**
+ * struct guc_mmio_reg - GuC MMIO reg state struct
+ *
+ * GuC MMIO reg state struct
+ */
+struct guc_mmio_reg {
+	/** @offset: MMIO Offset - filled in by Host */
+	u32 offset;
+	/** @value: MMIO Value - Used by Firmware to store value */
+	u32 value;
+	/** @flags: Flags for accessing the MMIO */
+	u32 flags;
+	/** @mask: Value of a mask to apply if mask with value is set */
+	u32 mask;
+#define GUC_REGSET_MASKED		BIT(0)
+#define GUC_REGSET_STEERING_NEEDED	BIT(1)
+#define GUC_REGSET_MASKED_WITH_VALUE	BIT(2)
+#define GUC_REGSET_RESTORE_ONLY		BIT(3)
+#define GUC_REGSET_STEERING_GROUP	GENMASK(16, 12)
+#define GUC_REGSET_STEERING_INSTANCE	GENMASK(23, 20)
+} __packed;
+
+/**
+ * struct guc_mmio_reg_set - GuC register sets
+ *
+ * GuC register sets
+ */
+struct guc_mmio_reg_set {
+	/** @address: register address */
+	u32 address;
+	/** @count: register count */
+	u16 count;
+	/** @reserved: reserved */
+	u16 reserved;
+} __packed;
+
+/**
+ * struct guc_debug_capture_list_header - Debug capture list header.
+ *
+ * Debug capture list header.
+ */
+struct guc_debug_capture_list_header {
+	/** @info: contains number of MMIO descriptors in the capture list. */
+	u32 info;
+#define GUC_CAPTURELISTHDR_NUMDESCR	GENMASK(15, 0)
+} __packed;
+
+/**
+ * struct guc_debug_capture_list - Debug capture list
+ *
+ * As part of ADS registration, these header structures (followed by
+ * an array of 'struct guc_mmio_reg' entries) are used to register with
+ * the GuC microkernel the list of registers we want it to dump out prior
+ * to an engine reset.
+ */
+struct guc_debug_capture_list {
+	/** @header: Debug capture list header. */
+	struct guc_debug_capture_list_header header;
+	/** @regs: MMIO descriptors in the capture list. */
+	struct guc_mmio_reg regs[];
+} __packed;
+
+/**
+ * struct guc_state_capture_header_t - State capture header.
+ *
+ * Prior to resetting engines that have hung or faulted, the GuC microkernel
+ * reports the engine error-state (register values that were read) by
+ * logging them into the shared GuC log buffer using this hierarchy
+ * of structures.
+ */
+struct guc_state_capture_header_t {
+	/**
+	 * @owner: VFID
+	 * BR[ 7: 0] MBZ when SRIOV is disabled. When SRIOV is enabled
+	 * VFID is an integer in range [0, 63] where 0 means the state capture
+	 * corresponds to the PF and an integer N in range [1, 63] means
+	 * the state capture is for VF N.
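+	 *
+	 * For example, FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, owner) == 0
+	 * identifies a capture taken for the PF, while a value of 3 would
+	 * mean the capture was taken for VF 3 (FIELD_GET() comes from
+	 * linux/bitfield.h; the mask is defined just below).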
+ */ + u32 owner; +#define GUC_STATE_CAPTURE_HEADER_VFID GENMASK(7, 0) + /** @info: Engine class/instance and capture type info */ + u32 info; +#define GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE GENMASK(3, 0) /* see guc_state_capture_type */ +#define GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS GENMASK(7, 4) /* see guc_capture_list_class_type */ +#define GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE GENMASK(11, 8) + /** + * @lrca: logical ring context address. + * if type-instance, LRCA (address) that hung, else set to ~0 + */ + u32 lrca; + /** + * @guc_id: context_index. + * if type-instance, context index of hung context, else set to ~0 + */ + u32 guc_id; + /** @num_mmio_entries: Number of captured MMIO entries. */ + u32 num_mmio_entries; +#define GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES GENMASK(9, 0) +} __packed; + +/** + * struct guc_state_capture_t - State capture. + * + * State capture + */ +struct guc_state_capture_t { + /** @header: State capture header. */ + struct guc_state_capture_header_t header; + /** @mmio_entries: Array of captured guc_mmio_reg entries. */ + struct guc_mmio_reg mmio_entries[]; +} __packed; + +/* State Capture Group Type */ +enum guc_state_capture_group_type { + GUC_STATE_CAPTURE_GROUP_TYPE_FULL = 0, + GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL +}; + +#define GUC_STATE_CAPTURE_GROUP_TYPE_MAX (GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL + 1) + +/** + * struct guc_state_capture_group_header_t - State capture group header + * + * State capture group header. + */ +struct guc_state_capture_group_header_t { + /** @owner: VFID */ + u32 owner; +#define GUC_STATE_CAPTURE_GROUP_HEADER_VFID GENMASK(7, 0) + /** @info: Engine class/instance and capture type info */ + u32 info; +#define GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES GENMASK(7, 0) +#define GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE GENMASK(15, 8) +} __packed; + +/** + * struct guc_state_capture_group_t - State capture group. + * + * this is the top level structure where an error-capture dump starts + */ +struct guc_state_capture_group_t { + /** @grp_header: State capture group header. 
 */
+	struct guc_state_capture_group_header_t grp_header;
+	/** @capture_entries: Array of state captures */
+	struct guc_state_capture_t capture_entries[];
+} __packed;
+
+#endif

diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 8f86a16dc577..f58198cf2cf6 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -52,6 +52,7 @@ struct guc_ct_buffer_desc {
 #define GUC_CTB_STATUS_OVERFLOW			(1 << 0)
 #define GUC_CTB_STATUS_UNDERFLOW		(1 << 1)
 #define GUC_CTB_STATUS_MISMATCH			(1 << 2)
+#define GUC_CTB_STATUS_DISABLED			(1 << 3)
 	u32 reserved[13];
 } __packed;
 static_assert(sizeof(struct guc_ct_buffer_desc) == 64);

diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 6b30743a2f6c..37606cf8cc5e 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -352,6 +352,7 @@ enum xe_guc_klv_ids {
 	GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
 	GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
 	GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
+	GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
 };
 
 #endif

diff --git a/drivers/gpu/drm/xe/abi/guc_log_abi.h b/drivers/gpu/drm/xe/abi/guc_log_abi.h
new file mode 100644
index 000000000000..554630b7ccd9
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_log_abi.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_LOG_ABI_H
+#define _ABI_GUC_LOG_ABI_H
+
+#include <linux/types.h>
+
+/* GuC logging buffer types */
+enum guc_log_buffer_type {
+	GUC_LOG_BUFFER_CRASH_DUMP,
+	GUC_LOG_BUFFER_DEBUG,
+	GUC_LOG_BUFFER_CAPTURE,
+};
+
+#define GUC_LOG_BUFFER_TYPE_MAX		3
+
+/**
+ * struct guc_log_buffer_state - GuC log buffer state
+ *
+ * The state structure below is used to coordinate retrieval of GuC firmware
+ * logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location last read by Xe in the log buffer and is
+ * read-only for the GuC firmware. write_ptr is incremented by GuC with the
+ * number of bytes written for each log entry and is read-only for Xe.
+ * When any type of log buffer becomes half full, GuC sends a flush interrupt.
+ * GuC firmware expects that while it is writing to the 2nd half of the
+ * buffer, the first half is consumed by the Host, which then sends a flush
+ * completed acknowledgment, so that GuC does not overwrite and lose logs.
+ * So when the buffer is half filled and Xe has requested the interrupt, GuC
+ * will set the flush_to_file field, set sampled_write_ptr to the value of
+ * write_ptr and raise the interrupt.
+ * On receiving the interrupt, Xe should read the buffer, clear the
+ * flush_to_file field and update read_ptr with the value of
+ * sampled_write_ptr, before sending an acknowledgment to GuC. The marker and
+ * version fields are for internal usage of GuC and opaque to Xe. The
+ * buffer_full_cnt field is incremented every time GuC detects a log buffer
+ * overflow.
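+ *
+ * In short, the half-full handshake described above is:
+ * 1) GuC sets flush_to_file, sets sampled_write_ptr = write_ptr and
+ *    raises the interrupt;
+ * 2) Xe copies everything between read_ptr and sampled_write_ptr,
+ *    clears flush_to_file and sets read_ptr = sampled_write_ptr;
+ * 3) Xe acknowledges, after which GuC may safely reuse that half of
+ *    the buffer.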
+ */ +struct guc_log_buffer_state { + /** @marker: buffer state start marker */ + u32 marker[2]; + /** @read_ptr: the last byte offset that was read by KMD previously */ + u32 read_ptr; + /** + * @write_ptr: the next byte offset location that will be written by + * GuC + */ + u32 write_ptr; + /** @size: Log buffer size */ + u32 size; + /** + * @sampled_write_ptr: Log buffer write pointer + * This is written by GuC to the byte offset of the next free entry in + * the buffer on log buffer half full or state capture notification + */ + u32 sampled_write_ptr; + /** + * @wrap_offset: wraparound offset + * This is the byte offset of location 1 byte after last valid guc log + * event entry written by Guc firmware before there was a wraparound. + * This field is updated by guc firmware and should be used by Host + * when copying buffer contents to file. + */ + u32 wrap_offset; + /** @flags: Flush to file flag and buffer full count */ + u32 flags; +#define GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE GENMASK(0, 0) +#define GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT GENMASK(4, 1) + /** @version: The Guc-Log-Entry format version */ + u32 version; +} __packed; + +#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h deleted file mode 100644 index 710cecca972d..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h +++ /dev/null @@ -1 +0,0 @@ -/* Empty */ diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h deleted file mode 100644 index 650ea2803a97..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _I915_GEM_MMAN_H_ -#define _I915_GEM_MMAN_H_ - -#include "xe_bo_types.h" -#include <drm/drm_prime.h> - -static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma) -{ - return drm_gem_prime_mmap(&bo->ttm.base, vma); -} - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h deleted file mode 100644 index 777c20ceabab..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2022 Intel Corporation - */ - -#ifndef _I915_GEM_OBJECT_H_ -#define _I915_GEM_OBJECT_H_ - -#include <linux/types.h> - -#include "xe_bo.h" - -#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */ - -static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n) -{ - /* Should never be called */ - WARN_ON(1); - return n; -} - -static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo) -{ - /* legacy tiling is unused */ - return false; -} - -static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo) -{ - /* legacy tiling is unused */ - return false; -} - -static inline int i915_gem_object_read_from_page(struct xe_bo *bo, - u32 ofs, u64 *ptr, u32 size) -{ - struct ttm_bo_kmap_obj map; - void *src; - bool is_iomem; - int ret; - - ret = xe_bo_lock(bo, true); - if (ret) - return ret; - - ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map); - if (ret) - goto out_unlock; - - ofs &= ~PAGE_MASK; - src = ttm_kmap_obj_virtual(&map, &is_iomem); - src += ofs; - if (is_iomem) - memcpy_fromio(ptr, (void __iomem *)src, size); - else - memcpy(ptr, src, 
size); - - ttm_bo_kunmap(&map); -out_unlock: - xe_bo_unlock(bo); - return ret; -} - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h deleted file mode 100644 index 2a3f12d2978c..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2022 Intel Corporation - */ - -#ifndef _I915_GEM_OBJECT_FRONTBUFFER_H_ -#define _I915_GEM_OBJECT_FRONTBUFFER_H_ - -#define i915_gem_object_get_frontbuffer(obj) NULL -#define i915_gem_object_set_frontbuffer(obj, front) (front) - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h deleted file mode 100644 index 7d6bb1abab73..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* Copyright © 2024 Intel Corporation */ - -#ifndef __I915_GEM_OBJECT_TYPES_H__ -#define __I915_GEM_OBJECT_TYPES_H__ - -#include "xe_bo.h" - -#define to_intel_bo(x) gem_to_xe_bo((x)) - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index cb6c7598824b..9c4cf050059a 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), NULL, size, start, end, - ttm_bo_type_kernel, flags); + ttm_bo_type_kernel, flags, 0); if (IS_ERR(bo)) { err = PTR_ERR(bo); bo = NULL; diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h deleted file mode 100644 index b4c47617b64b..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef __I915_DEBUGFS_H__ -#define __I915_DEBUGFS_H__ - -struct drm_i915_gem_object; -struct seq_file; - -static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {} - -#endif /* __I915_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index f27a2c75b56d..84b0991b35b3 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -14,6 +14,7 @@ #include "i915_utils.h" #include "intel_runtime_pm.h" +#include "xe_device.h" /* for xe_device_has_flat_ccs() */ #include "xe_device_types.h" static inline struct drm_i915_private *to_i915(const struct drm_device *dev) @@ -66,19 +67,14 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) #define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE) #define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE) #define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE) +#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE) #define IS_HASWELL_ULT(dev_priv) (dev_priv && 0) #define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0) #define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0) -#define IP_VER(ver, rel) ((ver) << 8 | (rel)) - #define 
IS_MOBILE(xe) (xe && 0) -#define IS_LP(xe) ((xe) && 0) -#define IS_GEN9_LP(xe) ((xe) && 0) -#define IS_GEN9_BC(xe) ((xe) && 0) - #define IS_TIGERLAKE_UY(xe) (xe && 0) #define IS_COMETLAKE_ULX(xe) (xe && 0) #define IS_COFFEELAKE_ULX(xe) (xe && 0) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h deleted file mode 100644 index 98e9dd78f670..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _I915_GPU_ERROR_H_ -#define _I915_GPU_ERROR_H_ - -struct drm_i915_error_state_buf; - -__printf(2, 3) -static inline void -i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) -{ -} - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h index 8c7b315aa8ac..274042bff1be 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h @@ -20,18 +20,26 @@ static inline void enable_rpm_wakeref_asserts(void *rpm) { } +static inline bool +intel_runtime_pm_suspended(struct xe_runtime_pm *pm) +{ + struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); + + return pm_runtime_suspended(xe->drm.dev); +} + static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm) { struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - return xe_pm_runtime_resume_and_get(xe); + return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL; } static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm) { struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - return xe_pm_runtime_get_if_in_use(xe); + return xe_pm_runtime_get_if_in_use(xe) ? 
INTEL_WAKEREF_DEF : NULL; } static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm) @@ -39,7 +47,8 @@ static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); xe_pm_runtime_get_noresume(xe); - return true; + + return INTEL_WAKEREF_DEF; } static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm) @@ -62,6 +71,6 @@ static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_ #define with_intel_runtime_pm(rpm, wf) \ for ((wf) = intel_runtime_pm_get(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h index eb5b5f0e4bd9..0382beb4035b 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h @@ -10,11 +10,11 @@ #include "xe_device_types.h" #include "xe_mmio.h" -static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore) +static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore) { struct xe_device *xe = container_of(uncore, struct xe_device, uncore); - return xe_root_mmio_gt(xe); + return xe_root_tile_mmio(xe); } static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore) @@ -29,7 +29,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline u8 intel_uncore_read8(struct intel_uncore *uncore, @@ -37,7 +37,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg); } static inline u16 intel_uncore_read16(struct intel_uncore *uncore, @@ -45,7 +45,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg); } static inline u64 @@ -57,11 +57,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore, u32 upper, lower, old_upper; int loop = 0; - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg); + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg); do { old_upper = upper; - lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg); - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg); + lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg); + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg); } while (upper != old_upper && loop++ < 2); return (u64)upper << 32 | lower; @@ -72,7 +72,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline void intel_uncore_write(struct intel_uncore *uncore, @@ -80,7 +80,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - 
xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val); } static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, @@ -88,7 +88,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set); + return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set); } static inline int intel_wait_for_register(struct intel_uncore *uncore, @@ -97,7 +97,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, timeout * USEC_PER_MSEC, NULL, false); } @@ -107,7 +107,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, timeout * USEC_PER_MSEC, NULL, false); } @@ -118,7 +118,7 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, fast_timeout_us + 1000 * slow_timeout_ms, out_value, false); } @@ -128,7 +128,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline void intel_uncore_write_fw(struct intel_uncore *uncore, @@ -136,7 +136,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val); } static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore, @@ -144,7 +144,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline void intel_uncore_write_notrace(struct intel_uncore *uncore, @@ -152,33 +152,9 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val); } -static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore) -{ - struct xe_device *xe = container_of(uncore, struct xe_device, uncore); - - return xe_device_get_root_tile(xe)->mmio.regs; -} - -/* - * The raw_reg_{read,write} macros are intended as a micro-optimization for - * interrupt handlers so that the pointer indirection on uncore->regs can - * be computed once (and presumably cached in a register) instead of generating - * extra load instructions for each MMIO access. 
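The conversion above swaps the GT-based MMIO accessor for a struct xe_mmio one without touching the surrounding logic; intel_uncore_read64_2x32() in particular keeps its upper/lower re-read loop. A minimal sketch of that torn-read idiom in isolation (register offsets are illustrative, not real hardware):

#include <linux/io.h>
#include <linux/types.h>

/*
 * A 64-bit counter exposed as two 32-bit registers can carry into the
 * upper half between the two reads; re-read until the upper half is
 * stable, bounded to a few iterations.
 */
static u64 read_counter_2x32(void __iomem *base, u32 lower_off, u32 upper_off)
{
        u32 upper, lower, old_upper;
        int loop = 0;

        upper = readl(base + upper_off);
        do {
                old_upper = upper;
                lower = readl(base + lower_off);
                upper = readl(base + upper_off);
        } while (upper != old_upper && loop++ < 2);

        return (u64)upper << 32 | lower;
}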
- * - * Given that these macros are only intended for non-GSI interrupt registers - * (and the goal is to avoid extra instructions generated by the compiler), - * these macros do not account for uncore->gsi_offset. Any caller that needs - * to use these macros on a GSI register is responsible for adding the - * appropriate GSI offset to the 'base' parameter. - */ -#define raw_reg_read(base, reg) \ - readl(base + i915_mmio_reg_offset(reg)) -#define raw_reg_write(base, reg, value) \ - writel(value, base + i915_mmio_reg_offset(reg)) - #define intel_uncore_forcewake_get(x, y) do { } while (0) #define intel_uncore_forcewake_put(x, y) do { } while (0) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h index ecb1c0707706..2a32faea9db5 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h @@ -5,4 +5,6 @@ #include <linux/types.h> -typedef unsigned long intel_wakeref_t; +typedef struct ref_tracker *intel_wakeref_t; + +#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h index c2c30ece8f77..5dfc587c8237 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h @@ -9,20 +9,14 @@ #include <linux/errno.h> #include <linux/types.h> -struct drm_i915_gem_object; +struct drm_gem_object; struct intel_pxp; static inline int intel_pxp_key_check(struct intel_pxp *pxp, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, bool assign) { return -ENODEV; } -static inline bool -i915_gem_object_is_protected(const struct drm_i915_gem_object *obj) -{ - return false; -} - #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h new file mode 100644 index 000000000000..05cbfb697b2b --- /dev/null +++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#include "../../../i915/soc/intel_rom.h" diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c index eb40f1cb44f6..a7dbc6554d69 100644 --- a/drivers/gpu/drm/xe/display/ext/i915_irq.c +++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c @@ -7,25 +7,24 @@ #include "i915_reg.h" #include "intel_uncore.h" -void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, - i915_reg_t iir, i915_reg_t ier) +void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs) { - intel_uncore_write(uncore, imr, 0xffffffff); - intel_uncore_posting_read(uncore, imr); + intel_uncore_write(uncore, regs.imr, 0xffffffff); + intel_uncore_posting_read(uncore, regs.imr); - intel_uncore_write(uncore, ier, 0); + intel_uncore_write(uncore, regs.ier, 0); /* IIR can theoretically queue up two events. Be paranoid. */ - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); } /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
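The gen3_*() to gen2_*() rename in this hunk also replaces the three separate imr/iir/ier parameters with a bundled struct i915_irq_regs. A sketch of the same reset sequence against a hypothetical offset-based register bundle shows why IIR is cleared twice:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct i915_irq_regs, using raw offsets. */
struct irq_regs_sketch {
        u32 imr;        /* interrupt mask register */
        u32 ier;        /* interrupt enable register */
        u32 iir;        /* interrupt identity register, write-1-to-clear */
};

static void irq_reset_sketch(void __iomem *base, struct irq_regs_sketch regs)
{
        writel(0xffffffff, base + regs.imr);    /* mask all sources */
        readl(base + regs.imr);                 /* posting read to flush */

        writel(0, base + regs.ier);             /* disable delivery */

        /* IIR can latch a second event behind the first one: clear twice. */
        writel(0xffffffff, base + regs.iir);
        readl(base + regs.iir);
        writel(0xffffffff, base + regs.iir);
        readl(base + regs.iir);
}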
*/ -void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) +void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) { struct xe_device *xe = container_of(uncore, struct xe_device, uncore); u32 val = intel_uncore_read(uncore, reg); @@ -42,16 +41,14 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) intel_uncore_posting_read(uncore, reg); } -void gen3_irq_init(struct intel_uncore *uncore, - i915_reg_t imr, u32 imr_val, - i915_reg_t ier, u32 ier_val, - i915_reg_t iir) +void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs, + u32 imr_val, u32 ier_val) { - gen3_assert_iir_is_zero(uncore, iir); + gen2_assert_iir_is_zero(uncore, regs.iir); - intel_uncore_write(uncore, ier, ier_val); - intel_uncore_write(uncore, imr, imr_val); - intel_uncore_posting_read(uncore, imr); + intel_uncore_write(uncore, regs.ier, ier_val); + intel_uncore_write(uncore, regs.imr, imr_val); + intel_uncore_posting_read(uncore, regs.imr); } bool intel_irqs_enabled(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c new file mode 100644 index 000000000000..9f54fad0f1c0 --- /dev/null +++ b/drivers/gpu/drm/xe/display/intel_bo.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include <drm/drm_gem.h> + +#include "xe_bo.h" +#include "intel_bo.h" + +bool intel_bo_is_tiled(struct drm_gem_object *obj) +{ + /* legacy tiling is unused */ + return false; +} + +bool intel_bo_is_userptr(struct drm_gem_object *obj) +{ + /* xe does not have userptr bos */ + return false; +} + +bool intel_bo_is_shmem(struct drm_gem_object *obj) +{ + return false; +} + +bool intel_bo_is_protected(struct drm_gem_object *obj) +{ + return false; +} + +void intel_bo_flush_if_display(struct drm_gem_object *obj) +{ +} + +int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + return drm_gem_prime_mmap(obj, vma); +} + +int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size) +{ + struct xe_bo *bo = gem_to_xe_bo(obj); + struct ttm_bo_kmap_obj map; + void *src; + bool is_iomem; + int ret; + + ret = xe_bo_lock(bo, true); + if (ret) + return ret; + + ret = ttm_bo_kmap(&bo->ttm, offset >> PAGE_SHIFT, 1, &map); + if (ret) + goto out_unlock; + + offset &= ~PAGE_MASK; + src = ttm_kmap_obj_virtual(&map, &is_iomem); + src += offset; + if (is_iomem) + memcpy_fromio(dst, (void __iomem *)src, size); + else + memcpy(dst, src, size); + + ttm_bo_kunmap(&map); +out_unlock: + xe_bo_unlock(bo); + return ret; +} + +struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj) +{ + return NULL; +} + +struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, + struct intel_frontbuffer *front) +{ + return front; +} + +void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) +{ + /* FIXME */ +} diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c index 63ce97cc4cfe..4d209ebc26c2 100644 --- a/drivers/gpu/drm/xe/display/intel_fb_bo.c +++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c @@ -11,8 +11,10 @@ #include "intel_fb_bo.h" #include "xe_bo.h" -void intel_fb_bo_framebuffer_fini(struct xe_bo *bo) +void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj) { + struct xe_bo *bo = gem_to_xe_bo(obj); + if (bo->flags & XE_BO_FLAG_PINNED) { /* Unpin our kernel fb first */ xe_bo_lock(bo, false); @@ -23,9 +25,10 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo 
*bo) } int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct xe_bo *bo, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { + struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = to_xe_device(bo->ttm.base.dev); int ret; @@ -65,11 +68,11 @@ err: return ret; } -struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd) +struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_i915_gem_object *bo; + struct xe_bo *bo; struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]); if (!gem) @@ -78,11 +81,11 @@ struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, bo = gem_to_xe_bo(gem); /* Require vram placement or dma-buf import */ if (IS_DGFX(i915) && - !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) && + !xe_bo_can_migrate(bo, XE_PL_VRAM0) && bo->ttm.type != ttm_bo_type_sg) { drm_gem_object_put(gem); return ERR_PTR(-EREMOTE); } - return bo; + return gem; } diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h deleted file mode 100644 index 5d365b925b7a..000000000000 --- a/drivers/gpu/drm/xe/display/intel_fb_bo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2021 Intel Corporation - */ - -#ifndef __INTEL_FB_BO_H__ -#define __INTEL_FB_BO_H__ - -struct drm_file; -struct drm_mode_fb_cmd2; -struct drm_i915_private; -struct intel_framebuffer; -struct xe_bo; - -void intel_fb_bo_framebuffer_fini(struct xe_bo *bo); -int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct xe_bo *bo, - struct drm_mode_fb_cmd2 *mode_cmd); - -struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd); - -#endif diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index 99499d6c0256..ca95fcd098ec 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -6,6 +6,7 @@ #include <drm/drm_fb_helper.h> #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_fbdev_fb.h" #include "xe_bo.h" #include "xe_ttm_stolen_mgr.h" @@ -20,7 +21,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, struct drm_device *dev = helper->dev; struct xe_device *xe = to_xe_device(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; - struct drm_i915_gem_object *obj; + struct xe_bo *obj; int size; /* we don't do packed 24bpp */ @@ -64,13 +65,13 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, goto err; } - fb = intel_framebuffer_create(obj, &mode_cmd); + fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd); if (IS_ERR(fb)) { xe_bo_unpin_map_no_vm(obj); goto err; } - drm_gem_object_put(intel_bo_to_drm_bo(obj)); + drm_gem_object_put(&obj->ttm.base); return to_intel_framebuffer(fb); @@ -79,8 +80,9 @@ err: } int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma) + struct drm_gem_object *_obj, struct i915_vma *vma) { + struct xe_bo *obj = gem_to_xe_bo(_obj); struct pci_dev *pdev = to_pci_dev(i915->drm.dev); if (!(obj->flags & XE_BO_FLAG_SYSTEM)) { @@ -100,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info 
XE_WARN_ON(iosys_map_is_null(&obj->vmap)); info->screen_base = obj->vmap.vaddr_iomem; - info->screen_size = intel_bo_to_drm_bo(obj)->size; + info->screen_size = obj->ttm.base.size; return 0; } diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index c6e0c8d77a70..b5502f335f53 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -4,16 +4,16 @@ */ #include "xe_display.h" -#include "regs/xe_regs.h" +#include "regs/xe_irq_regs.h" #include <linux/fb.h> #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> #include <uapi/drm/xe_drm.h> #include "soc/intel_dram.h" -#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */ #include "intel_acpi.h" #include "intel_audio.h" #include "intel_bw.h" @@ -34,7 +34,7 @@ static bool has_display(struct xe_device *xe) { - return HAS_DISPLAY(xe); + return HAS_DISPLAY(&xe->display); } /** @@ -202,12 +202,14 @@ int xe_display_init(struct xe_device *xe) void xe_display_fini(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; intel_hpd_poll_fini(xe); - intel_hdcp_component_fini(xe); + intel_hdcp_component_fini(display); intel_audio_deinit(xe); } @@ -321,7 +323,9 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) * properly. */ intel_power_domains_disable(xe); - intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (!runtime) + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (!runtime && has_display(xe)) { drm_kms_helper_poll_disable(&xe->drm); intel_display_driver_disable_user_access(xe); @@ -330,7 +334,8 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) xe_display_flush_cleanup_work(xe); - intel_dp_mst_suspend(xe); + if (!runtime) + intel_dp_mst_suspend(xe); intel_hpd_cancel_work(xe); @@ -341,7 +346,7 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold); - intel_dmc_suspend(xe); + intel_dmc_suspend(display); if (runtime && has_display(xe)) intel_hpd_poll_enable(xe); @@ -352,6 +357,36 @@ void xe_display_pm_suspend(struct xe_device *xe) __xe_display_pm_suspend(xe, false); } +void xe_display_pm_shutdown(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_power_domains_disable(xe); + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (has_display(xe)) { + drm_kms_helper_poll_disable(&xe->drm); + intel_display_driver_disable_user_access(xe); + intel_display_driver_suspend(xe); + } + + xe_display_flush_cleanup_work(xe); + intel_dp_mst_suspend(xe); + intel_hpd_cancel_work(xe); + + if (has_display(xe)) + intel_display_driver_suspend_access(xe); + + intel_encoder_suspend_all(display); + intel_encoder_shutdown_all(display); + + intel_opregion_suspend(display, PCI_D3cold); + + intel_dmc_suspend(display); +} + void xe_display_pm_runtime_suspend(struct xe_device *xe) { if (!xe->info.probe_display) @@ -376,6 +411,19 @@ void xe_display_pm_suspend_late(struct xe_device *xe) intel_display_power_suspend_late(xe); } +void xe_display_pm_shutdown_late(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + /* + * The only requirement is to reboot with display DC states disabled, + * for now leaving all display power wells in the INIT power domain + * enabled. 
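The xe_display_pm_shutdown()/xe_display_pm_shutdown_late() pair added here mirrors the suspend path but unconditionally quiesces fbdev and MST. A sketch of how the two hooks might be sequenced from a PCI .shutdown callback follows; the callback name and the GT step are assumptions, only the xe_display_pm_* prototypes come from this patch:

#include <linux/pci.h>

#include "xe_device.h"
#include "xe_display.h"

/* Hypothetical wiring, not the actual xe_pci.c implementation. */
static void xe_pci_shutdown_sketch(struct pci_dev *pdev)
{
        struct xe_device *xe = pci_get_drvdata(pdev);

        xe_display_pm_shutdown(xe);      /* quiesce KMS, encoders, opregion */

        /* ... quiesce GT/GuC here, driver specific ... */

        xe_display_pm_shutdown_late(xe); /* drop power domains before reboot */
}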
+ */ + intel_power_domains_driver_remove(xe); +} + void xe_display_pm_resume_early(struct xe_device *xe) { if (!xe->info.probe_display) @@ -393,7 +441,7 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) if (!xe->info.probe_display) return; - intel_dmc_resume(xe); + intel_dmc_resume(display); if (has_display(xe)) drm_mode_config_reset(&xe->drm); @@ -405,7 +453,9 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_display_driver_resume_access(xe); /* MST sideband requires HPD interrupts enabled */ - intel_dp_mst_resume(xe); + if (!runtime) + intel_dp_mst_resume(xe); + if (!runtime && has_display(xe)) { intel_display_driver_resume(xe); drm_kms_helper_poll_enable(&xe->drm); @@ -417,7 +467,8 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_opregion_resume(display); - intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); + if (!runtime) + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); intel_power_domains_enable(xe); } diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index bed55fd26f30..17afa537aee5 100644 --- a/drivers/gpu/drm/xe/display/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -35,7 +35,9 @@ void xe_display_irq_reset(struct xe_device *xe); void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); void xe_display_pm_suspend(struct xe_device *xe); +void xe_display_pm_shutdown(struct xe_device *xe); void xe_display_pm_suspend_late(struct xe_device *xe); +void xe_display_pm_shutdown_late(struct xe_device *xe); void xe_display_pm_resume_early(struct xe_device *xe); void xe_display_pm_resume(struct xe_device *xe); void xe_display_pm_runtime_suspend(struct xe_device *xe); @@ -66,7 +68,9 @@ static inline void xe_display_irq_reset(struct xe_device *xe) {} static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} static inline void xe_display_pm_suspend(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown(struct xe_device *xe) {} static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {} static inline void xe_display_pm_resume_early(struct xe_device *xe) {} static inline void xe_display_pm_resume(struct xe_device *xe) {} static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index f99d901a3214..f95375451e2f 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -48,11 +48,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d if (!vma) return false; + /* Set scanout flag for WC mapping */ obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_ALIGN(size), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_GGTT); + XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT); if (IS_ERR(obj)) { kfree(vma); return false; @@ -73,5 +74,9 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) { - /* TODO: add xe specific flush_map() for dsb buffer object. */ + /* + * The memory barrier here is to ensure coherency of DSB vs MMIO, + * both for weak ordering archs and discrete cards. 
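intel_dsb_buffer_flush_map() now issues a real barrier instead of the old TODO. The underlying pattern, sketched below with illustrative names: writes to a write-combined command buffer must be globally visible before the MMIO write that starts the engine reading it.

#include <asm/barrier.h>
#include <linux/io.h>
#include <linux/types.h>

static void submit_wc_buffer_sketch(u32 __iomem *wc_buf, size_t n_dw,
                                    void __iomem *doorbell)
{
        size_t i;

        for (i = 0; i < n_dw; i++)
                writel((u32)i, wc_buf + i);     /* stage commands in WC memory */

        wmb();                  /* flush WC writes before kicking the engine */
        writel(1, doorbell);    /* illustrative "go" register */
}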
+ */ + xe_device_wmb(dsb_buf->vma->bo->tile->xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index b58fc4ba2aac..761510ae0690 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -79,12 +79,14 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_tile *tile0 = xe_device_get_root_tile(xe); struct xe_ggtt *ggtt = tile0->mem.ggtt; - struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt; + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj), *dpt; u32 dpt_size, size = bo->ttm.base.size; if (view->type == I915_GTT_VIEW_NORMAL) @@ -98,23 +100,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_PAGE_SIZE); if (IS_DGFX(xe)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM0 | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM0 | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); else - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_STOLEN | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) return PTR_ERR(dpt); @@ -183,9 +191,11 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { - struct xe_bo *bo = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; u32 align; @@ -264,12 +274,14 @@ out: } static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, - const struct i915_gtt_view *view) + const struct i915_gtt_view *view, + u64 physical_alignment) { struct drm_device *dev = fb->base.dev; struct xe_device *xe = to_xe_device(dev); struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); - struct xe_bo *bo = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj); int ret; if (!vma) @@ -312,9 +324,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, vma->bo = bo; if (intel_fb_uses_dpt(&fb->base)) - ret = __xe_pin_fb_vma_dpt(fb, view, vma); + ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment); else - ret = __xe_pin_fb_vma_ggtt(fb, view, vma); + ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment); if (ret) goto err_unpin; @@ -355,7 +367,7 
@@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, { *out_flags = 0; - return __xe_pin_fb_vma(to_intel_framebuffer(fb), view); + return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment); } void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags) @@ -366,13 +378,18 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags) int intel_plane_pin_fb(struct intel_plane_state *plane_state) { struct drm_framebuffer *fb = plane_state->hw.fb; - struct xe_bo *bo = intel_fb_obj(fb); + struct drm_gem_object *obj = intel_fb_bo(fb); + struct xe_bo *bo = gem_to_xe_bo(obj); struct i915_vma *vma; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + u64 phys_alignment = plane->min_alignment(plane, fb, 0); /* We reject creating !SCANOUT fb's, so this is weird.. */ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT)); - vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt); + vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment); + if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index 6619a40aed15..7c02323e9531 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -30,26 +30,29 @@ struct intel_hdcp_gsc_message { #define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header) -bool intel_hdcp_gsc_cs_required(struct xe_device *xe) +bool intel_hdcp_gsc_cs_required(struct intel_display *display) { - return DISPLAY_VER(xe) >= 14; + return DISPLAY_VER(display) >= 14; } -bool intel_hdcp_gsc_check_status(struct xe_device *xe) +bool intel_hdcp_gsc_check_status(struct intel_display *display) { + struct xe_device *xe = to_xe_device(display->drm); struct xe_tile *tile = xe_device_get_root_tile(xe); struct xe_gt *gt = tile->media_gt; struct xe_gsc *gsc = >->uc.gsc; bool ret = true; + unsigned int fw_ref; - if (!gsc && !xe_uc_fw_is_enabled(&gsc->fw)) { + if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) { drm_dbg_kms(&xe->drm, "GSC Components not ready for HDCP2.x\n"); return false; } xe_pm_runtime_get(xe); - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) { drm_dbg_kms(&xe->drm, "failed to get forcewake to check proxy status\n"); ret = false; @@ -59,16 +62,17 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe) if (!xe_gsc_proxy_init_done(gsc)) ret = false; - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); out: xe_pm_runtime_put(xe); return ret; } /*This function helps allocate memory for the command that we will send to gsc cs */ -static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, +static int intel_hdcp_gsc_initialize_message(struct intel_display *display, struct intel_hdcp_gsc_message *hdcp_message) { + struct xe_device *xe = to_xe_device(display->drm); struct xe_bo *bo = NULL; u64 cmd_in, cmd_out; int ret = 0; @@ -80,7 +84,7 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, XE_BO_FLAG_GGTT); if (IS_ERR(bo)) { - drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n"); + drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n"); ret = PTR_ERR(bo); goto out; } @@ -96,7 +100,7 @@ out: return ret; } -static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe) +static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display) { struct 
intel_hdcp_gsc_message *hdcp_message; int ret; @@ -110,14 +114,14 @@ static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe) * NOTE: No need to lock the comp mutex here as it is already * going to be taken before this function called */ - ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message); + ret = intel_hdcp_gsc_initialize_message(display, hdcp_message); if (ret) { - drm_err(&xe->drm, "Could not initialize hdcp_message\n"); + drm_err(display->drm, "Could not initialize hdcp_message\n"); kfree(hdcp_message); return ret; } - xe->display.hdcp.hdcp_message = hdcp_message; + display->hdcp.hdcp_message = hdcp_message; return ret; } @@ -137,7 +141,7 @@ static const struct i915_hdcp_ops gsc_hdcp_ops = { .close_hdcp_session = intel_hdcp_gsc_close_session, }; -int intel_hdcp_gsc_init(struct xe_device *xe) +int intel_hdcp_gsc_init(struct intel_display *display) { struct i915_hdcp_arbiter *data; int ret; @@ -146,33 +150,33 @@ int intel_hdcp_gsc_init(struct xe_device *xe) if (!data) return -ENOMEM; - mutex_lock(&xe->display.hdcp.hdcp_mutex); - xe->display.hdcp.arbiter = data; - xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev; - xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops; - ret = intel_hdcp_gsc_hdcp2_init(xe); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = data; + display->hdcp.arbiter->hdcp_dev = display->drm->dev; + display->hdcp.arbiter->ops = &gsc_hdcp_ops; + ret = intel_hdcp_gsc_hdcp2_init(display); if (ret) kfree(data); - mutex_unlock(&xe->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } -void intel_hdcp_gsc_fini(struct xe_device *xe) +void intel_hdcp_gsc_fini(struct intel_display *display) { struct intel_hdcp_gsc_message *hdcp_message = - xe->display.hdcp.hdcp_message; - struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter; + display->hdcp.hdcp_message; + struct i915_hdcp_arbiter *arb = display->hdcp.arbiter; if (hdcp_message) { xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); kfree(hdcp_message); - xe->display.hdcp.hdcp_message = NULL; + display->hdcp.hdcp_message = NULL; } kfree(arb); - xe->display.hdcp.arbiter = NULL; + display->hdcp.arbiter = NULL; } static int xe_gsc_send_sync(struct xe_device *xe, diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index a50ab9eae40a..8c113463a3d5 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -170,7 +170,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return false; if (intel_framebuffer_init(to_intel_framebuffer(fb), - bo, &mode_cmd)) { + &bo->ttm.base, &mode_cmd)) { drm_dbg_kms(&xe->drm, "intel fb init failed\n"); goto err_bo; } @@ -248,7 +248,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, * the lookup of sysmem scratch pages. 
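The hdcp hunk above also shows the reworked forcewake contract used throughout this series: xe_force_wake_get() now returns a reference cookie (0 on failure) that is handed back to xe_force_wake_put(), instead of returning an error code. A minimal sketch of a caller under that contract; the -ETIMEDOUT mapping is an assumption:

#include <linux/errno.h>

#include "xe_force_wake.h"
#include "xe_gt.h"

static int do_work_under_forcewake(struct xe_gt *gt)
{
        unsigned int fw_ref;

        fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
        if (!fw_ref)
                return -ETIMEDOUT;      /* domain never powered up */

        /* ... access registers that require the GT to be awake ... */

        xe_force_wake_put(gt_to_fw(gt), fw_ref);
        return 0;
}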
*/ plane->check_plane(crtc_state, plane_state); - plane->async_flip(plane, crtc_state, plane_state, true); + plane->async_flip(NULL, plane, crtc_state, plane_state, true); return; nofb: diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 81b71903675e..7c78496e6213 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -186,6 +186,7 @@ #define VDBOX_CGCTL3F10(base) XE_REG((base) + 0x3f10) #define IECPUNIT_CLKGATE_DIS REG_BIT(22) +#define RAMDFTUNIT_CLKGATE_DIS REG_BIT(9) #define VDBOX_CGCTL3F18(base) XE_REG((base) + 0x3f18) #define ALNUNIT_CLKGATE_DIS REG_BIT(13) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index bd604b9f08e4..0c9e4b2fafab 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -286,6 +286,9 @@ #define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) #define LTCDD_CLKGATE_DIS REG_BIT(10) +#define UNSLCGCTL9454 XE_REG(0x9454) +#define LSCFE_CLKGATE_DIS REG_BIT(4) + #define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4) #define L3_CR2X_CLKGATE_DIS REG_BIT(17) #define L3_CLKGATE_DIS REG_BIT(16) @@ -344,6 +347,14 @@ #define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0) #define FORCEWAKE_RENDER XE_REG(0xa278) + +#define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0) +#define MEDIA_SLICE3_AWAKE_STATUS REG_BIT(4) +#define MEDIA_SLICE2_AWAKE_STATUS REG_BIT(3) +#define MEDIA_SLICE1_AWAKE_STATUS REG_BIT(2) +#define RENDER_AWAKE_STATUS REG_BIT(1) +#define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0) + #define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4) #define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4) #define FORCEWAKE_GSC XE_REG(0xa618) @@ -556,62 +567,6 @@ #define GT_PERF_STATUS XE_REG(0x1381b4) #define VOLTAGE_MASK REG_GENMASK(10, 0) -/* - * Note: Interrupt registers 1900xx are VF accessible only until version 12.50. - * On newer platforms, VFs are using memory-based interrupts instead. - * However, for simplicity we keep this XE_REG_OPTION_VF tag intact. 
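The interrupt register block removed below is re-added verbatim in the new xe_irq_regs.h; its INTR_IDENTITY_REG field helpers are what an IRQ handler uses to decode which engine raised an interrupt. A self-contained sketch of that decode, with local SK_-prefixed copies of the masks so it does not depend on xe headers:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SK_INTR_DATA_VALID              BIT(31)
#define SK_INTR_ENGINE_INSTANCE(x)      FIELD_GET(GENMASK(25, 20), (x))
#define SK_INTR_ENGINE_CLASS(x)         FIELD_GET(GENMASK(18, 16), (x))
#define SK_INTR_ENGINE_INTR(x)          FIELD_GET(GENMASK(15, 0), (x))

static bool decode_identity_sketch(u32 identity, u32 *class, u32 *instance,
                                   u32 *source)
{
        if (!(identity & SK_INTR_DATA_VALID))
                return false;   /* no event latched in this identity reg */

        *class = SK_INTR_ENGINE_CLASS(identity);       /* render, copy, ... */
        *instance = SK_INTR_ENGINE_INSTANCE(identity); /* e.g. VCS0 vs VCS1 */
        *source = SK_INTR_ENGINE_INTR(identity);       /* per-engine bits */
        return true;
}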
- */ - -#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF) -#define INTR_GSC REG_BIT(31) -#define INTR_GUC REG_BIT(25) -#define INTR_MGUC REG_BIT(24) -#define INTR_BCS8 REG_BIT(23) -#define INTR_BCS(x) REG_BIT(15 - (x)) -#define INTR_CCS(x) REG_BIT(4 + (x)) -#define INTR_RCS0 REG_BIT(0) -#define INTR_VECS(x) REG_BIT(31 - (x)) -#define INTR_VCS(x) REG_BIT(x) - -#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF) -#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF) -#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF) -#define ENGINE1_MASK REG_GENMASK(31, 16) -#define ENGINE0_MASK REG_GENMASK(15, 0) -#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF) -#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF) -#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF) - -#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF) -#define INTR_DATA_VALID REG_BIT(31) -#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x) -#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x) -#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x) -#define OTHER_GUC_INSTANCE 0 -#define OTHER_GSC_HECI2_INSTANCE 3 -#define OTHER_GSC_INSTANCE 6 - -#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF) -#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF) -#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF) -#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF) -#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF) -#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF) -#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4) -#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF) -#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF) -#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF) -#define CCS0_CCS1_INTR_MASK XE_REG(0x190100) -#define CCS2_CCS3_INTR_MASK XE_REG(0x190104) -#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110) -#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114) -#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118) -#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c) -#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) -#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8) -#define GSC_ER_COMPLETE REG_BIT(5) -#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4) -#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) -#define GT_RENDER_USER_INTERRUPT REG_BIT(0) +#define SFC_DONE(n) XE_REG(0x1cc000 + (n) * 0x1000) #endif diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h index a5fd14307f94..2118f7dec287 100644 --- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h @@ -84,6 +84,8 @@ #define HUC_LOADING_AGENT_GUC REG_BIT(1) #define GUC_WOPCM_OFFSET_VALID REG_BIT(0) #define GUC_MAX_IDLE_COUNT XE_REG(0xc3e4) +#define GUC_PMTIMESTAMP_LO XE_REG(0xc3e8) +#define GUC_PMTIMESTAMP_HI XE_REG(0xc3ec) #define GUC_SEND_INTERRUPT XE_REG(0xc4c8) #define GUC_SEND_TRIGGER REG_BIT(0) diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h new file mode 100644 index 000000000000..1776b3f78ccb --- /dev/null +++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ +#ifndef _XE_IRQ_REGS_H_ +#define _XE_IRQ_REGS_H_ + +#include "regs/xe_reg_defs.h" + +#define PCU_IRQ_OFFSET 0x444e0 +#define GU_MISC_IRQ_OFFSET 0x444f0 +#define GU_MISC_GSE 
REG_BIT(27) + +#define DG1_MSTR_TILE_INTR XE_REG(0x190008) +#define DG1_MSTR_IRQ REG_BIT(31) +#define DG1_MSTR_TILE(t) REG_BIT(t) + +#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF) +#define MASTER_IRQ REG_BIT(31) +#define GU_MISC_IRQ REG_BIT(29) +#define DISPLAY_IRQ REG_BIT(16) +#define GT_DW_IRQ(x) REG_BIT(x) + +/* + * Note: Interrupt registers 1900xx are VF accessible only until version 12.50. + * On newer platforms, VFs are using memory-based interrupts instead. + * However, for simplicity we keep this XE_REG_OPTION_VF tag intact. + */ + +#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF) +#define INTR_GSC REG_BIT(31) +#define INTR_GUC REG_BIT(25) +#define INTR_MGUC REG_BIT(24) +#define INTR_BCS8 REG_BIT(23) +#define INTR_BCS(x) REG_BIT(15 - (x)) +#define INTR_CCS(x) REG_BIT(4 + (x)) +#define INTR_RCS0 REG_BIT(0) +#define INTR_VECS(x) REG_BIT(31 - (x)) +#define INTR_VCS(x) REG_BIT(x) + +#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF) +#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF) +#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF) +#define ENGINE1_MASK REG_GENMASK(31, 16) +#define ENGINE0_MASK REG_GENMASK(15, 0) +#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF) +#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF) +#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF) + +#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF) +#define INTR_DATA_VALID REG_BIT(31) +#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x) +#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x) +#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x) +#define OTHER_GUC_INSTANCE 0 +#define OTHER_GSC_HECI2_INSTANCE 3 +#define OTHER_GSC_INSTANCE 6 + +#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF) +#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF) +#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF) +#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF) +#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF) +#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF) +#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4) +#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF) +#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF) +#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF) +#define CCS0_CCS1_INTR_MASK XE_REG(0x190100) +#define CCS2_CCS3_INTR_MASK XE_REG(0x190104) +#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110) +#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114) +#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118) +#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c) +#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) +#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8) +#define GSC_ER_COMPLETE REG_BIT(5) +#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4) +#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) +#define GT_RENDER_USER_INTERRUPT REG_BIT(0) + +#endif diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h index 23f7dc5bbe99..51fd40ffafcb 100644 --- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h +++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h @@ -128,7 +128,7 @@ struct xe_reg_mcr { * options. */ #define XE_REG_MCR(r_, ...) 
((const struct xe_reg_mcr){ \ - .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ + .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ }) static inline bool xe_reg_is_valid(struct xe_reg r) diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h index dfa869f0dddd..3293172b0128 100644 --- a/drivers/gpu/drm/xe/regs/xe_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_regs.h @@ -11,10 +11,6 @@ #define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12) #define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0) -#define PCU_IRQ_OFFSET 0x444e0 -#define GU_MISC_IRQ_OFFSET 0x444f0 -#define GU_MISC_GSE REG_BIT(27) - #define GU_CNTL_PROTECTED XE_REG(0x10100C) #define DRIVERINT_FLR_DIS REG_BIT(31) @@ -57,16 +53,6 @@ #define MTL_MPE_FREQUENCY XE_REG(0x13802c) #define MTL_RPE_MASK REG_GENMASK(8, 0) -#define DG1_MSTR_TILE_INTR XE_REG(0x190008) -#define DG1_MSTR_IRQ REG_BIT(31) -#define DG1_MSTR_TILE(t) REG_BIT(t) - -#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF) -#define MASTER_IRQ REG_BIT(31) -#define GU_MISC_IRQ REG_BIT(29) -#define DISPLAY_IRQ REG_BIT(16) -#define GT_DW_IRQ(x) REG_BIT(x) - #define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF) #define VF_CAP REG_BIT(0) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 8dac069483e8..3e0ae40ebbd2 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -6,6 +6,13 @@ #include <kunit/test.h> #include <kunit/visibility.h> +#include <linux/iosys-map.h> +#include <linux/math64.h> +#include <linux/prandom.h> +#include <linux/swap.h> + +#include <uapi/linux/sysinfo.h> + #include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" #include "tests/xe_test.h" @@ -358,9 +365,242 @@ static void xe_bo_evict_kunit(struct kunit *test) evict_test_run_device(xe); } +struct xe_bo_link { + struct list_head link; + struct xe_bo *bo; + u32 val; +}; + +#define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M) + +static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state, + struct xe_bo_link *link) +{ + struct iosys_map map; + int ret = ttm_bo_vmap(&bo->ttm, &map); + size_t __maybe_unused i; + + if (ret) + return ret; + + for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) { + u32 val = prandom_u32_state(state); + + iosys_map_wr(&map, i, u32, val); + if (i == 0) + link->val = val; + } + + ttm_bo_vunmap(&bo->ttm, &map); + return 0; +} + +static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo, + unsigned int bo_nr, struct rnd_state *state, + struct xe_bo_link *link) +{ + struct iosys_map map; + int ret = ttm_bo_vmap(&bo->ttm, &map); + size_t i; + bool failed = false; + + if (ret) { + KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr); + return true; + } + + for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) { + u32 val = prandom_u32_state(state); + + if (iosys_map_rd(&map, i, u32) != val) { + KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx", + bo_nr, (unsigned long long)i); + kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n", + (unsigned int)iosys_map_rd(&map, i, u32), val); + if (i == 0 && val != link->val) + kunit_info(test, "Looks like PRNG is out of sync.\n"); + failed = true; + break; + } + } + + ttm_bo_vunmap(&bo->ttm, &map); + + return failed; +} + +/* + * Try to create system bos corresponding to twice the amount + * of available system memory to test shrinker functionality. 
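The shrink test that follows depends on a reproducible pseudo-random stream: every bo is filled from a seeded PRNG, and for readback the state is reseeded with the same value so the identical sequence can be replayed for verification. The core of that trick, as a sketch:

#include <linux/prandom.h>
#include <linux/types.h>

static void fill_buf(u32 *buf, size_t n, struct rnd_state *prng)
{
        size_t i;

        for (i = 0; i < n; i++)
                buf[i] = prandom_u32_state(prng);
}

static bool verify_buf(const u32 *buf, size_t n, struct rnd_state *prng)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (buf[i] != prandom_u32_state(prng))  /* replayed stream */
                        return false;
        return true;
}

/*
 * Usage: seed, fill, evict, reseed with the same value, verify.
 *
 *      prandom_seed_state(&prng, seed);
 *      fill_buf(buf, n, &prng);
 *      ... force the pages out and back in ...
 *      prandom_seed_state(&prng, seed);
 *      ok = verify_buf(buf, n, &prng);
 */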
+ * If no swap space is available to accommodate the + * memory overcommit, mark bos purgeable. + */ +static int shrink_test_run_device(struct xe_device *xe) +{ + struct kunit *test = kunit_get_current_test(); + LIST_HEAD(bos); + struct xe_bo_link *link, *next; + struct sysinfo si; + u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit; + unsigned int interrupted = 0, successful = 0, count = 0; + struct rnd_state prng; + u64 rand_seed; + bool failed = false; + + rand_seed = get_random_u64(); + prandom_seed_state(&prng, rand_seed); + kunit_info(test, "Random seed is 0x%016llx.\n", + (unsigned long long)rand_seed); + + /* Skip if execution time is expected to be too long. */ + + limit = SZ_32G; + /* IGFX with flat CCS needs to copy when swapping / shrinking */ + if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe)) + limit = SZ_16G; + + si_meminfo(&si); + ram = (size_t)si.freeram * si.mem_unit; + if (ram > limit) { + kunit_skip(test, "Too long expected execution time.\n"); + return 0; + } + to_alloc = ram * 2; + + ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE; + if (to_alloc > ram_and_swap) + purgeable = to_alloc - ram_and_swap; + purgeable += div64_u64(purgeable, 5); + + kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n", + (unsigned long)ram); + for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) { + struct xe_bo *bo; + unsigned int mem_type; + struct xe_ttm_tt *xe_tt; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + KUNIT_FAIL(test, "Unexpected link allocation failure\n"); + failed = true; + break; + } + + INIT_LIST_HEAD(&link->link); + + /* We can create bos using WC caching here. But it is slower. */ + bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE, + DRM_XE_GEM_CPU_CACHING_WB, + XE_BO_FLAG_SYSTEM); + if (IS_ERR(bo)) { + if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) && + bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS)) + KUNIT_FAIL(test, "Error creating bo: %pe\n", bo); + kfree(link); + failed = true; + break; + } + xe_bo_lock(bo, false); + xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm); + + /* + * Allocate purgeable bos first, because if we do it the + * other way around, they may not be subject to swapping... + */ + if (alloced < purgeable) { + xe_tt->purgeable = true; + bo->ttm.priority = 0; + } else { + int ret = shrink_test_fill_random(bo, &prng, link); + + if (ret) { + xe_bo_unlock(bo); + xe_bo_put(bo); + KUNIT_FAIL(test, "Error filling bo with random data: %pe\n", + ERR_PTR(ret)); + kfree(link); + failed = true; + break; + } + } + + mem_type = bo->ttm.resource->mem_type; + xe_bo_unlock(bo); + link->bo = bo; + list_add_tail(&link->link, &bos); + + if (mem_type != XE_PL_TT) { + KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n", + bo->ttm.resource->mem_type); + failed = true; + } + cond_resched(); + if (signal_pending(current)) + break; + } + + /* + * Read back and destroy bos. Reset the pseudo-random seed to get an + * identical pseudo-random number sequence for readback. + */ + prandom_seed_state(&prng, rand_seed); + list_for_each_entry_safe(link, next, &bos, link) { + static struct ttm_operation_ctx ctx = {.interruptible = true}; + struct xe_bo *bo = link->bo; + struct xe_ttm_tt *xe_tt; + int ret; + + count++; + if (!signal_pending(current) && !failed) { + bool purgeable, intr = false; + + xe_bo_lock(bo, NULL); + + /* xe_tt->purgeable is cleared on validate. 
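The purgeable flag consumed here is produced by the xe_bo.c changes further down: on swap_notify, a bo marked purgeable is validated against an empty placement list, which makes TTM drop the backing store instead of writing it out to swap. The idiom in isolation:

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int purge_backing_store_sketch(struct ttm_buffer_object *tbo)
{
        struct ttm_operation_ctx ctx = { .interruptible = false };
        struct ttm_placement empty = {};

        /*
         * With num_placement == 0, ttm_bo_validate() guts the bo: the
         * resource and its pages are freed rather than swapped out.
         */
        return ttm_bo_validate(tbo, &empty, &ctx);
}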
*/ + xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm); + purgeable = xe_tt->purgeable; + do { + ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx); + if (ret == -EINTR) + intr = true; + } while (ret == -EINTR && !signal_pending(current)); + + if (!ret && !purgeable) + failed = shrink_test_verify(test, bo, count, &prng, link); + + xe_bo_unlock(bo); + if (ret) { + KUNIT_FAIL(test, "Validation failed: %pe\n", + ERR_PTR(ret)); + failed = true; + } else if (intr) { + interrupted++; + } else { + successful++; + } + } + xe_bo_put(link->bo); + list_del(&link->link); + kfree(link); + } + kunit_info(test, "Readbacks interrupted: %u successful: %u\n", + interrupted, successful); + + return 0; +} + +static void xe_bo_shrink_kunit(struct kunit *test) +{ + struct xe_device *xe = test->priv; + + shrink_test_run_device(xe); +} + static struct kunit_case xe_bo_tests[] = { KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param), KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param), + KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param, + {.speed = KUNIT_SPEED_SLOW}), {} }; diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 79be73b4a02b..6f9b7a266b41 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -43,19 +43,18 @@ static void read_l3cc_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 l3cc, l3cc_expected; - unsigned int i; + unsigned int fw_ref, i; u32 reg_val; - u32 ret; - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (!(i & 1)) { if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i >> 1)); + reg_val = xe_mmio_read32(>->mmio, XELP_LNCFCMOCS(i >> 1)); mocs_dbg(gt, "reg_val=0x%x\n", reg_val); } else { @@ -72,7 +71,7 @@ static void read_l3cc_table(struct xe_gt *gt, KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc, "l3cc idx=%u has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void read_mocs_table(struct xe_gt *gt, @@ -80,21 +79,20 @@ static void read_mocs_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 mocs, mocs_expected; - unsigned int i; + unsigned int fw_ref, i; u32 reg_val; - u32 ret; KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index, "Unused entries index should have been defined\n"); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(>->mmio, XELP_GLOBAL_MOCS(i)); mocs_expected = get_entry_control(info, i); mocs = reg_val; @@ -106,7 +104,7 @@ static void read_mocs_table(struct xe_gt *gt, "mocs reg 0x%x has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static int mocs_kernel_test_run_device(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_assert.h 
b/drivers/gpu/drm/xe/xe_assert.h index e22bbf57fca7..04d6b95c6d87 100644 --- a/drivers/gpu/drm/xe/xe_assert.h +++ b/drivers/gpu/drm/xe/xe_assert.h @@ -10,7 +10,7 @@ #include <drm/drm_print.h> -#include "xe_device_types.h" +#include "xe_gt_types.h" #include "xe_step.h" /** diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 2a093540354e..ae6b337cdc54 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -283,6 +283,8 @@ struct xe_ttm_tt { struct device *dev; struct sg_table sgt; struct sg_table *sg; + /** @purgeable: Whether the content of the pages of @ttm is purgeable. */ + bool purgeable; }; static int xe_tt_map_sg(struct ttm_tt *tt) @@ -468,7 +470,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev, mem->bus.offset += vram->io_start; mem->bus.is_iomem = true; -#if !defined(CONFIG_X86) +#if !IS_ENABLED(CONFIG_X86) mem->bus.caching = ttm_write_combined; #endif return 0; @@ -761,7 +763,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, if (xe_rpm_reclaim_safe(xe)) { /* * We might be called through swapout in the validation path of - * another TTM device, so unconditionally acquire rpm here. + * another TTM device, so acquire rpm here. */ xe_pm_runtime_get(xe); } else { @@ -901,7 +903,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo) } } - ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx); + ret = ttm_bo_populate(&bo->ttm, &ctx); if (ret) goto err_res_free; @@ -961,7 +963,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo) if (ret) return ret; - ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx); + ret = ttm_bo_populate(&bo->ttm, &ctx); if (ret) goto err_res_free; @@ -1089,6 +1091,33 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo) } } +static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx) +{ + struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); + + if (ttm_bo->ttm) { + struct ttm_placement place = {}; + int ret = ttm_bo_validate(ttm_bo, &place, ctx); + + drm_WARN_ON(&xe->drm, ret); + } +} + +static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo) +{ + struct ttm_operation_ctx ctx = { + .interruptible = false + }; + + if (ttm_bo->ttm) { + struct xe_ttm_tt *xe_tt = + container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm); + + if (xe_tt->purgeable) + xe_ttm_bo_purge(ttm_bo, &ctx); + } +} + const struct ttm_device_funcs xe_ttm_funcs = { .ttm_tt_create = xe_ttm_tt_create, .ttm_tt_populate = xe_ttm_tt_populate, @@ -1101,6 +1130,7 @@ const struct ttm_device_funcs xe_ttm_funcs = { .release_notify = xe_ttm_bo_release_notify, .eviction_valuable = ttm_bo_eviction_valuable, .delete_mem_notify = xe_ttm_bo_delete_mem_notify, + .swap_notify = xe_ttm_bo_swap_notify, }; static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) @@ -1431,7 +1461,8 @@ static struct xe_bo * __xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - u16 cpu_caching, enum ttm_bo_type type, u32 flags) + u16 cpu_caching, enum ttm_bo_type type, u32 flags, + u64 alignment) { struct xe_bo *bo = NULL; int err; @@ -1460,6 +1491,8 @@ __xe_bo_create_locked(struct xe_device *xe, if (IS_ERR(bo)) return bo; + bo->min_align = alignment; + /* * Note that instead of taking a reference no the drm_gpuvm_resv_bo(), * to ensure the shared resv doesn't disappear under the bo, the bo @@ -1500,16 +1533,18 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, 
u64 end, - enum ttm_bo_type type, u32 flags) + enum ttm_bo_type type, u32 flags, u64 alignment) { - return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, + flags, alignment); } struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags) { - return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, + flags, 0); } struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, @@ -1519,7 +1554,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, { struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, cpu_caching, ttm_bo_type_device, - flags | XE_BO_FLAG_USER); + flags | XE_BO_FLAG_USER, 0); if (!IS_ERR(bo)) xe_bo_unlock_vm_held(bo); @@ -1543,6 +1578,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile size_t size, u64 offset, enum ttm_bo_type type, u32 flags) { + return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, + type, flags, 0); +} + +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment) +{ struct xe_bo *bo; int err; u64 start = offset == ~0ull ? 0 : offset; @@ -1553,7 +1599,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile flags |= XE_BO_FLAG_GGTT; bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, - flags | XE_BO_FLAG_NEEDS_CPU_ACCESS); + flags | XE_BO_FLAG_NEEDS_CPU_ACCESS, + alignment); if (IS_ERR(bo)) return bo; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 6e4be52306df..7fa44a0138b0 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -77,7 +77,7 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags); + enum ttm_bo_type type, u32 flags, u64 alignment); struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags); @@ -94,6 +94,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 offset, enum ttm_bo_type type, u32 flags); +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment); struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, const void *data, size_t size, enum ttm_bo_type type, u32 flags); @@ -312,8 +318,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev) return round_down(max / 2, PAGE_SIZE); } -#define i915_gem_object_flush_if_display(obj) ((void)(obj)) - #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) /** * xe_bo_is_mem_type - Whether the bo currently resides in the given diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 2ed558ac2264..13c6d8a69e91 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -76,9 +76,11 @@ struct xe_bo { /** @vram_userfault_link: Link into 
@mem_access.vram_userfault.list */ struct list_head vram_userfault_link; -}; -#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base) -#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev) + /** @min_align: minimum alignment needed for this BO if different + * from default + */ + u64 min_align; +}; #endif diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index fe4319eb13fd..492b4877433f 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -90,13 +90,32 @@ static int forcewake_open(struct inode *inode, struct file *file) { struct xe_device *xe = inode->i_private; struct xe_gt *gt; - u8 id; + u8 id, last_gt; + unsigned int fw_ref; xe_pm_runtime_get(xe); - for_each_gt(gt, xe, id) - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + for_each_gt(gt, xe, id) { + last_gt = id; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + goto err_fw_get; + } return 0; + +err_fw_get: + for_each_gt(gt, xe, id) { + if (id < last_gt) + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + else if (id == last_gt) + xe_force_wake_put(gt_to_fw(gt), fw_ref); + else + break; + } + + xe_pm_runtime_put(xe); + return -ETIMEDOUT; } static int forcewake_release(struct inode *inode, struct file *file) @@ -106,7 +125,7 @@ static int forcewake_release(struct inode *inode, struct file *file) u8 id; for_each_gt(gt, xe, id) - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(xe); return 0; diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index bdb76e834e4c..d2679c5d976b 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -6,6 +6,7 @@ #include "xe_devcoredump.h" #include "xe_devcoredump_types.h" +#include <linux/ascii85.h> #include <linux/devcoredump.h> #include <generated/utsrelease.h> @@ -16,9 +17,12 @@ #include "xe_force_wake.h" #include "xe_gt.h" #include "xe_gt_printk.h" +#include "xe_guc_capture.h" #include "xe_guc_ct.h" +#include "xe_guc_log.h" #include "xe_guc_submit.h" #include "xe_hw_engine.h" +#include "xe_module.h" #include "xe_sched_job.h" #include "xe_vm.h" @@ -85,9 +89,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count, p = drm_coredump_printer(&iter); - drm_printf(&p, "**** Xe Device Coredump ****\n"); - drm_printf(&p, "kernel: " UTS_RELEASE "\n"); - drm_printf(&p, "module: " KBUILD_MODNAME "\n"); + drm_puts(&p, "**** Xe Device Coredump ****\n"); + drm_puts(&p, "kernel: " UTS_RELEASE "\n"); + drm_puts(&p, "module: " KBUILD_MODNAME "\n"); ts = ktime_to_timespec64(ss->snapshot_time); drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); @@ -96,20 +100,27 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count, drm_printf(&p, "Process: %s\n", ss->process_name); xe_device_snapshot_print(xe, &p); - drm_printf(&p, "\n**** GuC CT ****\n"); - xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p); - xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p); + drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id); + drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id); - drm_printf(&p, "\n**** Job ****\n"); - xe_sched_job_snapshot_print(coredump->snapshot.job, &p); + drm_puts(&p, "\n**** GuC Log ****\n"); + xe_guc_log_snapshot_print(ss->guc.log, &p); + drm_puts(&p, "\n**** GuC CT ****\n"); + xe_guc_ct_snapshot_print(ss->guc.ct, &p); - drm_printf(&p, "\n**** HW 
Engines ****\n"); + drm_puts(&p, "\n**** Contexts ****\n"); + xe_guc_exec_queue_snapshot_print(ss->ge, &p); + + drm_puts(&p, "\n**** Job ****\n"); + xe_sched_job_snapshot_print(ss->job, &p); + + drm_puts(&p, "\n**** HW Engines ****\n"); for (i = 0; i < XE_NUM_HW_ENGINES; i++) - if (coredump->snapshot.hwe[i]) - xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i], - &p); - drm_printf(&p, "\n**** VM state ****\n"); - xe_vm_snapshot_print(coredump->snapshot.vm, &p); + if (ss->hwe[i]) + xe_engine_snapshot_print(ss->hwe[i], &p); + + drm_puts(&p, "\n**** VM state ****\n"); + xe_vm_snapshot_print(ss->vm, &p); return count - iter.remain; } @@ -118,8 +129,14 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) { int i; - xe_guc_ct_snapshot_free(ss->ct); - ss->ct = NULL; + xe_guc_log_snapshot_free(ss->guc.log); + ss->guc.log = NULL; + + xe_guc_ct_snapshot_free(ss->guc.ct); + ss->guc.ct = NULL; + + xe_guc_capture_put_matched_nodes(&ss->gt->uc.guc); + ss->matched_node = NULL; xe_guc_exec_queue_snapshot_free(ss->ge); ss->ge = NULL; @@ -141,13 +158,15 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) { struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); + unsigned int fw_ref; /* keep going if fw fails as we still want to save the memory and SW data */ - if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL)) + fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); xe_vm_snapshot_capture_delayed(ss->vm); xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); - xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); /* Calculate devcoredump size */ ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); @@ -204,6 +223,7 @@ static void xe_devcoredump_free(void *data) /* To prevent stale data on next snapshot, clear everything */ memset(&coredump->snapshot, 0, sizeof(coredump->snapshot)); coredump->captured = false; + coredump->job = NULL; drm_info(&coredump_to_xe(coredump)->drm, "Xe device coredump has been deleted.\n"); } @@ -214,14 +234,13 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, struct xe_devcoredump_snapshot *ss = &coredump->snapshot; struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); - struct xe_hw_engine *hwe; - enum xe_hw_engine_id id; u32 adj_logical_mask = q->logical_mask; u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - int i; + unsigned int fw_ref; bool cookie; + int i; ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); @@ -231,6 +250,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, strscpy(ss->process_name, process_name); ss->gt = q->gt; + coredump->job = job; INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work); cookie = dma_fence_begin_signalling(); @@ -244,26 +264,19 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, } /* keep going if fw fails as we still want to save the memory and SW data */ - if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL)) - xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); + fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); - coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true); - coredump->snapshot.ge = 
xe_guc_exec_queue_snapshot_capture(q); - coredump->snapshot.job = xe_sched_job_snapshot_capture(job); - coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm); + ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true); + ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct); + ss->ge = xe_guc_exec_queue_snapshot_capture(q); + ss->job = xe_sched_job_snapshot_capture(job); + ss->vm = xe_vm_snapshot_capture(q->vm); - for_each_hw_engine(hwe, q->gt, id) { - if (hwe->class != q->hwe->class || - !(BIT(hwe->logical_instance) & adj_logical_mask)) { - coredump->snapshot.hwe[id] = NULL; - continue; - } - coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe); - } + xe_engine_snapshot_capture_for_job(job); queue_work(system_unbound_wq, &ss->work); - xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(q->gt), fw_ref); dma_fence_end_signalling(cookie); } @@ -310,3 +323,89 @@ int xe_devcoredump_init(struct xe_device *xe) } #endif + +/** + * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85 + * + * The output is split to multiple lines because some print targets, e.g. dmesg + * cannot handle arbitrarily long lines. Note also that printing to dmesg in + * piecemeal fashion is not possible: each separate call to drm_puts() has a + * line-feed automatically added! Therefore, the entire output line must be + * constructed in a local buffer first, then printed in one atomic output call. + * + * There is also a scheduler yield call to prevent the 'task has been stuck for + * 120s' kernel hang check feature from firing when printing to a slow target + * such as dmesg over a serial port. + * + * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down. + * + * @p: the printer object to output to + * @prefix: optional prefix to add to output string + * @blob: the Binary Large OBject to dump out + * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32) + * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32) + */ +void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, + const void *blob, size_t offset, size_t size) +{ + const u32 *blob32 = (const u32 *)blob; + char buff[ASCII85_BUFSZ], *line_buff; + size_t line_pos = 0; + +#define DMESG_MAX_LINE_LEN 800 +#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */ + + if (size & 3) + drm_printf(p, "Size not word aligned: %zu", size); + if (offset & 3) + drm_printf(p, "Offset not word aligned: %zu", offset); + + line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL); + if (!line_buff) { + drm_printf(p, "Failed to allocate line buffer\n"); + return; + } + + blob32 += offset / sizeof(*blob32); + size /= sizeof(*blob32); + + if (prefix) { + strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2); + line_pos = strlen(line_buff); + + line_buff[line_pos++] = ':'; + line_buff[line_pos++] = ' '; + } + + while (size--) { + u32 val = *(blob32++); + + strscpy(line_buff + line_pos, ascii85_encode(val, buff), + DMESG_MAX_LINE_LEN - line_pos); + line_pos += strlen(line_buff + line_pos); + + if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) { + line_buff[line_pos++] = '\n'; + line_buff[line_pos++] = 0; + + drm_puts(p, line_buff); + + line_pos = 0; + + /* Prevent 'stuck thread' time out errors */ + cond_resched(); + } + } + + if (line_pos) { + line_buff[line_pos++] = '\n'; + line_buff[line_pos++] = 0; + + drm_puts(p, line_buff); + } + + kfree(line_buff); + +#undef MIN_SPACE +#undef DMESG_MAX_LINE_LEN +}
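[Editor's note, not part of the patch: the encoder above packs each 4-byte word into 5 printable characters ('!'..'u'), with the kernel's ascii85_encode() emitting 'z' as shorthand for an all-zero word. A minimal user-space sketch of the matching decoder, assuming any "prefix: " has already been stripped from the dump text; the function name and error handling are illustrative only.]

#include <stdint.h>
#include <stddef.h>

static size_t ascii85_decode(const char *in, uint32_t *out, size_t max_words)
{
        size_t n = 0;

        while (*in && n < max_words) {
                uint32_t val = 0;
                int i;

                if (*in == '\n') {      /* line breaks inserted for dmesg */
                        in++;
                        continue;
                }
                if (*in == 'z') {       /* shorthand for 0x00000000 */
                        out[n++] = 0;
                        in++;
                        continue;
                }
                for (i = 0; i < 5; i++) {       /* 5 chars -> one u32 */
                        if (in[i] < '!' || in[i] > 'u')
                                return n;       /* malformed input: stop */
                        val = val * 85 + (uint32_t)(in[i] - '!');
                }
                out[n++] = val;
                in += 5;
        }
        return n;
}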
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h index e2fa65ce0932..a4eebc285fc8 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.h +++ b/drivers/gpu/drm/xe/xe_devcoredump.h @@ -6,6 +6,9 @@ #ifndef _XE_DEVCOREDUMP_H_ #define _XE_DEVCOREDUMP_H_ +#include <linux/types.h> + +struct drm_printer; struct xe_device; struct xe_sched_job; @@ -23,4 +26,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe) } #endif +void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, + const void *blob, size_t offset, size_t size); + #endif diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h index 440d05d77a5a..3703ddea1252 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump_types.h +++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h @@ -34,16 +34,27 @@ struct xe_devcoredump_snapshot { /** @work: Workqueue for deferred capture outside of signaling context */ struct work_struct work; - /* GuC snapshots */ - /** @ct: GuC CT snapshot */ - struct xe_guc_ct_snapshot *ct; - /** @ge: Guc Engine snapshot */ + /** @guc: GuC snapshots */ + struct { + /** @guc.ct: GuC CT snapshot */ + struct xe_guc_ct_snapshot *ct; + /** @guc.log: GuC log snapshot */ + struct xe_guc_log_snapshot *log; + } guc; + + /** @ge: GuC Submission Engine snapshot */ struct xe_guc_submit_exec_queue_snapshot *ge; /** @hwe: HW Engine snapshot array */ struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES]; /** @job: Snapshot of job state */ struct xe_sched_job_snapshot *job; + /** + * @matched_node: The matched capture node for the timed-out job. + * This single-node tracker works because devcoredump will always only + * produce one hw-engine capture per devcoredump event. + */ + struct __guc_capture_parsed_output *matched_node; /** @vm: Snapshot of VM state */ struct xe_vm_snapshot *vm; @@ -69,6 +80,8 @@ struct xe_devcoredump { bool captured; /** @snapshot: Snapshot is captured at time of the first crash */ struct xe_devcoredump_snapshot snapshot; + /** @job: Pointer to the faulting job */ + struct xe_sched_job *job; }; #endif diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index a1987b554a8d..0e2dd691bdae 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -5,10 +5,11 @@ #include "xe_device.h" +#include <linux/aperture.h> #include <linux/delay.h> +#include <linux/fault-inject.h> #include <linux/units.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client.h> #include <drm/drm_gem_ttm_helper.h> @@ -301,7 +302,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe_display_driver_set_hooks(&driver); - err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + err = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (err) return ERR_PTR(err); @@ -373,6 +374,12 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, err: return ERR_PTR(err); } +ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */ + +static bool xe_driver_flr_disabled(struct xe_device *xe) +{ + return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS; +} /* * The driver-initiated FLR is the highest level of reset that we can trigger @@ -387,17 +394,12 @@ * if/when a new instance of i915 is bound to the device it will do a full * re-init anyway. 
*/ -static void xe_driver_flr(struct xe_device *xe) +static void __xe_driver_flr(struct xe_device *xe) { const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ - struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); int ret; - if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { - drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); - return; - } - drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); /* @@ -409,25 +411,25 @@ static void xe_driver_flr(struct xe_device *xe) * is still pending (unless the HW is totally dead), but better to be * safe in case something unexpected happens */ - ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); + ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); if (ret) { drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); return; } - xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); + xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); /* Trigger the actual Driver-FLR */ - xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR); + xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR); /* Wait for hardware teardown to complete */ - ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); + ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); if (ret) { drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); return; } /* Wait for hardware/firmware re-init to complete */ - ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS, + ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS, flr_timeout, NULL, false); if (ret) { drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); @@ -435,7 +437,17 @@ static void xe_driver_flr(struct xe_device *xe) } /* Clear sticky completion status */ - xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); + xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); +} + +static void xe_driver_flr(struct xe_device *xe) +{ + if (xe_driver_flr_disabled(xe)) { + drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); + return; + } + + __xe_driver_flr(xe); } static void xe_driver_flr_fini(void *arg) @@ -478,16 +490,15 @@ mask_err: return err; } -static bool verify_lmem_ready(struct xe_gt *gt) +static bool verify_lmem_ready(struct xe_device *xe) { - u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT; + u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; return !!val; } static int wait_for_lmem_ready(struct xe_device *xe) { - struct xe_gt *gt = xe_root_mmio_gt(xe); unsigned long timeout, start; if (!IS_DGFX(xe)) @@ -496,7 +507,7 @@ static int wait_for_lmem_ready(struct xe_device *xe) if (IS_SRIOV_VF(xe)) return 0; - if (verify_lmem_ready(gt)) + if (verify_lmem_ready(xe)) return 0; drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); @@ -525,13 +536,14 @@ static int wait_for_lmem_ready(struct xe_device *xe) msleep(20); - } while (!verify_lmem_ready(gt)); + } while (!verify_lmem_ready(xe)); drm_dbg(&xe->drm, "lmem ready after %ums", jiffies_to_msecs(jiffies - start)); return 0; } +ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */ static void update_device_info(struct xe_device *xe) { @@ -579,19 +591,21 @@ int xe_device_probe_early(struct xe_device *xe) return 0; } -static int xe_device_set_has_flat_ccs(struct xe_device *xe) +static int probe_has_flat_ccs(struct xe_device *xe) { + struct xe_gt *gt; + unsigned int fw_ref; u32 reg; - int err; + /* Always enabled/disabled, no runtime check to do */ 
if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs) return 0; - struct xe_gt *gt = xe_root_mmio_gt(xe); + gt = xe_root_mmio_gt(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER); xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); @@ -600,7 +614,8 @@ static int xe_device_set_has_flat_ccs(struct xe_device *xe) drm_dbg(&xe->drm, "Flat CCS has been disabled in bios, May lead to performance impact"); - return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } int xe_device_probe(struct xe_device *xe) @@ -636,6 +651,13 @@ int xe_device_probe(struct xe_device *xe) err = xe_gt_init_early(gt); if (err) return err; + + /* + * Only after this point can GT-specific MMIO operations + * (including things like communication with the GuC) + * be performed. + */ + xe_gt_mmio_init(gt); } for_each_tile(tile, xe, id) { @@ -651,11 +673,9 @@ int xe_device_probe(struct xe_device *xe) err = xe_ggtt_init_early(tile->mem.ggtt); if (err) return err; - if (IS_SRIOV_VF(xe)) { - err = xe_memirq_init(&tile->sriov.vf.memirq); - if (err) - return err; - } + err = xe_memirq_init(&tile->memirq); + if (err) + return err; } for_each_gt(gt, xe, id) { @@ -679,7 +699,7 @@ int xe_device_probe(struct xe_device *xe) if (err) goto err; - err = xe_device_set_has_flat_ccs(xe); + err = probe_has_flat_ccs(xe); if (err) goto err; @@ -789,6 +809,24 @@ void xe_device_remove(struct xe_device *xe) void xe_device_shutdown(struct xe_device *xe) { + struct xe_gt *gt; + u8 id; + + drm_dbg(&xe->drm, "Shutting down device\n"); + + if (xe_driver_flr_disabled(xe)) { + xe_display_pm_shutdown(xe); + + xe_irq_suspend(xe); + + for_each_gt(gt, xe, id) + xe_gt_shutdown(gt); + + xe_display_pm_shutdown_late(xe); + } else { + /* BOOM! */ + __xe_driver_flr(xe); + } } /** @@ -802,11 +840,9 @@ void xe_device_shutdown(struct xe_device *xe) */ void xe_device_wmb(struct xe_device *xe) { - struct xe_gt *gt = xe_root_mmio_gt(xe); - wmb(); if (IS_DGFX(xe)) - xe_mmio_write32(gt, VF_CAP_REG, 0); + xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); } /** @@ -830,6 +866,7 @@ void xe_device_wmb(struct xe_device *xe) void xe_device_td_flush(struct xe_device *xe) { struct xe_gt *gt; + unsigned int fw_ref; u8 id; if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) @@ -844,10 +881,11 @@ if (xe_gt_is_media_type(gt)) continue; - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; - xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); + xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); /* * FIXME: We can likely do better here with our choice of * timeout. Currently we just assume the worst case, i.e. 150us, * scenario on current platforms if all cache entries are * transient and need to be flushed.. 
*/ - if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, + if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, 150, NULL, false)) xe_gt_err_once(gt, "TD flush timeout\n"); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } } void xe_device_l2_flush(struct xe_device *xe) { struct xe_gt *gt; - int err; + unsigned int fw_ref; gt = xe_root_mmio_gt(xe); if (!XE_WA(gt, 16023588340)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; spin_lock(&gt->global_invl_lock); - xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); + xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); - if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) + if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); spin_unlock(&gt->global_invl_lock); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) @@ -919,6 +957,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) for_each_gt(gt, xe, id) { drm_printf(p, "GT id: %u\n", id); + drm_printf(p, "\tTile: %u\n", gt->tile->id); drm_printf(p, "\tType: %s\n", gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media"); drm_printf(p, "\tIP ver: %u.%u.%u\n", diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 34620ef855c0..f1fbfe916867 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -9,6 +9,8 @@ #include <drm/drm_util.h> #include "xe_device_types.h" +#include "xe_gt_types.h" +#include "xe_sriov.h" static inline struct xe_device *to_xe_device(const struct drm_device *dev) { @@ -138,7 +140,7 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe) static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt) { - return &gt->mmio.fw; + return &gt->pm.fw; } void xe_device_assert_mem_access(struct xe_device *xe); @@ -153,11 +155,22 @@ static inline bool xe_device_has_sriov(struct xe_device *xe) return xe->info.has_sriov; } +static inline bool xe_device_has_msix(struct xe_device *xe) +{ + /* TODO: change this when MSI-X support is fully integrated */ + return false; +} + static inline bool xe_device_has_memirq(struct xe_device *xe) { return GRAPHICS_VERx100(xe) >= 1250; } +static inline bool xe_device_uses_memirq(struct xe_device *xe) +{ + return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe)); +} + u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size); void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
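[Editor's note, not part of the patch: the struct xe_mmio introduced in the next hunk carries @adj_limit/@adj_offset, which xe_gt_mmio_init() later in this series sets to the media-GT GSI window. A sketch of how an accessor is expected to apply that adjustment; the helper name mirrors the driver's style but is shown here for illustration, the real accessors live in xe_mmio.c/xe_mmio.h.]

static inline u32 xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
{
        /* Registers below adj_limit are GT-relative and need rebasing */
        if (addr < mmio->adj_limit)
                addr += mmio->adj_offset;
        return addr;
}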
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 687f3a9039bb..b9ea455d6f59 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -14,7 +14,6 @@ #include "xe_devcoredump_types.h" #include "xe_heci_gsc.h" -#include "xe_gt_types.h" #include "xe_lmtt_types.h" #include "xe_memirq_types.h" #include "xe_oa.h" @@ -108,6 +107,45 @@ }; /** + * struct xe_mmio - register mmio structure + * + * Represents an MMIO region that the CPU may use to access registers. A + * region may share its IO map with other regions (e.g., all GTs within a + * tile share the same map with their parent tile, but represent different + * subregions of the overall IO space). + */ +struct xe_mmio { + /** @tile: Backpointer to tile, used for tracing */ + struct xe_tile *tile; + + /** @regs: Map used to access registers. */ + void __iomem *regs; + + /** + * @sriov_vf_gt: Backpointer to GT. + * + * This pointer is only set for GT MMIO regions and only when running + * as an SRIOV VF. + */ + struct xe_gt *sriov_vf_gt; + + /** + * @regs_size: Length of the register region within the map. + * + * The size of the iomap set in *regs is generally larger than the + * register mmio space since it includes unused regions and/or + * non-register regions such as the GGTT PTEs. + */ + size_t regs_size; + + /** @adj_limit: adjust MMIO address if address is below this value */ + u32 adj_limit; + + /** @adj_offset: offset to add to MMIO address when adjusting */ + u32 adj_offset; +}; + +/** * struct xe_tile - hardware tile structure * * From a driver perspective, a "tile" is effectively a complete GPU, containing @@ -148,26 +186,14 @@ struct xe_tile { * * 4MB-8MB: reserved * * 8MB-16MB: global GTT */ - struct { - /** @mmio.size: size of tile's MMIO space */ - size_t size; - - /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */ - void __iomem *regs; - } mmio; + struct xe_mmio mmio; /** * @mmio_ext: MMIO-extension info for a tile. * * Each tile has its own additional 256MB (28-bit) MMIO-extension space. */ - struct { - /** @mmio_ext.size: size of tile's additional MMIO-extension space */ - size_t size; - - /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */ - void __iomem *regs; - } mmio_ext; + struct xe_mmio mmio_ext; /** @mem: memory management info for tile */ struct { @@ -200,14 +226,14 @@ struct xe_tile { struct xe_lmtt lmtt; } pf; struct { - /** @sriov.vf.memirq: Memory Based Interrupts. */ - struct xe_memirq memirq; - /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */ struct xe_ggtt_node *ggtt_balloon[2]; } vf; } sriov; + /** @memirq: Memory Based Interrupts. 
*/ + struct xe_memirq memirq; + /** @pcode: tile's PCODE */ struct { /** @pcode.lock: protecting tile's PCODE mailbox data */ diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index fb52a23e28f8..22f0f1a6dfd5 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -278,6 +278,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) struct xe_hw_engine *hwe; struct xe_exec_queue *q; u64 gpu_timestamp; + unsigned int fw_ref; xe_pm_runtime_get(xe); @@ -303,13 +304,16 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) continue; fw = xe_hw_engine_to_fw_domain(hwe); - if (xe_force_wake_get(gt_to_fw(gt), fw)) { + + fw_ref = xe_force_wake_get(gt_to_fw(gt), fw); + if (!xe_force_wake_ref_has_domain(fw_ref, fw)) { hwe = NULL; + xe_force_wake_put(gt_to_fw(gt), fw_ref); break; } gpu_timestamp = xe_hw_engine_read_timestamp(hwe); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); break; } diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 7deb480e26af..1158b6062a6c 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -143,7 +143,7 @@ struct xe_exec_queue { /** @hw_engine_group_link: link into exec queues in the same hw engine group */ struct list_head hw_engine_group_link; /** @lrc: logical ring context for this exec queue */ - struct xe_lrc *lrc[]; + struct xe_lrc *lrc[] __counted_by(width); }; /** diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index 6a59165b9569..a8c416a48812 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -44,6 +44,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, u32 ctx_id) { struct xe_gt *gt = hwe->gt; + struct xe_mmio *mmio = &gt->mmio; struct xe_device *xe = gt_to_xe(gt); u64 lrc_desc; @@ -58,7 +59,7 @@ } if (hwe->class == XE_ENGINE_CLASS_COMPUTE) - xe_mmio_write32(hwe->gt, RCU_MODE, + xe_mmio_write32(mmio, RCU_MODE, _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE)); xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail); @@ -76,17 +77,17 @@ */ wmb(); - xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base), + xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base), xe_bo_ggtt_addr(hwe->hwsp)); - xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base)); - xe_mmio_write32(gt, RING_MODE(hwe->mmio_base), + xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base)); + xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE)); - xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base), + xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base), lower_32_bits(lrc_desc)); - xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base), + xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base), upper_32_bits(lrc_desc)); - xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base), + xe_mmio_write32(mmio, RING_EXECLIST_CONTROL(hwe->mmio_base), EL_CTRL_LOAD); } @@ -168,8 +169,8 @@ static u64 read_execlist_status(struct xe_hw_engine *hwe) struct xe_gt *gt = hwe->gt; u32 hi, lo; - lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base)); - hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base)); + lo = xe_mmio_read32(&gt->mmio, 
RING_EXECLIST_STATUS_LO(hwe->mmio_base)); + hi = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_HI(hwe->mmio_base)); return lo | (u64)hi << 32; } @@ -312,7 +313,7 @@ execlist_run_job(struct drm_sched_job *drm_job) q->ring_ops->emit_job(job); xe_execlist_make_active(exl); - return dma_fence_get(job->fence); + return job->fence; } static void execlist_job_free(struct drm_sched_job *drm_job) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 7d9fc489dcb8..4f6784e5abf8 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -21,15 +21,25 @@ static const char *str_wake_sleep(bool wake) return wake ? "wake" : "sleep"; } -static void domain_init(struct xe_force_wake_domain *domain, +static void mark_domain_initialized(struct xe_force_wake *fw, + enum xe_force_wake_domain_id id) +{ + fw->initialized_domains |= BIT(id); +} + +static void init_domain(struct xe_force_wake *fw, enum xe_force_wake_domain_id id, struct xe_reg reg, struct xe_reg ack) { + struct xe_force_wake_domain *domain = &fw->domains[id]; + domain->id = id; domain->reg_ctl = reg; domain->reg_ack = ack; domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL); domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL); + + mark_domain_initialized(fw, id); } void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) @@ -43,13 +53,11 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); if (xe->info.graphics_verx100 >= 1270) { - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], - XE_FW_DOMAIN_ID_GT, + init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, FORCEWAKE_ACK_GT_MTL); } else { - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], - XE_FW_DOMAIN_ID_GT, + init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, FORCEWAKE_ACK_GT); } @@ -63,8 +71,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); if (!xe_gt_is_media_type(gt)) - domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER], - XE_FW_DOMAIN_ID_RENDER, + init_domain(fw, XE_FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER, FORCEWAKE_ACK_RENDER); @@ -72,8 +79,7 @@ if (!(gt->info.engine_mask & BIT(i))) continue; - domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j], - XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j, + init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j, FORCEWAKE_MEDIA_VDBOX(j), FORCEWAKE_ACK_MEDIA_VDBOX(j)); } @@ -82,15 +88,13 @@ if (!(gt->info.engine_mask & BIT(i))) continue; - domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j], - XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j, + init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j, FORCEWAKE_MEDIA_VEBOX(j), FORCEWAKE_ACK_MEDIA_VEBOX(j)); } if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)) - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC], - XE_FW_DOMAIN_ID_GSC, + init_domain(fw, XE_FW_DOMAIN_ID_GSC, FORCEWAKE_GSC, FORCEWAKE_ACK_GSC); } @@ -100,7 +104,7 @@ static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, if (IS_SRIOV_VF(gt_to_xe(gt))) return; - xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0)); + xe_mmio_write32(&gt->mmio, domain->reg_ctl, domain->mask | (wake ? domain->val : 0)); }
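[Editor's note, not part of the patch: the FORCEWAKE_MT control writes in __domain_ctl() above appear to use the masked-bit register convention familiar from i915 hardware, where the upper 16 bits select which of the lower 16 bits the write may change, so independent agents can flip single bits without a read-modify-write cycle. An illustrative sketch of that encoding, under that assumption; the driver has its own macros.]

/* set a bit: mask bit high, value bit high */
#define EXAMPLE_MASKED_BIT_ENABLE(bit)  (((bit) << 16) | (bit))
/* clear a bit: mask bit high, value bit low */
#define EXAMPLE_MASKED_BIT_DISABLE(bit) ((bit) << 16)

/*
 * __domain_ctl() follows the same scheme:
 *   wake:  write domain->mask | domain->val  (set FORCEWAKE_KERNEL)
 *   sleep: write domain->mask alone          (clear FORCEWAKE_KERNEL)
 */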
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake) @@ -111,7 +115,7 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, ret; - ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0, + ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0, XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC, &value, true); if (ret) @@ -156,52 +160,108 @@ static int domain_sleep_wait(struct xe_gt *gt, (ffs(tmp__) - 1))) && \ domain__->reg_ctl.addr) -int xe_force_wake_get(struct xe_force_wake *fw, - enum xe_force_wake_domains domains) +/** + * xe_force_wake_get() - Increase the domain refcount + * @fw: struct xe_force_wake + * @domains: forcewake domains to get refcount on + * + * This function wakes up @domains if they are asleep and takes references. + * If the requested domain is XE_FORCEWAKE_ALL, only the applicable/initialized + * domains are considered for the refcount, and it is the caller's + * responsibility to check whether the returned ref includes a specific domain + * using the xe_force_wake_ref_has_domain() function. The caller must call + * xe_force_wake_put() to drop the acquired refcounts. + * + * Return: opaque reference to the woken domains, or zero if none of the + * requested domains could be woken. + */ +unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains) { struct xe_gt *gt = fw->gt; struct xe_force_wake_domain *domain; - enum xe_force_wake_domains tmp, woken = 0; + unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0; + unsigned int tmp, ref_rqst; unsigned long flags; - int ret = 0; + xe_gt_assert(gt, is_power_of_2(domains)); + xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL); + xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains); + + ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains; spin_lock_irqsave(&fw->lock, flags); - for_each_fw_domain_masked(domain, domains, fw, tmp) { + for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) { if (!domain->ref++) { - woken |= BIT(domain->id); + awake_rqst |= BIT(domain->id); domain_wake(gt, domain); } + ref_incr |= BIT(domain->id); } - for_each_fw_domain_masked(domain, woken, fw, tmp) { - ret |= domain_wake_wait(gt, domain); + for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) { + if (domain_wake_wait(gt, domain) == 0) { + fw->awake_domains |= BIT(domain->id); + } else { + awake_failed |= BIT(domain->id); + --domain->ref; + } } - fw->awake_domains |= woken; + ref_incr &= ~awake_failed; spin_unlock_irqrestore(&fw->lock, flags); - return ret; + xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n", + str_plural(hweight_long(awake_failed)), awake_failed); + + if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains) + ref_incr |= XE_FORCEWAKE_ALL; + + return ref_incr; }
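[Editor's note, not part of the patch: a sketch of the calling convention the reworked API expects, mirroring the converted call sites elsewhere in this patch (xe_drm_client.c, gt_reset(), forcewake_open()). The function name is hypothetical.]

static int example_touch_regs(struct xe_gt *gt)
{
        unsigned int fw_ref;

        fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
                /* put() accepts a zero or partial ref, so no special casing */
                xe_force_wake_put(gt_to_fw(gt), fw_ref);
                return -ETIMEDOUT;
        }

        /* ... MMIO access with all domains awake ... */

        xe_force_wake_put(gt_to_fw(gt), fw_ref);
        return 0;
}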
-int xe_force_wake_put(struct xe_force_wake *fw, - enum xe_force_wake_domains domains) +/** + * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0 + * @fw: Pointer to the force wake structure + * @fw_ref: reference returned by xe_force_wake_get() + * + * This function reduces the reference counts for the domains in @fw_ref. If + * the refcount for any of the specified domains reaches 0, the domain is put + * to sleep and the function waits up to 50 milliseconds for the domain to + * acknowledge the sleep request. Warns if a domain fails to acknowledge within + * the timeout. + */ +void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref) { struct xe_gt *gt = fw->gt; struct xe_force_wake_domain *domain; - enum xe_force_wake_domains tmp, sleep = 0; + unsigned int tmp, sleep = 0; unsigned long flags; - int ret = 0; + int ack_fail = 0; + + /* + * Avoid unnecessary lock and unlock when the function is called + * in error path of individual domains. + */ + if (!fw_ref) + return; + + if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + fw_ref = fw->initialized_domains; spin_lock_irqsave(&fw->lock, flags); - for_each_fw_domain_masked(domain, domains, fw, tmp) { + for_each_fw_domain_masked(domain, fw_ref, fw, tmp) { + xe_gt_assert(gt, domain->ref); + if (!--domain->ref) { sleep |= BIT(domain->id); domain_sleep(gt, domain); } } for_each_fw_domain_masked(domain, sleep, fw, tmp) { - ret |= domain_sleep_wait(gt, domain); + if (domain_sleep_wait(gt, domain) == 0) + fw->awake_domains &= ~BIT(domain->id); + else + ack_fail |= BIT(domain->id); } - fw->awake_domains &= ~sleep; spin_unlock_irqrestore(&fw->lock, flags); - return ret; + xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n", + str_plural(hweight_long(ack_fail)), ack_fail); } diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h index a2577672f4e3..0e3e84bfa51c 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.h +++ b/drivers/gpu/drm/xe/xe_force_wake.h @@ -15,10 +15,9 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw); void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw); -int xe_force_wake_get(struct xe_force_wake *fw, - enum xe_force_wake_domains domains); -int xe_force_wake_put(struct xe_force_wake *fw, - enum xe_force_wake_domains domains); +unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains); +void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref); static inline int xe_force_wake_ref(struct xe_force_wake *fw, @@ -46,4 +45,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw, xe_gt_assert(fw->gt, fw->awake_domains & domain); } +/** + * xe_force_wake_ref_has_domain - verifies if the domains are in fw_ref + * @fw_ref: the force_wake reference + * @domain: forcewake domain to verify + * + * This function confirms whether the @fw_ref includes a reference to the + * specified @domain. + * + * Return: true if domain is refcounted. 
+ */ +static inline bool +xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain) +{ + return fw_ref & domain; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h index ed0edc2cdf9f..899fbbcb3ea9 100644 --- a/drivers/gpu/drm/xe/xe_force_wake_types.h +++ b/drivers/gpu/drm/xe/xe_force_wake_types.h @@ -48,7 +48,7 @@ enum xe_force_wake_domains { XE_FW_MEDIA_VEBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX2), XE_FW_MEDIA_VEBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX3), XE_FW_GSC = BIT(XE_FW_DOMAIN_ID_GSC), - XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) - 1 + XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) }; /** @@ -78,7 +78,9 @@ struct xe_force_wake { /** @lock: protects everything force wake struct */ spinlock_t lock; /** @awake_domains: mask of all domains awake */ - enum xe_force_wake_domains awake_domains; + unsigned int awake_domains; + /** @initialized_domains: mask of all initialized domains */ + unsigned int initialized_domains; /** @domains: force wake domains */ struct xe_force_wake_domain domains[XE_FW_DOMAIN_ID_COUNT]; }; diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index ff19eca5d358..558fac8bb6fb 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -5,6 +5,7 @@ #include "xe_ggtt.h" +#include <linux/fault-inject.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/sizes.h> @@ -107,8 +108,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev) static void ggtt_update_access_counter(struct xe_ggtt *ggtt) { - struct xe_gt *gt = XE_WA(ggtt->tile->primary_gt, 22019338487) ? ggtt->tile->primary_gt : - ggtt->tile->media_gt; + struct xe_tile *tile = ggtt->tile; + struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ? + tile->primary_gt : tile->media_gt; + struct xe_mmio *mmio = &affected_gt->mmio; u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63; /* * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit @@ -118,7 +121,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt) lockdep_assert_held(&ggtt->lock); if ((++ggtt->access_count % max_gtt_writes) == 0) { - xe_mmio_write32(gt, GMD_ID, 0x0); + xe_mmio_write32(mmio, GMD_ID, 0x0); ggtt->access_count = 0; } } @@ -243,7 +246,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) else ggtt->pt_ops = &xelp_pt_ops; - ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0); + ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); drm_mm_init(&ggtt->mm, xe_wopcm_size(xe), ggtt->size - xe_wopcm_size(xe)); @@ -262,6 +265,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) return 0; } +ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */ static void xe_ggtt_invalidate(struct xe_ggtt *ggtt); @@ -405,7 +409,7 @@ static void xe_ggtt_invalidate(struct xe_ggtt *ggtt) * vs. correct GGTT page. Not particularly a hot code path so blindly * do a mmio read here which results in GuC reading correct GGTT page. */ - xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); + xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG); /* Each GT in a tile has its own TLB to cache GGTT lookups */ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt); @@ -609,7 +613,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end) { int err; - u64 alignment = XE_PAGE_SIZE; + u64 alignment = bo->min_align > 0 ? 
bo->min_align : XE_PAGE_SIZE; if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) alignment = SZ_64K; diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 6fbea70d3d36..1eb791ddc375 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -34,6 +34,7 @@ #include "instructions/xe_gsc_commands.h" #include "regs/xe_gsc_regs.h" #include "regs/xe_gt_regs.h" +#include "regs/xe_irq_regs.h" static struct xe_gt * gsc_to_gt(struct xe_gsc *gsc) @@ -179,7 +180,7 @@ out_bo: static int gsc_fw_is_loaded(struct xe_gt *gt) { - return xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) & + return xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) & HECI1_FWSTS1_INIT_COMPLETE; } @@ -190,7 +191,7 @@ static int gsc_fw_wait(struct xe_gt *gt) * executed by the GSCCS. To account for possible submission delays or * other issues, we use a 500ms timeout in the wait here. */ - return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE), + return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE), HECI1_FWSTS1_INIT_COMPLETE, HECI1_FWSTS1_INIT_COMPLETE, 500 * USEC_PER_MSEC, NULL, false); @@ -260,19 +261,17 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_tile *tile = gt_to_tile(gt); + unsigned int fw_ref; int ret; if (XE_WA(tile->primary_gt, 14018094691)) { - ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); + fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); /* * If the forcewake fails we want to keep going, because the worst * case outcome in failing to apply the WA is that PXP won't work, - * which is not fatal. We still throw a warning so the issue is - * seen if it happens. + * which is not fatal. Forcewake get warns implicitly in case of failure. */ - xe_gt_WARN_ON(tile->primary_gt, ret); - xe_gt_mcr_multicast_write(tile->primary_gt, EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK, EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT); @@ -281,7 +280,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) ret = gsc_upload(gsc); if (XE_WA(tile->primary_gt, 14018094691)) - xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref); if (ret) return ret; @@ -330,7 +329,7 @@ static int gsc_er_complete(struct xe_gt *gt) * so in that scenario we're always guaranteed to find the correct * value. 
*/ - er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE; + er_status = xe_mmio_read32(&gt->mmio, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE; if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) { /* @@ -351,6 +350,7 @@ static void gsc_work(struct work_struct *work) struct xe_gsc *gsc = container_of(work, typeof(*gsc), work); struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; u32 actions; int ret; @@ -360,7 +360,7 @@ spin_unlock_irq(&gsc->lock); xe_pm_runtime_get(xe); - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); if (actions & GSC_ACTION_ER_COMPLETE) { ret = gsc_er_complete(gt); @@ -380,7 +380,7 @@ xe_gsc_proxy_request_handler(gsc); out: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); } @@ -581,11 +581,11 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep) if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt)) return; - xe_mmio_rmw32(gt, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set); + xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set); if (prep) { /* make sure the reset bit is clear when writing the CSR reg */ - xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), + xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), HECI_H_CSR_RST, HECI_H_CSR_IG); msleep(200); } @@ -599,7 +599,8 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep) void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) { struct xe_gt *gt = gsc_to_gt(gsc); - int err; + struct xe_mmio *mmio = &gt->mmio; + unsigned int fw_ref; xe_uc_fw_print(&gsc->fw, p); @@ -608,17 +609,17 @@ if (!xe_uc_fw_is_enabled(&gsc->fw)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) return; drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", - xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); - - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); + + xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c index 2d6ea8c01445..fc64b45d324b 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -65,7 +65,7 @@ gsc_to_gt(struct xe_gsc *gsc) bool xe_gsc_proxy_init_done(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); - u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)); + u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)); return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) == HECI1_FWSTS1_PROXY_STATE_NORMAL; @@ -78,7 +78,7 @@ static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, 
u32 set) /* make sure we never accidentally write the RST bit */ clr |= HECI_H_CSR_RST; - xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set); + xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set); } static void gsc_proxy_irq_clear(struct xe_gsc *gsc) @@ -450,22 +450,21 @@ void xe_gsc_proxy_remove(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); - int err = 0; + unsigned int fw_ref = 0; if (!gsc->proxy.component_added) return; /* disable HECI2 IRQs */ xe_pm_runtime_get(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n"); /* try to disable the irq even if forcewake failed */ gsc_proxy_irq_toggle(gsc, false); - if (!err) - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); xe_gsc_wait_for_worker_completion(gsc); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index d5fd6a089b7c..d6744be01a68 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -77,7 +77,8 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile) return ERR_PTR(-ENOMEM); gt->tile = tile; - gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0); + gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", + WQ_MEM_RECLAIM); err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt); if (err) @@ -97,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt) static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; if (!xe_gt_is_media_type(gt)) { @@ -114,13 +115,13 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) } xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; @@ -128,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) if (xe_gt_is_media_type(gt)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg &= ~CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -244,7 +245,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) else if (entry->clr_bits + 1) val = (reg.mcr ? xe_gt_mcr_unicast_read_any(gt, reg_mcr) : - xe_mmio_read32(gt, reg)) & (~entry->clr_bits); + xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits); else val = 0; @@ -402,11 +403,14 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) { + err = -ETIMEDOUT; goto err_hw_fence_irq; + } if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); @@ -439,16 +443,14 @@ * Stash hardware-reported version. 
Since this register does not exist * on pre-MTL platforms, reading it there will (correctly) return 0. */ - gt->info.gmdid = xe_mmio_read32(gt, GMD_ID); - - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - XE_WARN_ON(err); + gt->info.gmdid = xe_mmio_read32(>->mmio, GMD_ID); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: dump_pat_on_error(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_hw_fence_irq: for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(>->fence_irq[i]); @@ -458,11 +460,14 @@ err_hw_fence_irq: static int all_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_hw_fence_irq; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_force_wake; + } xe_gt_mcr_set_implicit_defaults(gt); xe_reg_sr_apply_mmio(>->reg_sr, gt); @@ -526,14 +531,12 @@ static int all_fw_domain_init(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt))) xe_gt_sriov_pf_init_hw(gt); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); -err_hw_fence_irq: + xe_force_wake_put(gt_to_fw(gt), fw_ref); for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(>->fence_irq[i]); @@ -546,11 +549,12 @@ err_hw_fence_irq: */ int xe_gt_init_hwconfig(struct xe_gt *gt) { + unsigned int fw_ref; int err; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; xe_gt_mcr_init_early(gt); xe_pat_init(gt); @@ -568,8 +572,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt) xe_gt_enable_host_l2_vram(gt); out_fw: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -out: + xe_force_wake_put(gt_to_fw(gt), fw_ref); return err; } @@ -622,6 +625,30 @@ int xe_gt_init(struct xe_gt *gt) return 0; } +/** + * xe_gt_mmio_init() - Initialize GT's MMIO access + * @gt: the GT object + * + * Initialize GT's MMIO accessor, which will be used to access registers inside + * this GT. 
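+ * + * For a media GT the accessor also carries the GSI offset and range used to adjust accesses to GSI registers, and on a VF a reference to the GT is kept for the SR-IOV-specific register access path.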
+ */ +void xe_gt_mmio_init(struct xe_gt *gt) +{ + struct xe_tile *tile = gt_to_tile(gt); + + gt->mmio.regs = tile->mmio.regs; + gt->mmio.regs_size = tile->mmio.regs_size; + gt->mmio.tile = tile; + + if (gt->info.type == XE_GT_TYPE_MEDIA) { + gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET; + gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH; + } + + if (IS_SRIOV_VF(gt_to_xe(gt))) + gt->mmio.sriov_vf_gt = gt; +} + void xe_gt_record_user_engines(struct xe_gt *gt) { struct xe_hw_engine *hwe; @@ -649,8 +676,8 @@ static int do_gt_reset(struct xe_gt *gt) xe_gsc_wa_14015076503(gt, true); - xe_mmio_write32(gt, GDRST, GRDOM_FULL); - err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false); + xe_mmio_write32(>->mmio, GDRST, GRDOM_FULL); + err = xe_mmio_wait32(>->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false); if (err) xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n", ERR_PTR(err)); @@ -740,6 +767,7 @@ static int do_gt_restart(struct xe_gt *gt) static int gt_reset(struct xe_gt *gt) { + unsigned int fw_ref; int err; if (xe_device_wedged(gt_to_xe(gt))) @@ -760,9 +788,11 @@ static int gt_reset(struct xe_gt *gt) xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_msg; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_out; + } xe_uc_gucrc_disable(>->uc); xe_uc_stop_prepare(>->uc); @@ -780,8 +810,7 @@ static int gt_reset(struct xe_gt *gt) if (err) goto err_out; - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); @@ -789,8 +818,7 @@ static int gt_reset(struct xe_gt *gt) return 0; err_out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -err_msg: + xe_force_wake_put(gt_to_fw(gt), fw_ref); XE_WARN_ON(xe_uc_start(>->uc)); err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); @@ -810,7 +838,7 @@ static void gt_reset_worker(struct work_struct *w) void xe_gt_reset_async(struct xe_gt *gt) { - xe_gt_info(gt, "trying reset\n"); + xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0)); /* Don't do a reset while one is already in flight */ if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(>->uc)) @@ -822,22 +850,25 @@ void xe_gt_reset_async(struct xe_gt *gt) void xe_gt_suspend_prepare(struct xe_gt *gt) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_stop_prepare(>->uc); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } int xe_gt_suspend(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "suspending\n"); xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = xe_uc_suspend(>->uc); @@ -848,19 +879,29 @@ int xe_gt_suspend(struct xe_gt *gt) xe_gt_disable_host_l2_vram(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "suspended\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "suspend failed (%pe)\n", 
ERR_PTR(err)); return err; } +void xe_gt_shutdown(struct xe_gt *gt) +{ + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + do_gt_reset(gt); + xe_force_wake_put(gt_to_fw(gt), fw_ref); +} + /** * xe_gt_sanitize_freq() - Restore saved frequencies if necessary. * @gt: the GT object @@ -883,11 +924,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int xe_gt_resume(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "resuming\n"); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = do_gt_restart(gt); @@ -896,14 +938,15 @@ int xe_gt_resume(struct xe_gt *gt) xe_gt_idle_enable_pg(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "resumed\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err)); return err; diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index ee138e9768a2..82b9b7f82fca 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -31,6 +31,7 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile); int xe_gt_init_hwconfig(struct xe_gt *gt); int xe_gt_init_early(struct xe_gt *gt); int xe_gt_init(struct xe_gt *gt); +void xe_gt_mmio_init(struct xe_gt *gt); void xe_gt_declare_wedged(struct xe_gt *gt); int xe_gt_record_default_lrcs(struct xe_gt *gt); @@ -48,6 +49,7 @@ void xe_gt_record_user_engines(struct xe_gt *gt); void xe_gt_suspend_prepare(struct xe_gt *gt); int xe_gt_suspend(struct xe_gt *gt); +void xe_gt_shutdown(struct xe_gt *gt); int xe_gt_resume(struct xe_gt *gt); void xe_gt_reset_async(struct xe_gt *gt); void xe_gt_sanitize(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index ffcbd05671fc..b6adfb9f2030 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -74,7 +74,7 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) * platforms as these bits are unused there. 
*/ mode |= CCS_MODE_CSLICE_0_3_MASK << 16; - xe_mmio_write32(gt, CCS_MODE, mode); + xe_mmio_write32(>->mmio, CCS_MODE, mode); xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", mode, config, num_engines, num_slices); diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c index 86c2d62b4bdc..cc2ae159298e 100644 --- a/drivers/gpu/drm/xe/xe_gt_clock.c +++ b/drivers/gpu/drm/xe/xe_gt_clock.c @@ -17,7 +17,7 @@ static u32 read_reference_ts_freq(struct xe_gt *gt) { - u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE); + u32 ts_override = xe_mmio_read32(>->mmio, TIMESTAMP_OVERRIDE); u32 base_freq, frac_freq; base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK, @@ -57,7 +57,7 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg) int xe_gt_clock_init(struct xe_gt *gt) { - u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE); + u32 ctc_reg = xe_mmio_read32(>->mmio, CTC_MODE); u32 freq = 0; /* Assuming gen11+ so assert this assumption is correct */ @@ -66,7 +66,7 @@ int xe_gt_clock_init(struct xe_gt *gt) if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) { freq = read_reference_ts_freq(gt); } else { - u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0); + u32 c0 = xe_mmio_read32(>->mmio, RPM_CONFIG0); freq = get_crystal_clock_freq(c0); diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index 8f95d3a5949b..3e8c351a0eab 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -15,6 +15,7 @@ #include "xe_ggtt.h" #include "xe_gt.h" #include "xe_gt_mcr.h" +#include "xe_gt_idle.h" #include "xe_gt_sriov_pf_debugfs.h" #include "xe_gt_sriov_vf_debugfs.h" #include "xe_gt_stats.h" @@ -89,26 +90,36 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p) struct xe_device *xe = gt_to_xe(gt); struct xe_hw_engine *hwe; enum xe_hw_engine_id id; - int err; + unsigned int fw_ref; xe_pm_runtime_get(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { xe_pm_runtime_put(xe); - return err; + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; } for_each_hw_engine(hwe, gt, id) xe_hw_engine_print(hwe, p); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); - if (err) - return err; return 0; } +static int powergate_info(struct xe_gt *gt, struct drm_printer *p) +{ + int ret; + + xe_pm_runtime_get(gt_to_xe(gt)); + ret = xe_gt_idle_pg_print(gt, p); + xe_pm_runtime_put(gt_to_xe(gt)); + + return ret; +} + static int force_reset(struct xe_gt *gt, struct drm_printer *p) { xe_pm_runtime_get(gt_to_xe(gt)); @@ -288,6 +299,7 @@ static const struct drm_info_list debugfs_list[] = { {"topology", .show = xe_gt_debugfs_simple_show, .data = topology}, {"steering", .show = xe_gt_debugfs_simple_show, .data = steering}, {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt}, + {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info}, {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore}, {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds}, {"pat", .show = xe_gt_debugfs_simple_show, .data = pat}, diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index ab76973f3e1e..6bd39b2c5003 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -11,9 +11,9 @@ #include 
<drm/drm_managed.h> #include <drm/drm_print.h> -#include "xe_device_types.h" #include "xe_gt_sysfs.h" #include "xe_gt_throttle.h" +#include "xe_gt_types.h" #include "xe_guc_pc.h" #include "xe_pm.h" diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 67aba4140510..fd80afeef56a 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -98,7 +98,10 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency) void xe_gt_idle_enable_pg(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); - u32 pg_enable; + struct xe_gt_idle *gtidle = >->gtidle; + struct xe_mmio *mmio = >->mmio; + u32 vcs_mask, vecs_mask; + unsigned int fw_ref; int i, j; if (IS_SRIOV_VF(xe)) @@ -110,39 +113,136 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) xe_device_assert_mem_access(gt_to_xe(gt)); - pg_enable = RENDER_POWERGATE_ENABLE | MEDIA_POWERGATE_ENABLE; + vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE); + vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE); + + if (vcs_mask || vecs_mask) + gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE; + + if (!xe_gt_is_media_type(gt)) + gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE; for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) { if ((gt->info.engine_mask & BIT(i))) - pg_enable |= (VDN_HCP_POWERGATE_ENABLE(j) | - VDN_MFXVDENC_POWERGATE_ENABLE(j)); + gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) | + VDN_MFXVDENC_POWERGATE_ENABLE(j)); } - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (xe->info.skip_guc_pc) { /* * GuC sets the hysteresis value when GuC PC is enabled * else set it to 25 (25 * 1.28us) */ - xe_mmio_write32(gt, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25); - xe_mmio_write32(gt, RENDER_POWERGATE_IDLE_HYSTERESIS, 25); + xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25); + xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25); } - xe_mmio_write32(gt, POWERGATE_ENABLE, pg_enable); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } void xe_gt_idle_disable_pg(struct xe_gt *gt) { + struct xe_gt_idle *gtidle = >->gtidle; + unsigned int fw_ref; + if (IS_SRIOV_VF(gt_to_xe(gt))) return; xe_device_assert_mem_access(gt_to_xe(gt)); - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + gtidle->powergate_enable = 0; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + xe_mmio_write32(>->mmio, POWERGATE_ENABLE, gtidle->powergate_enable); + xe_force_wake_put(gt_to_fw(gt), fw_ref); +} + +/** + * xe_gt_idle_pg_print - Xe powergating info + * @gt: GT object + * @p: drm_printer. 
+ * + * This function prints the powergating information + * + * Return: 0 on success, negative error code otherwise + */ +int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p) +{ + struct xe_gt_idle *gtidle = >->gtidle; + struct xe_device *xe = gt_to_xe(gt); + enum xe_gt_idle_state state; + u32 pg_enabled, pg_status = 0; + u32 vcs_mask, vecs_mask; + unsigned int fw_ref; + int n; + /* + * Media Slices + * + * Slice 0: VCS0, VCS1, VECS0 + * Slice 1: VCS2, VCS3, VECS1 + * Slice 2: VCS4, VCS5, VECS2 + * Slice 3: VCS6, VCS7, VECS3 + */ + static const struct { + u64 engines; + u32 status_bit; + } media_slices[] = { + {(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) | + BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS}, + + {(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) | + BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS}, - xe_mmio_write32(gt, POWERGATE_ENABLE, 0); + {(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) | + BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS}, - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + {(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) | + BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS}, + }; + + if (xe->info.platform == XE_PVC) { + drm_printf(p, "Power Gating not supported\n"); + return 0; + } + + state = gtidle->idle_status(gtidle_to_pc(gtidle)); + pg_enabled = gtidle->powergate_enable; + + /* Do not wake the GT to read powergating status */ + if (state != GT_IDLE_C6) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; + + pg_enabled = xe_mmio_read32(>->mmio, POWERGATE_ENABLE); + pg_status = xe_mmio_read32(>->mmio, POWERGATE_DOMAIN_STATUS); + + xe_force_wake_put(gt_to_fw(gt), fw_ref); + } + + if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) { + drm_printf(p, "Render Power Gating Enabled: %s\n", + str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE)); + + drm_printf(p, "Render Power Gate Status: %s\n", + str_up_down(pg_status & RENDER_AWAKE_STATUS)); + } + + vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE); + vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE); + + /* Print media CPG status only if media is present */ + if (vcs_mask || vecs_mask) { + drm_printf(p, "Media Power Gating Enabled: %s\n", + str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE)); + + for (n = 0; n < ARRAY_SIZE(media_slices); n++) + if (gt->info.engine_mask & media_slices[n].engines) + drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n, + str_up_down(pg_status & media_slices[n].status_bit)); + } + return 0; } static ssize_t name_show(struct device *dev, @@ -201,13 +301,14 @@ static void gt_idle_fini(void *arg) { struct kobject *kobj = arg; struct xe_gt *gt = kobj_to_gt(kobj->parent); + unsigned int fw_ref; xe_gt_idle_disable_pg(gt); if (gt_to_xe(gt)->info.skip_guc_pc) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); xe_gt_idle_disable_c6(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } sysfs_remove_files(kobj, gt_idle_attrs); @@ -260,9 +361,9 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt) return; /* Units of 1280 ns for a total of 5s */ - xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA); + xe_mmio_write32(>->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA); /* Enable RC6 */ - xe_mmio_write32(gt, RC_CONTROL, + xe_mmio_write32(>->mmio, RC_CONTROL, RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE); } @@ -274,6 +375,6 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt) if 
(IS_SRIOV_VF(gt_to_xe(gt))) return; - xe_mmio_write32(gt, RC_CONTROL, 0); - xe_mmio_write32(gt, RC_STATE, 0); + xe_mmio_write32(&gt->mmio, RC_CONTROL, 0); + xe_mmio_write32(&gt->mmio, RC_STATE, 0); } diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h index 554447b5d46d..4455a6501cb0 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.h +++ b/drivers/gpu/drm/xe/xe_gt_idle.h @@ -8,6 +8,7 @@ #include "xe_gt_idle_types.h" +struct drm_printer; struct xe_gt; int xe_gt_idle_init(struct xe_gt_idle *gtidle); @@ -15,5 +16,6 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt); void xe_gt_idle_disable_c6(struct xe_gt *gt); void xe_gt_idle_enable_pg(struct xe_gt *gt); void xe_gt_idle_disable_pg(struct xe_gt *gt); +int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p); #endif /* _XE_GT_IDLE_H_ */ diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h index f99b447534f3..b8b297a3f884 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle_types.h +++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h @@ -23,6 +23,8 @@ enum xe_gt_idle_state { struct xe_gt_idle { /** @name: name */ char name[16]; + /** @powergate_enable: copy of powergate enable bits */ + u32 powergate_enable; /** @residency_multiplier: residency multiplier in ns */ u32 residency_multiplier; /** @cur_residency: raw driver copy of idle residency */ diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index c834f64b0178..5013d674e17d 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -237,13 +237,26 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = { {}, }; +static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = { + { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */ + { 0x384900, 0x384AFF }, /* GAM */ + { 0x389560, 0x3895FF }, /* MEDIAINF */ + { 0x38B600, 0x38B8FF }, /* L3BANK */ + { 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */ + { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */ + { 0x393C00, 0x393C7F }, /* MEDIAINF */ + {}, +}; + static void init_steering_l3bank(struct xe_gt *gt) { + struct xe_mmio *mmio = &gt->mmio; + if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(mmio, MIRROR_FUSE3)); u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK, - xe_mmio_read32(gt, XEHP_FUSE4)); + xe_mmio_read32(mmio, XEHP_FUSE4)); /* * Group selects mslice, instance selects bank within mslice. @@ -254,7 +267,7 @@ static void init_steering_l3bank(struct xe_gt *gt) bank_mask & BIT(0) ?
0 : 2; } else if (gt_to_xe(gt)->info.platform == XE_DG2) { u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(mmio, MIRROR_FUSE3)); u32 bank = __ffs(mslice_mask) * 8; /* @@ -266,7 +279,7 @@ static void init_steering_l3bank(struct xe_gt *gt) gt->steering[L3BANK].instance_target = bank & 0x3; } else { u32 fuse = REG_FIELD_GET(L3BANK_MASK, - ~xe_mmio_read32(gt, MIRROR_FUSE3)); + ~xe_mmio_read32(mmio, MIRROR_FUSE3)); gt->steering[L3BANK].group_target = 0; /* unused */ gt->steering[L3BANK].instance_target = __ffs(fuse); @@ -276,7 +289,7 @@ static void init_steering_mslice(struct xe_gt *gt) { u32 mask = REG_FIELD_GET(MEML3_EN_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(&gt->mmio, MIRROR_FUSE3)); /* * mslice registers are valid (not terminated) if either the meml3 @@ -352,6 +365,19 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, *instance = dss % gt->steering_dss_per_grp; } +/** + * xe_gt_mcr_steering_info_to_dss_id - Get DSS ID from group/instance steering + * @gt: GT structure + * @group: steering group ID + * @instance: steering instance ID + * + * Return: the converted DSS ID. + */ +u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance) +{ + return group * dss_per_group(gt) + instance; +} + static void init_steering_dss(struct xe_gt *gt) { gt->steering_dss_per_grp = dss_per_group(gt); @@ -380,7 +406,7 @@ static void init_steering_oaddrm(struct xe_gt *gt) static void init_steering_sqidi_psmi(struct xe_gt *gt) { u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(&gt->mmio, MIRROR_FUSE3)); u32 select = __ffs(mask); gt->steering[SQIDI_PSMI].group_target = select >> 1; @@ -439,7 +465,10 @@ void xe_gt_mcr_init(struct xe_gt *gt) if (gt->info.type == XE_GT_TYPE_MEDIA) { drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13); - if (MEDIA_VERx100(xe) >= 1301) { + if (MEDIA_VER(xe) >= 30) { + gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table; + gt->steering[INSTANCE0].ranges = xe3lpm_instance0_steering_table; + } else if (MEDIA_VERx100(xe) >= 1301) { gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table; gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table; } else { @@ -494,8 +523,8 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt) u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) | REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2); - xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val); - xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val); + xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val); + xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val); /* * For GAM registers, all reads should be directed to instance 1 * (unicast reads against other instances are not allowed), @@ -533,7 +562,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, continue; for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) { - if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) { + if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) { *group = gt->steering[type].group_target; *instance = gt->steering[type].instance_target; return true; @@ -544,7 +573,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges; if (implicit_ranges) for (int i = 0; implicit_ranges[i].end > 0; i++) - if (xe_mmio_in_range(gt, &implicit_ranges[i], reg)) + if (xe_mmio_in_range(&gt->mmio, &implicit_ranges[i],
reg)) return false; /* @@ -579,7 +608,7 @@ static void mcr_lock(struct xe_gt *gt) __acquires(>->mcr_lock) * when a read to the relevant register returns 1. */ if (GRAPHICS_VERx100(xe) >= 1270) - ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL, + ret = xe_mmio_wait32(>->mmio, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL, true); drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT); @@ -589,7 +618,7 @@ static void mcr_unlock(struct xe_gt *gt) __releases(>->mcr_lock) { /* Release hardware semaphore - this is done by writing 1 to the register */ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) - xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1); + xe_mmio_write32(>->mmio, STEER_SEMAPHORE, 0x1); spin_unlock(>->mcr_lock); } @@ -603,6 +632,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, u8 rw_flag, int group, int instance, u32 value) { const struct xe_reg reg = to_xe_reg(reg_mcr); + struct xe_mmio *mmio = >->mmio; struct xe_reg steer_reg; u32 steer_val, val = 0; @@ -635,12 +665,12 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, if (rw_flag == MCR_OP_READ) steer_val |= MCR_MULTICAST; - xe_mmio_write32(gt, steer_reg, steer_val); + xe_mmio_write32(mmio, steer_reg, steer_val); if (rw_flag == MCR_OP_READ) - val = xe_mmio_read32(gt, reg); + val = xe_mmio_read32(mmio, reg); else - xe_mmio_write32(gt, reg, value); + xe_mmio_write32(mmio, reg, value); /* * If we turned off the multicast bit (during a write) we're required @@ -649,7 +679,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, * operation. */ if (rw_flag == MCR_OP_WRITE) - xe_mmio_write32(gt, steer_reg, MCR_MULTICAST); + xe_mmio_write32(mmio, steer_reg, MCR_MULTICAST); return val; } @@ -684,7 +714,7 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr) group, instance, 0); mcr_unlock(gt); } else { - val = xe_mmio_read32(gt, reg); + val = xe_mmio_read32(>->mmio, reg); } return val; @@ -757,7 +787,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, * to touch the steering register. */ mcr_lock(gt); - xe_mmio_write32(gt, reg, value); + xe_mmio_write32(>->mmio, reg, value); mcr_unlock(gt); } diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h index 8d119a0d5493..c0cd36021c24 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.h +++ b/drivers/gpu/drm/xe/xe_gt_mcr.h @@ -28,6 +28,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg, void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p); void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance); +u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance); /* * Loop over each DSS and determine the group and instance IDs that diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h index d6228baaff1e..5dc71394372d 100644 --- a/drivers/gpu/drm/xe/xe_gt_printk.h +++ b/drivers/gpu/drm/xe/xe_gt_printk.h @@ -8,7 +8,7 @@ #include <drm/drm_print.h> -#include "xe_device_types.h" +#include "xe_gt_types.h" #define xe_gt_printk(_gt, _level, _fmt, ...) 
\ drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index 905f409db74b..e71fc3d2bda2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -5,12 +5,15 @@ #include <drm/drm_managed.h> +#include "regs/xe_guc_regs.h" #include "regs/xe_regs.h" +#include "xe_gt.h" #include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_service.h" #include "xe_mmio.h" @@ -72,7 +75,7 @@ static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe) static void pf_enable_ggtt_guest_update(struct xe_gt *gt) { - xe_mmio_write32(gt, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN); + xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN); } /** @@ -87,6 +90,57 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) pf_enable_ggtt_guest_update(gt); xe_gt_sriov_pf_service_update(gt); + xe_gt_sriov_pf_migration_init(gt); +} + +static u32 pf_get_vf_regs_stride(struct xe_device *xe) +{ + return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000; +} + +static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride) +{ + struct xe_reg pf_reg = vf_reg; + + pf_reg.vf = 0; + pf_reg.addr += stride * vfid; + + return pf_reg; +} + +static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid) +{ + u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt)); + struct xe_reg scratch; + int n, count; + + if (xe_gt_is_media_type(gt)) { + count = MED_VF_SW_FLAG_COUNT; + for (n = 0; n < count; n++) { + scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride); + xe_mmio_write32(&gt->mmio, scratch, 0); + } + } else { + count = VF_SW_FLAG_COUNT; + for (n = 0; n < count; n++) { + scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride); + xe_mmio_write32(&gt->mmio, scratch, 0); + } + } +} + +/** + * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function can only be called on PF. + */ +void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + pf_clear_vf_scratch_regs(gt, vfid); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index f0cb726a6919..96fab779a906 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -11,6 +11,7 @@ struct xe_gt; #ifdef CONFIG_PCI_IOV int xe_gt_sriov_pf_init_early(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); +void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_restart(struct xe_gt *gt); #else static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index afdb477ecf83..192643d63d22 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -34,6 +34,8 @@ #include "xe_ttm_vram_mgr.h" #include "xe_wopcm.h" +#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) + /* * Return: number of KLVs that were successfully parsed and saved, * negative error code on failure.
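The VF-to-PF register translation used by pf_clear_vf_scratch_regs() above can be illustrated with a minimal standalone sketch. The 0x400-vs-0x1000 stride follows pf_get_vf_regs_stride() in the diff; the base address and helper name here are illustrative assumptions, not taken from the driver:

#include <stdio.h>

/* Mirrors pf_get_vf_regs_stride()/xe_reg_vf_to_pf(): platforms newer than
 * graphics version 12.00 pack the per-VF register copies with a 0x400
 * stride, while older platforms dedicate a full 4 KiB page to each VF. */
static unsigned int vf_reg_addr(unsigned int pf_base, unsigned int vfid,
				unsigned int verx100)
{
	unsigned int stride = verx100 > 1200 ? 0x400 : 0x1000;

	return pf_base + stride * vfid;
}

int main(void)
{
	/* 0x190240 is an assumed VF_SW_FLAG(0) offset, for illustration only */
	printf("VF2 VF_SW_FLAG(0) at %#x\n", vf_reg_addr(0x190240, 2, 1270));
	return 0;
}

With vfid 0 reserved for the PF itself, the same computation resolves the PF's own copy of the register.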
@@ -229,14 +231,16 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i } /* Return: number of configuration dwords written */ -static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config) +static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) { u32 n = 0; if (xe_ggtt_node_allocated(config->ggtt_region)) { - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); - cfg[n++] = lower_32_bits(config->ggtt_region->base.start); - cfg[n++] = upper_32_bits(config->ggtt_region->base.start); + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); + cfg[n++] = lower_32_bits(config->ggtt_region->base.start); + cfg[n++] = upper_32_bits(config->ggtt_region->base.start); + } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE); cfg[n++] = lower_32_bits(config->ggtt_region->base.size); @@ -247,20 +251,24 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config) } /* Return: number of configuration dwords written */ -static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config) +static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) { u32 n = 0; - n += encode_config_ggtt(cfg, config); + n += encode_config_ggtt(cfg, config, details); - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID); - cfg[n++] = config->begin_ctx; + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID); + cfg[n++] = config->begin_ctx; + } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS); cfg[n++] = config->num_ctxs; - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID); - cfg[n++] = config->begin_db; + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID); + cfg[n++] = config->begin_db; + } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS); cfg[n++] = config->num_dbs; @@ -301,7 +309,7 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) if (!cfg) return -ENOMEM; - num_dwords = encode_config(cfg, config); + num_dwords = encode_config(cfg, config, true); xe_gt_assert(gt, num_dwords <= max_cfg_dwords); if (xe_gt_is_media_type(gt)) { @@ -309,10 +317,10 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid); /* media-GT will never include a GGTT config */ - xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config)); + xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true)); /* the GGTT config must be taken from the primary-GT instead */ - num_dwords += encode_config_ggtt(cfg + num_dwords, other); + num_dwords += encode_config_ggtt(cfg + num_dwords, other, true); } xe_gt_assert(gt, num_dwords <= max_cfg_dwords); @@ -2044,7 +2052,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid) valid_all = valid_all && valid_lmem; } - return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA; + return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA; } /** @@ -2071,6 +2079,174 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid) } /** + * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be PF) + * @buf: the buffer to save a config to (or NULL if querying the buf size) + * @size: the size of the buffer (or 0 if querying the buf size) + * + * This function can only be called on PF. + * + * Return: minimum size of the buffer or the number of bytes saved, + * or a negative error code on failure.
+ */ +ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size) +{ + struct xe_gt_sriov_config *config; + ssize_t ret; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + xe_gt_assert(gt, !(!buf ^ !size)); + + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + ret = pf_validate_vf_config(gt, vfid); + if (!size) { + ret = ret ? 0 : SZ_4K; + } else if (!ret) { + if (size < SZ_4K) { + ret = -ENOBUFS; + } else { + config = pf_pick_vf_config(gt, vfid); + ret = encode_config(buf, config, false) * sizeof(u32); + } + } + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + return ret; +} + +static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid, + u32 key, u32 len, const u32 *value) +{ + switch (key) { + case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY: + if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN) + return -EBADMSG; + return pf_provision_vf_ctxs(gt, vfid, value[0]); + + case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY: + if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN) + return -EBADMSG; + return pf_provision_vf_dbs(gt, vfid, value[0]); + + case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY: + if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN) + return -EBADMSG; + return pf_provision_exec_quantum(gt, vfid, value[0]); + + case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY: + if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN) + return -EBADMSG; + return pf_provision_preempt_timeout(gt, vfid, value[0]); + + /* auto-generate case statements */ +#define define_threshold_key_to_provision_case(TAG, ...) \ + case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG): \ + BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u); \ + if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG)) \ + return -EBADMSG; \ + return pf_provision_threshold(gt, vfid, \ + MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \ + value[0]); + + MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case) +#undef define_threshold_key_to_provision_case + } + + if (xe_gt_is_media_type(gt)) + return -EKEYREJECTED; + + switch (key) { + case GUC_KLV_VF_CFG_GGTT_SIZE_KEY: + if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN) + return -EBADMSG; + return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0])); + + case GUC_KLV_VF_CFG_LMEM_SIZE_KEY: + if (!IS_DGFX(gt_to_xe(gt))) + return -EKEYREJECTED; + if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN) + return -EBADMSG; + return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0])); + } + + return -EKEYREJECTED; +} + +static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid, + const u32 *klvs, size_t num_dwords) +{ + int err; + + while (num_dwords >= GUC_KLV_LEN_MIN) { + u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]); + u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]); + + klvs += GUC_KLV_LEN_MIN; + num_dwords -= GUC_KLV_LEN_MIN; + + if (num_dwords < len) + err = -EBADMSG; + else + err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs); + + if (err) { + xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err)); + return err; + } + + klvs += len; + num_dwords -= len; + } + + return pf_validate_vf_config(gt, vfid); +} + +/** + * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be PF) + * @buf: the buffer with config data + * @size: the size of the config data + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size) +{ + int err; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + + if (!size) + return -ENODATA; + + if (size % sizeof(u32)) + return -EINVAL; + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { + struct drm_printer p = xe_gt_info_printer(gt); + + drm_printf(&p, "restoring VF%u config:\n", vfid); + xe_guc_klv_print(buf, size / sizeof(u32), &p); + } + + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + err = pf_send_vf_cfg_reset(gt, vfid); + if (!err) { + pf_release_vf_config(gt, vfid); + err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32)); + } + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + return err; +} + +/** * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset. * @gt: the &xe_gt * @@ -2203,6 +2379,41 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p) } /** + * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Print LMEM allocations across all VFs. + * VFs without LMEM allocation are skipped. + * + * This function can only be called on PF. + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p) +{ + unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); + const struct xe_gt_sriov_config *config; + char buf[10]; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + + for (n = 1; n <= total_vfs; n++) { + config = >->sriov.pf.vfs[n].config; + if (!config->lmem_obj) + continue; + + string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2, + buf, sizeof(buf)); + drm_printf(p, "VF%u:\t%zu\t(%s)\n", + n, config->lmem_obj->size, buf); + } + + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + return 0; +} + +/** * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges. 
* @gt: the &xe_gt * @p: the &drm_printer diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h index 42e64769f666..0c55aa40a1a7 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h @@ -54,6 +54,10 @@ int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long tim int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force); int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh); +ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size); +int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size); + bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_config_restart(struct xe_gt *gt); @@ -61,6 +65,7 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt); int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p); +int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 02f7328bd6ce..1f50aec3a059 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -9,9 +9,11 @@ #include "xe_device.h" #include "xe_gt.h" +#include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_sriov_pf_service.h" #include "xe_gt_sriov_printk.h" @@ -176,6 +178,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(PAUSE_SEND_PAUSE); CASE2STR(PAUSE_WAIT_GUC); CASE2STR(PAUSE_GUC_DONE); + CASE2STR(PAUSE_SAVE_GUC); CASE2STR(PAUSE_FAILED); CASE2STR(PAUSED); CASE2STR(RESUME_WIP); @@ -415,6 +418,10 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) * : | : / * : v : / * : PAUSE_GUC_DONE o-----restart + * : | : + * : | o---<--busy : + * : v / / : + * : PAUSE_SAVE_GUC : * : / : * : / : * :....o..............o...............o...........: @@ -434,6 +441,7 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid) pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC); } } @@ -464,12 +472,41 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid) pf_enter_vf_pause_failed(gt, vfid); } +static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) + pf_enter_vf_state_machine_bug(gt, vfid); +} + +static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) + return false; + + err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid); + if (err) { + /* retry if busy */ + if (err == -EBUSY) { + pf_enter_vf_pause_save_guc(gt, vfid); + return true; + } + /* give up on error */ + 
if (err == -EIO) + pf_enter_vf_mismatch(gt, vfid); + } + + pf_enter_vf_pause_completed(gt, vfid); + return true; +} + static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid) { if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE)) return false; - pf_enter_vf_pause_completed(gt, vfid); + pf_enter_vf_pause_save_guc(gt, vfid); return true; } @@ -1008,7 +1045,7 @@ static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid) if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO)) return false; - /* XXX: placeholder */ + xe_gt_sriov_pf_sanitize_hw(gt, vfid); pf_enter_vf_flr_send_finish(gt, vfid); return true; @@ -1338,6 +1375,9 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) if (pf_exit_vf_pause_guc_done(gt, vfid)) return true; + if (pf_exit_vf_pause_save_guc(gt, vfid)) + return true; + if (pf_exit_vf_resume_send_resume(gt, vfid)) return true; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index 11830aafea45..f02f941b4ad2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -27,6 +27,7 @@ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command. * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits a response from the GuC. * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC. + * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state. * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
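The retry handling wired into pf_exit_vf_pause_save_guc() above is easier to see in isolation. The sketch below uses plain flags and a hypothetical save_guc_state() in place of the driver's atomic state bits and migration call:

#include <errno.h>
#include <stdbool.h>

struct vf_sm {
	bool pause_save_guc;	/* stands in for XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC */
	bool paused;
	bool mismatch;
};

int save_guc_state(struct vf_sm *sm);	/* assumed: returns 0, -EBUSY or -EIO */

static bool step_pause_save_guc(struct vf_sm *sm)
{
	int err;

	if (!sm->pause_save_guc)
		return false;			/* not in this state, nothing done */
	sm->pause_save_guc = false;		/* consume the state bit first */

	err = save_guc_state(sm);
	if (err == -EBUSY) {
		sm->pause_save_guc = true;	/* re-enter the state, retry later */
		return true;
	}
	if (err == -EIO)
		sm->mismatch = true;		/* give up on a hard error */

	sm->paused = true;	/* the pause itself still completes */
	return true;
}

Returning true tells the caller that an event was consumed, matching how pf_process_vf_state_machine() loops over the exit handlers above.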
@@ -56,6 +57,7 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE, + XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC, XE_GT_SRIOV_STATE_PAUSE_FAILED, XE_GT_SRIOV_STATE_PAUSED, diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 2290ddaf9594..05df4ab3514b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -17,6 +17,7 @@ #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_debugfs.h" #include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_sriov_pf_policy.h" #include "xe_gt_sriov_pf_service.h" @@ -81,6 +82,11 @@ static const struct drm_info_list pf_info[] = { .data = xe_gt_sriov_pf_config_print_dbs, }, { + "lmem_provisioned", + .show = xe_gt_debugfs_simple_show, + .data = xe_gt_sriov_pf_config_print_lmem, + }, + { "runtime_registers", .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_pf_service_print_runtime, @@ -312,6 +318,9 @@ static const struct { { "stop", xe_gt_sriov_pf_control_stop_vf }, { "pause", xe_gt_sriov_pf_control_pause_vf }, { "resume", xe_gt_sriov_pf_control_resume_vf }, +#ifdef CONFIG_DRM_XE_DEBUG_SRIOV + { "restore!", xe_gt_sriov_pf_migration_restore_guc_state }, +#endif }; static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) @@ -375,6 +384,119 @@ static const struct file_operations control_ops = { .llseek = default_llseek, }; +/* + * /sys/kernel/debug/dri/0/ + * ├── gt0 + * │ ├── vf1 + * │ │ ├── guc_state + */ +static ssize_t guc_state_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + unsigned int vfid = extract_vfid(parent); + + return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos); +} + +static ssize_t guc_state_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + unsigned int vfid = extract_vfid(parent); + + if (*pos) + return -EINVAL; + + return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count); +} + +static const struct file_operations guc_state_ops = { + .owner = THIS_MODULE, + .read = guc_state_read, + .write = guc_state_write, + .llseek = default_llseek, +}; + +/* + * /sys/kernel/debug/dri/0/ + * ├── gt0 + * │ ├── vf1 + * │ │ ├── config_blob + */ +static ssize_t config_blob_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + unsigned int vfid = extract_vfid(parent); + ssize_t ret; + void *tmp; + + ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0); + if (!ret) + return -ENODATA; + if (ret < 0) + return ret; + + tmp = kzalloc(ret, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret); + if (ret > 0) + ret = simple_read_from_buffer(buf, count, pos, tmp, ret); + + kfree(tmp); + return ret; +} + +static ssize_t config_blob_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + 
unsigned int vfid = extract_vfid(parent); + ssize_t ret; + void *tmp; + + if (*pos) + return -EINVAL; + + if (!count) + return -ENODATA; + + if (count > SZ_4K) + return -EINVAL; + + tmp = kzalloc(count, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + if (copy_from_user(tmp, buf, count)) { + ret = -EFAULT; + } else { + ret = xe_gt_sriov_pf_config_restore(gt, vfid, tmp, count); + if (!ret) + ret = count; + } + kfree(tmp); + return ret; +} + +static const struct file_operations config_blob_ops = { + .owner = THIS_MODULE, + .read = config_blob_read, + .write = config_blob_write, + .llseek = default_llseek, +}; + /** * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs. * @gt: the &xe_gt to register @@ -423,5 +545,15 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) pf_add_config_attrs(gt, vfdentry, VFID(n)); debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops); + + /* for testing/debugging purposes only! */ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + debugfs_create_file("guc_state", + IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, + vfdentry, NULL, &guc_state_ops); + debugfs_create_file("config_blob", + IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, + vfdentry, NULL, &config_blob_ops); + } } } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c new file mode 100644 index 000000000000..c712111aa30d --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "abi/guc_actions_sriov_abi.h" +#include "xe_bo.h" +#include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" +#include "xe_gt_sriov_printk.h" +#include "xe_guc.h" +#include "xe_guc_ct.h" +#include "xe_sriov.h" + +/* Return: number of dwords saved/restored/required or a negative error code on failure */ +static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, + u64 addr, u32 ndwords) +{ + u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) | + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO, lower_32_bits(addr)), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI, upper_32_bits(addr)), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords), + }; + + return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request)); +} + +/* Return: size of the state in dwords or a negative error code on failure */ +static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + ret = guc_action_vf_save_restore(>->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0); + return ret ?: -ENODATA; +} + +/* Return: number of state dwords saved or a negative error code on failure */ +static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, + void *buff, size_t size) +{ + const int ndwords = size / sizeof(u32); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = tile_to_xe(tile); + struct xe_guc *guc = >->uc.guc; + struct xe_bo *bo; + int ret; + + xe_gt_assert(gt, size % sizeof(u32) == 0); + xe_gt_assert(gt, size == ndwords * sizeof(u32)); + 
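+ /* + * The GuC transfers the save/restore payload through the GGTT (the H2G + * action above carries a GGTT address), so the state is staged in a + * pinned, GGTT-mapped system-memory buffer object. + */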
+ bo = xe_bo_create_pin_map(xe, tile, NULL, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE, + xe_bo_ggtt_addr(bo), ndwords); + if (!ret) + ret = -ENODATA; + else if (ret > ndwords) + ret = -EPROTO; + else if (ret > 0) + xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32)); + + xe_bo_unpin_map_no_vm(bo); + return ret; +} + +/* Return: number of state dwords restored or a negative error code on failure */ +static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, + const void *buff, size_t size) +{ + const int ndwords = size / sizeof(u32); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = tile_to_xe(tile); + struct xe_guc *guc = >->uc.guc; + struct xe_bo *bo; + int ret; + + xe_gt_assert(gt, size % sizeof(u32) == 0); + xe_gt_assert(gt, size == ndwords * sizeof(u32)); + + bo = xe_bo_create_pin_map(xe, tile, NULL, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size); + + ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE, + xe_bo_ggtt_addr(bo), ndwords); + if (!ret) + ret = -ENODATA; + else if (ret > ndwords) + ret = -EPROTO; + + xe_bo_unpin_map_no_vm(bo); + return ret; +} + +static bool pf_migration_supported(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + return gt->sriov.pf.migration.supported; +} + +static struct mutex *pf_migration_mutex(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + return >->sriov.pf.migration.snapshot_lock; +} + +static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt, + unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + lockdep_assert_held(pf_migration_mutex(gt)); + + return >->sriov.pf.vfs[vfid].snapshot; +} + +static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +{ + return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs; +} + +static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +{ + struct xe_device *xe = gt_to_xe(gt); + + drmm_kfree(&xe->drm, snapshot->guc.buff); + snapshot->guc.buff = NULL; + snapshot->guc.size = 0; +} + +static int pf_alloc_guc_state(struct xe_gt *gt, + struct xe_gt_sriov_state_snapshot *snapshot, + size_t size) +{ + struct xe_device *xe = gt_to_xe(gt); + void *p; + + pf_free_guc_state(gt, snapshot); + + if (!size) + return -ENODATA; + + if (size % sizeof(u32)) + return -EINVAL; + + if (size > SZ_2M) + return -EFBIG; + + p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL); + if (!p) + return -ENOMEM; + + snapshot->guc.buff = p; + snapshot->guc.size = size; + return 0; +} + +static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +{ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { + unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot); + + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n", + vfid, snapshot->guc.size / sizeof(u32)); + print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET, + snapshot->guc.buff, min(SZ_64, snapshot->guc.size)); + } +} + +static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + 
struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + size_t size; + int ret; + + ret = pf_send_guc_query_vf_state_size(gt, vfid); + if (ret < 0) + goto fail; + size = ret * sizeof(u32); + xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size); + + ret = pf_alloc_guc_state(gt, snapshot, size); + if (ret < 0) + goto fail; + + ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size); + if (ret < 0) + goto fail; + size = ret * sizeof(u32); + xe_gt_assert(gt, size); + xe_gt_assert(gt, size <= snapshot->guc.size); + snapshot->guc.size = size; + + pf_dump_guc_state(gt, snapshot); + return 0; + +fail: + xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret)); + pf_free_guc_state(gt, snapshot); + return ret; +} + +/** + * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + mutex_lock(pf_migration_mutex(gt)); + err = pf_save_vf_guc_state(gt, vfid); + mutex_unlock(pf_migration_mutex(gt)); + + return err; +} + +static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + int ret; + + if (!snapshot->guc.size) + return -ENODATA; + + xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n", + snapshot->guc.size / sizeof(u32), vfid); + ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size); + if (ret < 0) + goto fail; + + xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid); + return 0; + +fail: + xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +/** + * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + mutex_lock(pf_migration_mutex(gt)); + ret = pf_restore_vf_guc_state(gt, vfid); + mutex_unlock(pf_migration_mutex(gt)); + + return ret; +} + +#ifdef CONFIG_DEBUG_FS +/** + * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @buf: the user space buffer to read to + * @count: the maximum number of bytes to read + * @pos: the current position in the buffer + * + * This function is for PF only. + * + * This function reads up to @count bytes from the saved VF GuC state buffer + * at offset @pos into the user space address starting at @buf. + * + * Return: the number of bytes read or a negative error code on failure. 
+ */
+ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
+						char __user *buf, size_t count, loff_t *pos)
+{
+	struct xe_gt_sriov_state_snapshot *snapshot;
+	ssize_t ret;
+
+	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+	xe_gt_assert(gt, vfid != PFID);
+	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+	if (!pf_migration_supported(gt))
+		return -ENOPKG;
+
+	mutex_lock(pf_migration_mutex(gt));
+	snapshot = pf_pick_vf_snapshot(gt, vfid);
+	if (snapshot->guc.size)
+		ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
+					      snapshot->guc.size);
+	else
+		ret = -ENODATA;
+	mutex_unlock(pf_migration_mutex(gt));
+
+	return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @buf: the user space buffer with GuC VF state
+ * @size: the size of GuC VF state (in bytes)
+ *
+ * This function is for PF only.
+ *
+ * This function reads @size bytes of the VF GuC state stored at user space
+ * address @buf and writes it into an internal VF state buffer.
+ *
+ * Return: the number of bytes used or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
+						 const char __user *buf, size_t size)
+{
+	struct xe_gt_sriov_state_snapshot *snapshot;
+	loff_t pos = 0;
+	ssize_t ret;
+
+	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+	xe_gt_assert(gt, vfid != PFID);
+	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+	if (!pf_migration_supported(gt))
+		return -ENOPKG;
+
+	mutex_lock(pf_migration_mutex(gt));
+	snapshot = pf_pick_vf_snapshot(gt, vfid);
+	ret = pf_alloc_guc_state(gt, snapshot, size);
+	if (!ret) {
+		ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
+		if (ret < 0)
+			pf_free_guc_state(gt, snapshot);
+		else
+			pf_dump_guc_state(gt, snapshot);
+	}
+	mutex_unlock(pf_migration_mutex(gt));
+
+	return ret;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static bool pf_check_migration_support(struct xe_gt *gt)
+{
+	/* GuC 70.25 with save/restore v2 is required */
+	xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0));
+
+	/* XXX: for now this is for feature enabling only */
+	return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
+ * @gt: the &xe_gt
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
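+ *
+ * Expected to be called once during PF initialization, after the GuC
+ * firmware version can be checked (see pf_check_migration_support()
+ * above); the snapshot mutex is drm-managed, so no explicit teardown
+ * is needed.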
+ */
+int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
+{
+	struct xe_device *xe = gt_to_xe(gt);
+	int err;
+
+	xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+	gt->sriov.pf.migration.supported = pf_check_migration_support(gt);
+
+	if (!pf_migration_supported(gt))
+		return 0;
+
+	err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
+	if (err)
+		return err;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
new file mode 100644
index 000000000000..09faeae00ddb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_MIGRATION_H_
+#define _XE_GT_SRIOV_PF_MIGRATION_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
+
+#ifdef CONFIG_DEBUG_FS
+ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
+						char __user *buf, size_t count, loff_t *pos);
+ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
+						 const char __user *buf, size_t count);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
new file mode 100644
index 000000000000..1f3110b6d44f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
+#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
+ *
+ * Used by the PF driver to maintain per-VF migration data.
+ */
+struct xe_gt_sriov_state_snapshot {
+	/** @guc: GuC VF state snapshot */
+	struct {
+		/** @guc.buff: buffer with the VF state */
+		u32 *buff;
+		/** @guc.size: size of the buffer (must be dword-aligned) */
+		u32 size;
+	} guc;
+};
+
+/**
+ * struct xe_gt_sriov_pf_migration - GT-level data.
+ *
+ * Used by the PF driver to maintain non-VF specific per-GT data.
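+ *
+ * A single GT-wide @snapshot_lock (rather than per-VF locks) keeps the
+ * locking simple; save/restore are infrequent control-path operations.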
+ */
+struct xe_gt_sriov_pf_migration {
+	/** @supported: indicates whether the feature is supported */
+	bool supported;
+
+	/** @snapshot_lock: protects all VFs snapshots */
+	struct mutex snapshot_lock;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 0e23b7ea4f3e..924e75b94aec 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -237,7 +237,7 @@ static void read_many(struct xe_gt *gt, unsigned int count,
 		      const struct xe_reg *regs, u32 *values)
 {
 	while (count--)
-		*values++ = xe_mmio_read32(gt, *regs++);
+		*values++ = xe_mmio_read32(&gt->mmio, *regs++);
 }
 
 static void pf_prepare_runtime_info(struct xe_gt *gt)
@@ -402,7 +402,7 @@ static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
 
 	for (i = 0; i < count; ++i, ++data) {
 		addr = runtime->regs[start + i].addr;
-		data->offset = xe_mmio_adjusted_addr(gt, addr);
+		data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
 		data->value = runtime->values[start + i];
 	}
 
@@ -513,7 +513,7 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p
 
 	for (; size--; regs++, values++) {
 		drm_printf(p, "reg[%#x] = %#x\n",
-			   xe_mmio_adjusted_addr(gt, regs->addr), *values);
+			   xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 28e1b130bf87..0426b1a77069 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -10,6 +10,7 @@
 
 #include "xe_gt_sriov_pf_config_types.h"
 #include "xe_gt_sriov_pf_control_types.h"
+#include "xe_gt_sriov_pf_migration_types.h"
 #include "xe_gt_sriov_pf_monitor_types.h"
 #include "xe_gt_sriov_pf_policy_types.h"
 #include "xe_gt_sriov_pf_service_types.h"
@@ -29,6 +30,9 @@ struct xe_gt_sriov_metadata {
 
 	/** @version: negotiated VF/PF ABI version */
 	struct xe_gt_sriov_pf_service_version version;
+
+	/** @snapshot: snapshot of the VF state data */
+	struct xe_gt_sriov_state_snapshot snapshot;
 };
 
 /**
@@ -36,6 +40,7 @@ struct xe_gt_sriov_metadata {
  * @service: service data.
  * @control: control data.
  * @policy: policy data.
+ * @migration: migration data.
  * @spare: PF-only provisioning configuration.
  * @vfs: metadata for all VFs.
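+ * (Each VF's migration snapshot hangs off @vfs via the per-VF
+ * struct xe_gt_sriov_metadata.snapshot member.)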
 */
@@ -43,6 +48,7 @@ struct xe_gt_sriov_pf {
 	struct xe_gt_sriov_pf_service service;
 	struct xe_gt_sriov_pf_control control;
 	struct xe_gt_sriov_pf_policy policy;
+	struct xe_gt_sriov_pf_migration migration;
 	struct xe_gt_sriov_spare_config spare;
 	struct xe_gt_sriov_metadata *vfs;
 };
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4ebc82e607af..d3baba50f085 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -881,7 +881,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
 */
 u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
 {
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
 	struct vf_runtime_reg *rr;
 
 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -917,7 +917,7 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
 */
 void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
 {
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
 
 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
 	xe_gt_assert(gt, !reg.vf);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
index f3ddcbefc6bc..2ed5b6780d30 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
@@ -33,7 +33,7 @@ static const struct drm_info_list vf_info[] = {
 		.show = xe_gt_debugfs_simple_show,
 		.data = xe_gt_sriov_vf_print_version,
 	},
-#if defined(CONFIG_DRM_XE_DEBUG) || defined(CONFIG_DRM_XE_DEBUG_SRIOV)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) || IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)
 	{
 		"runtime_regs",
 		.show = xe_gt_debugfs_simple_show,
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 25963e33a383..03b225364101 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -41,9 +41,9 @@ u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt)
 
 	xe_pm_runtime_get(gt_to_xe(gt));
 	if (xe_gt_is_media_type(gt))
-		reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
+		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_PERF_LIMIT_REASONS);
 	else
-		reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+		reg = xe_mmio_read32(&gt->mmio, GT0_PERF_LIMIT_REASONS);
 	xe_pm_runtime_put(gt_to_xe(gt));
 
 	return reg;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 9d82ea30f4df..3cb228c773cd 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -270,6 +270,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
 int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
+	unsigned int fw_ref;
 
 	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
 	    gt->uc.guc.submission_state.enabled) {
@@ -283,20 +284,22 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 
 		xe_gt_tlb_invalidation_fence_wait(&fence);
 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+		struct xe_mmio *mmio = &gt->mmio;
+
 		if (IS_SRIOV_VF(xe))
 			return 0;
 
-		xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
-			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
 					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
-			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
 					PVC_GUC_TLB_INV_DESC0_VALID);
 		} else {
-			xe_mmio_write32(gt, GUC_TLB_INV_CR,
+			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
 					GUC_TLB_INV_CR_INVALIDATE);
 		}
-		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 0662f71c6ede..df2042db7ee6 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -5,6 +5,7 @@
 
 #include "xe_gt_topology.h"
 
+#include <generated/xe_wa_oob.h>
 #include <linux/bitmap.h>
 #include <linux/compiler.h>
 
@@ -12,6 +13,7 @@
 #include "xe_assert.h"
 #include "xe_gt.h"
 #include "xe_mmio.h"
+#include "xe_wa.h"
 
 static void
 load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
@@ -25,7 +27,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
 	va_start(argp, numregs);
 	for (i = 0; i < numregs; i++)
-		fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
+		fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
 	va_end(argp);
 
 	bitmap_from_arr32(mask, fuse_val, numregs * 32);
@@ -35,7 +37,7 @@ static void
 load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type)
 {
 	struct xe_device *xe = gt_to_xe(gt);
-	u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
+	u32 reg_val = xe_mmio_read32(&gt->mmio, XELP_EU_ENABLE);
 	u32 val = 0;
 	int i;
 
@@ -127,7 +129,19 @@ static void
 load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
 {
 	struct xe_device *xe = gt_to_xe(gt);
-	u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+	u32 fuse3 = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
+
+	/*
+	 * PTL platforms with media version 30.00 do not provide proper values
+	 * for the media GT's L3 bank registers. Skip the readout since we
+	 * don't have any way to obtain real values.
+	 *
+	 * This may get re-described as an official workaround in the future,
+	 * but there's no tracking number assigned yet so we use a custom
+	 * OOB workaround descriptor.
+	 */
+	if (XE_WA(gt, no_media_l3))
+		return;
 
 	if (GRAPHICS_VER(xe) >= 20) {
 		xe_l3_bank_mask_t per_node = {};
@@ -141,7 +155,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
 		xe_l3_bank_mask_t per_node = {};
 		xe_l3_bank_mask_t per_mask_bit = {};
 		u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
-		u32 fuse4 = xe_mmio_read32(gt, XEHP_FUSE4);
+		u32 fuse4 = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
 		u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4);
 
 		bitmap_set_value8(per_mask_bit, 0x3, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 3d1c51de0268..a287b98ee70b 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -6,6 +6,7 @@
 #ifndef _XE_GT_TYPES_H_
 #define _XE_GT_TYPES_H_
 
+#include "xe_device_types.h"
 #include "xe_force_wake_types.h"
 #include "xe_gt_idle_types.h"
 #include "xe_gt_sriov_pf_types.h"
@@ -145,19 +146,20 @@ struct xe_gt {
 	/**
 	 * @mmio: mmio info for GT. All GTs within a tile share the same
 	 * register space, but have their own copy of GSI registers at a
-	 * specific offset, as well as their own forcewake handling.
+	 * specific offset.
+	 */
+	struct xe_mmio mmio;
+
+	/**
+	 * @pm: power management info for GT. The driver uses the GT's
+	 * "force wake" interface to wake up specific parts of the GT hardware
+	 * from C6 sleep states and ensure the hardware remains awake while it
+	 * is being actively used.
	 */
 	struct {
-		/** @mmio.fw: force wake for GT */
+		/** @pm.fw: force wake for GT */
 		struct xe_force_wake fw;
-		/**
-		 * @mmio.adj_limit: adjust MMIO address if address is below this
-		 * value
-		 */
-		u32 adj_limit;
-		/** @mmio.adj_offset: offect to add to MMIO address when adjusting */
-		u32 adj_offset;
-	} mmio;
+	} pm;
 
 	/** @sriov: virtualization data related to GT */
 	union {
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 52df28032a6f..7f704346a8f4 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -14,6 +14,7 @@
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_gtt_defs.h"
 #include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_force_wake.h"
@@ -22,6 +23,7 @@
 #include "xe_gt_sriov_vf.h"
 #include "xe_gt_throttle.h"
 #include "xe_guc_ads.h"
+#include "xe_guc_capture.h"
 #include "xe_guc_ct.h"
 #include "xe_guc_db_mgr.h"
 #include "xe_guc_hwconfig.h"
@@ -68,7 +70,7 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
 
 static u32 guc_ctl_feature_flags(struct xe_guc *guc)
 {
-	u32 flags = 0;
+	u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;
 
 	if (!guc_to_xe(guc)->info.skip_guc_pc)
 		flags |= GUC_CTL_ENABLE_SLPC;
@@ -236,20 +238,21 @@ static void guc_write_params(struct xe_guc *guc)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);
+	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);
 
 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
-		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
+		xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
 }
 
 static void guc_fini_hw(void *arg)
 {
 	struct xe_guc *guc = arg;
 	struct xe_gt *gt = guc_to_gt(guc);
+	unsigned int fw_ref;
 
-	xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
-	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 }
 
 /**
@@ -338,6 +341,10 @@ int xe_guc_init(struct xe_guc *guc)
 	if (ret)
 		goto out;
 
+	ret = xe_guc_capture_init(guc);
+	if (ret)
+		goto out;
+
 	ret = xe_guc_ads_init(&guc->ads);
 	if (ret)
 		goto out;
@@ -425,6 +432,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
 int xe_guc_reset(struct xe_guc *guc)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
+	struct xe_mmio *mmio = &gt->mmio;
 	u32 guc_status, gdrst;
 	int ret;
 
@@ -433,15 +441,15 @@ int xe_guc_reset(struct xe_guc *guc)
 	if (IS_SRIOV_VF(gt_to_xe(gt)))
 		return xe_gt_sriov_vf_bootstrap(gt);
 
-	xe_mmio_write32(gt, GDRST, GRDOM_GUC);
+	xe_mmio_write32(mmio, GDRST, GRDOM_GUC);
 
-	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
+	ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
 	if (ret) {
 		xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
 		goto err_out;
 	}
 
-	guc_status = xe_mmio_read32(gt, GUC_STATUS);
+	guc_status = xe_mmio_read32(mmio, GUC_STATUS);
 	if (!(guc_status & GS_MIA_IN_RESET)) {
 		xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
 			  guc_status);
@@ -459,6 +467,7 @@ err_out:
 static void guc_prepare_xfer(struct xe_guc *guc)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
+	struct xe_mmio *mmio = &gt->mmio;
 	struct xe_device *xe = guc_to_xe(guc);
 	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
 		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
@@ -473,12 +482,12 @@ static void guc_prepare_xfer(struct xe_guc *guc)
 		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);
 
 	/* Must program this register before loading the ucode with DMA */
-	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
+	xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);
 
-	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+	xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 
 	/* Make sure GuC receives ARAT interrupts */
-	xe_mmio_rmw32(gt, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
+	xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
 }
 
 /*
@@ -494,7 +503,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
 	if (guc->fw.rsa_size > 256) {
 		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
 				    xe_uc_fw_rsa_offset(&guc->fw);
-		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
+		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
 		return 0;
 	}
 
@@ -503,7 +512,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
 		return -ENOMEM;
 
 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
-		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);
+		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);
 
 	return 0;
 }
@@ -583,7 +592,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
 * extreme thermal throttling. And a system that is that hot during boot is probably
 * dead anyway!
 */
-#if defined(CONFIG_DRM_XE_DEBUG)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
 #define GUC_LOAD_RETRY_LIMIT	20
 #else
 #define GUC_LOAD_RETRY_LIMIT	3
@@ -593,6 +602,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
 static void guc_wait_ucode(struct xe_guc *guc)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
+	struct xe_mmio *mmio = &gt->mmio;
 	struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
 	ktime_t before, after, delta;
 	int load_done;
@@ -619,7 +629,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
 		 * timeouts rather than allowing a huge timeout each time. So basically, need
 		 * to treat a timeout no different to a value change.
 		 */
-		ret = xe_mmio_wait32_not(gt, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
+		ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
					 last_status, 1000 * 1000, &status, false);
 		if (ret < 0)
 			count++;
@@ -657,7 +667,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
 			switch (bootrom) {
 			case XE_BOOTROM_STATUS_NO_KEY_FOUND:
 				xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
-					  xe_mmio_read32(gt, GUC_HEADER_INFO));
+					  xe_mmio_read32(mmio, GUC_HEADER_INFO));
 				break;
 
 			case XE_BOOTROM_STATUS_RSA_FAILED:
@@ -672,7 +682,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
 			switch (ukernel) {
 			case XE_GUC_LOAD_STATUS_EXCEPTION:
 				xe_gt_err(gt, "firmware exception. EIP: %#x\n",
-					  xe_mmio_read32(gt, SOFT_SCRATCH(13)));
+					  xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
 				break;
 
 			case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
@@ -824,10 +834,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
+	msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
 	msg &= XE_GUC_RECV_MSG_EXCEPTION |
 		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
-	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
+	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);
 
 	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
 		xe_gt_err(gt, "Received early GuC crash dump notification!\n");
@@ -844,14 +854,14 @@ static void guc_enable_irq(struct xe_guc *guc)
 		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
 
 	/* Primary GuC and media GuC share a single enable bit */
-	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
+	xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
 			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
 
 	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
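+	 * For example, the media GuC clears only its own GUC2HOST mask
+	 * bits here and leaves the primary GuC's mask bits untouched.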
	 */
-	xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
+	xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
 }
 
 int xe_guc_enable_communication(struct xe_guc *guc)
@@ -863,7 +873,7 @@ int xe_guc_enable_communication(struct xe_guc *guc)
 		struct xe_gt *gt = guc_to_gt(guc);
 		struct xe_tile *tile = gt_to_tile(gt);
 
-		err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
+		err = xe_memirq_init_guc(&tile->memirq, guc);
 		if (err)
 			return err;
 	} else {
@@ -907,7 +917,7 @@ void xe_guc_notify(struct xe_guc *guc)
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use default value in the meantime.
	 */
-	xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
+	xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
 }
 
 int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
@@ -925,6 +935,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
 {
 	struct xe_device *xe = guc_to_xe(guc);
 	struct xe_gt *gt = guc_to_gt(guc);
+	struct xe_mmio *mmio = &gt->mmio;
 	u32 header, reply;
 	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
 		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
@@ -947,19 +958,19 @@ retry:
 	/* Not in critical data-path, just do if else for GT type */
 	if (xe_gt_is_media_type(gt)) {
 		for (i = 0; i < len; ++i)
-			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
+			xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
 					request[i]);
-		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
+		xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
 	} else {
 		for (i = 0; i < len; ++i)
-			xe_mmio_write32(gt, VF_SW_FLAG(i),
+			xe_mmio_write32(mmio, VF_SW_FLAG(i),
 					request[i]);
-		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
+		xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
 	}
 
 	xe_guc_notify(guc);
 
-	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
+	ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
 			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
 			     50000, &reply, false);
 	if (ret) {
@@ -969,7 +980,7 @@ timeout:
 		return ret;
 	}
 
-	header = xe_mmio_read32(gt, reply_reg);
+	header = xe_mmio_read32(mmio, reply_reg);
 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
 	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
 		/*
@@ -985,7 +996,7 @@ timeout:
 		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
 		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
 
-		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
+		ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
 				     1000000, &header, false);
 
 		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
@@ -1032,7 +1043,7 @@ proto:
 		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
 			reply_reg.addr += sizeof(u32);
-			response_buf[i] = xe_mmio_read32(gt, reply_reg);
+			response_buf[i] = xe_mmio_read32(mmio, reply_reg);
 		}
 	}
 
@@ -1145,17 +1156,17 @@ int xe_guc_start(struct xe_guc *guc)
 void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
+	unsigned int fw_ref;
 	u32 status;
-	int err;
 	int i;
 
 	xe_uc_fw_print(&guc->fw, p);
 
-	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-	if (err)
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (!fw_ref)
 		return;
 
-	status = xe_mmio_read32(gt, GUC_STATUS);
+	status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
 
 	drm_printf(p, "\nGuC status 0x%08x:\n", status);
 	drm_printf(p, "\tBootrom status = 0x%x\n",
@@ -1170,12 +1181,15 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 	drm_puts(p, "\nScratch registers:\n");
 	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
 		drm_printf(p, "\t%2d: \t0x%x\n",
-			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
+			   i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
 	}
 
-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 
+	drm_puts(p, "\n");
 	xe_guc_ct_print(&guc->ct, p, false);
+
+	drm_puts(p, "\n");
 	xe_guc_submit_print(guc, p);
 }
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 42116b167c98..58338be44558 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -82,4 +82,9 @@ static inline struct xe_device *guc_to_xe(struct xe_guc *guc)
 	return gt_to_xe(guc_to_gt(guc));
 }
 
+static inline struct drm_device *guc_to_drm(struct xe_guc *guc)
+{
+	return &guc_to_xe(guc)->drm;
+}
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index d1902a8581ca..4e746ae98888 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -5,6 +5,8 @@
 
 #include "xe_guc_ads.h"
 
+#include <linux/fault-inject.h>
+
 #include <drm/drm_managed.h>
 
 #include <generated/xe_wa_oob.h>
@@ -18,6 +20,7 @@
 #include "xe_gt_ccs_mode.h"
 #include "xe_gt_printk.h"
 #include "xe_guc.h"
+#include "xe_guc_capture.h"
 #include "xe_guc_ct.h"
 #include "xe_hw_engine.h"
 #include "xe_lrc.h"
@@ -149,8 +152,7 @@ static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
 
 static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
 {
-	/* FIXME: Allocate a proper capture list */
-	return PAGE_ALIGN(PAGE_SIZE);
+	return PAGE_ALIGN(ads->capture_size);
 }
 
 static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
@@ -357,6 +359,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
 					GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
 					&offset, &remain);
 
+	if (XE_WA(gt, 14022866841))
+		guc_waklv_enable_simple(ads,
+					GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
+					&offset, &remain);
+
 	/*
	 * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
	 * the default value for this register is determined to be 0xC40. This could change in the
@@ -404,6 +411,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
 	struct xe_bo *bo;
 
 	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
 	ads->regset_size = calculate_regset_size(gt);
 	ads->ads_waklv_size = calculate_waklv_size(ads);
 
@@ -418,14 +426,15 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
 
 	return 0;
 }
+ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */
 
 /**
 * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
 * @ads: Additional data structures object
 *
- * Recalcuate golden_lrc_size & regset_size as the number hardware engines may
- * have changed after the hwconfig was loaded. Also verify the new sizes fit in
- * the already allocated ADS buffer object.
+ * Recalculate golden_lrc_size, capture_size and regset_size as the number of
+ * hardware engines may have changed after the hwconfig was loaded. Also verify
+ * the new sizes fit in the already allocated ADS buffer object.
 *
 * Return: 0 on success, negative error code on error.
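+ *
+ * Since the pre-hwconfig allocation used worst-case sizes, the values
+ * recomputed here can only stay the same or shrink, which the asserts
+ * below are expected to confirm.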
 */
@@ -437,6 +446,8 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
 	xe_gt_assert(gt, ads->bo);
 
 	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+	/* Recalculate capture size using the worst-case size */
+	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
 	ads->regset_size = calculate_regset_size(gt);
 
 	xe_gt_assert(gt, ads->golden_lrc_size +
@@ -536,20 +547,148 @@ static void guc_mapping_table_init(struct xe_gt *gt,
 	}
 }
 
-static void guc_capture_list_init(struct xe_guc_ads *ads)
+static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map,
+				       enum guc_capture_list_class_type capture_class)
+{
+	struct xe_device *xe = gt_to_xe(gt);
+	u32 mask;
+
+	switch (capture_class) {
+	case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
+		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
+		mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
+		break;
+	case GUC_CAPTURE_LIST_CLASS_VIDEO:
+		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
+		break;
+	case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
+		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
+		break;
+	case GUC_CAPTURE_LIST_CLASS_BLITTER:
+		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
+		break;
+	case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
+		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
+		break;
+	default:
+		mask = 0;
+	}
+
+	return mask;
+}
+
+static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt,
+				    int owner, int type, int class, u32 *total_size, size_t *size,
+				    void **pptr)
+{
+	*size = 0;
+
+	if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) {
+		if (*total_size + *size > ads->capture_size)
+			xe_gt_dbg(gt, "Capture size overflow: %zu vs %d\n",
+				  *total_size + *size, ads->capture_size);
+		else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr))
+			return false;
+	}
+
+	return true;
+}
+
+static int guc_capture_prep_lists(struct xe_guc_ads *ads)
 {
+	struct xe_guc *guc = ads_to_guc(ads);
+	struct xe_gt *gt = ads_to_gt(ads);
+	u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
+	struct iosys_map info_map;
+	size_t size = 0;
+	void *ptr;
 	int i, j;
-	u32 addr = xe_bo_ggtt_addr(ads->bo) + guc_ads_capture_offset(ads);
 
-	/* FIXME: Populate a proper capture list */
+	/*
+	 * GuC Capture's steered reg-list needs to be allocated and initialized
+	 * after the GuC-hwconfig is available, which is guaranteed from here.
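+	 * (The hwconfig provides the DSS fuse masks that determine how many
+	 * per-DSS copies of each steered register must be captured.)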
+	 */
+	xe_guc_capture_steered_list_init(ads_to_guc(ads));
+
+	capture_offset = guc_ads_capture_offset(ads);
+	ads_ggtt = xe_bo_ggtt_addr(ads->bo);
+	info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+					 offsetof(struct __guc_ads_blob, system_info));
+
+	/* first, set aside the first page for a capture_list with zero descriptors */
+	total_size = PAGE_SIZE;
+	if (!xe_guc_capture_getnullheader(guc, &ptr, &size))
+		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size);
+
+	null_ggtt = ads_ggtt + capture_offset;
+	capture_offset += PAGE_SIZE;
+
+	/*
+	 * Populate the capture list: at this point the ADS is already
+	 * allocated and mapped to its worst-case size
+	 */
 	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
-		for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
-			ads_blob_write(ads, ads.capture_instance[i][j], addr);
-			ads_blob_write(ads, ads.capture_class[i][j], addr);
+		bool write_empty_list;
+
+		for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
+			u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j);
+			/* null list if we don't have said engine or list */
+			if (!engine_mask) {
+				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
+				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
+				continue;
+			}
+
+			/* engine exists: start with engine-class registers */
+			write_empty_list = get_capture_list(ads, guc, gt, i,
+							    GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+							    j, &total_size, &size, &ptr);
+			if (!write_empty_list) {
+				ads_blob_write(ads, ads.capture_class[i][j],
+					       ads_ggtt + capture_offset);
+				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
+						 ptr, size);
+				total_size += size;
+				capture_offset += size;
+			} else {
+				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
+			}
+
+			/* engine exists: next, engine-instance registers */
+			write_empty_list = get_capture_list(ads, guc, gt, i,
+							    GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+							    j, &total_size, &size, &ptr);
+			if (!write_empty_list) {
+				ads_blob_write(ads, ads.capture_instance[i][j],
+					       ads_ggtt + capture_offset);
+				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
+						 ptr, size);
+				total_size += size;
+				capture_offset += size;
+			} else {
+				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
+			}
 		}
 
-		ads_blob_write(ads, ads.capture_global[i], addr);
+		/* global registers are last in our PF/VF loops */
+		write_empty_list = get_capture_list(ads, guc, gt, i,
+						    GUC_STATE_CAPTURE_TYPE_GLOBAL,
+						    0, &total_size, &size, &ptr);
+		if (!write_empty_list) {
+			ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset);
+			xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr,
+					 size);
+			total_size += size;
+			capture_offset += size;
+		} else {
+			ads_blob_write(ads, ads.capture_global[i], null_ggtt);
+		}
 	}
+
+	if (ads->capture_size != PAGE_ALIGN(total_size))
+		xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
+			  ads->capture_size, PAGE_ALIGN(total_size));
+	return PAGE_ALIGN(total_size);
 }
 
 static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
@@ -684,7 +823,7 @@ static void guc_doorbell_init(struct xe_guc_ads *ads)
 
 	if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
 		u32 distdbreg =
-			xe_mmio_read32(gt, DIST_DBS_POPULATED);
+			xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED);
 
 		ads_blob_write(ads,
 			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
@@ -738,7 +877,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
 	guc_mmio_reg_state_init(ads);
 	guc_prep_golden_lrc_null(ads);
 	guc_mapping_table_init(gt, &info_map);
-
guc_capture_list_init(ads);
+	guc_capture_prep_lists(ads);
 	guc_doorbell_init(ads);
 	guc_waklv_init(ads);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 2de5decfe0fd..70c132458ac3 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -22,6 +22,8 @@ struct xe_guc_ads {
 	u32 regset_size;
 	/** @ads_waklv_size: total waklv size supported by platform */
 	u32 ads_waklv_size;
+	/** @capture_size: size of register set passed to GuC for capture */
+	u32 capture_size;
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
new file mode 100644
index 000000000000..cc72446a5de1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -0,0 +1,1975 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_capture_abi.h"
+#include "abi/guc_log_abi.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_regs.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ads.h"
+#include "xe_guc_capture.h"
+#include "xe_guc_capture_types.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_log.h"
+#include "xe_guc_submit_types.h"
+#include "xe_guc_submit.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_engine.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_sched_job.h"
+
+/*
+ * struct __guc_capture_bufstate
+ *
+ * Book-keeping structure used to track read and write pointers
+ * as we extract error capture data from the GuC-log-buffer's
+ * error-capture region as a stream of dwords.
+ */
+struct __guc_capture_bufstate {
+	u32 size;
+	u32 data_offset;
+	u32 rd;
+	u32 wr;
+};
+
+/*
+ * struct __guc_capture_parsed_output - extracted error capture node
+ *
+ * A single unit of extracted error-capture output data grouped together
+ * at an engine-instance level. We keep these nodes in a linked list.
+ * See cachelist and outlist below.
+ */
+struct __guc_capture_parsed_output {
+	/*
+	 * A single set of 3 capture lists: a global-list,
+	 * an engine-class-list and an engine-instance list.
+	 * outlist in __guc_capture_parsed_output will keep
+	 * a linked list of these nodes that will eventually
+	 * be detached from outlist and attached into
+	 * xe_codedump in response to a context reset
+	 */
+	struct list_head link;
+	bool is_partial;
+	u32 eng_class;
+	u32 eng_inst;
+	u32 guc_id;
+	u32 lrca;
+	u32 type;
+	bool locked;
+	enum xe_hw_engine_snapshot_source_id source;
+	struct gcap_reg_list_info {
+		u32 vfid;
+		u32 num_regs;
+		struct guc_mmio_reg *regs;
+	} reginfo[GUC_STATE_CAPTURE_TYPE_MAX];
+#define GCAP_PARSED_REGLIST_INDEX_GLOBAL	BIT(GUC_STATE_CAPTURE_TYPE_GLOBAL)
+#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS	BIT(GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS)
+};
+
+/*
+ * Define all device tables of GuC error capture register lists
+ * NOTE:
+ *     For engine-registers, GuC only needs the register offsets
+ *     from the engine-mmio-base
+ *
+ *     64 bit registers need 2 entries, one for the low 32 bit register and one
+ *     for the high 32 bit register, for example:
+ *       Register                 data_type          flags  mask  Register name
+ *     { XXX_REG_LO(0),       REG_64BIT_LOW_DW,        0,    0,   NULL},
+ *     { XXX_REG_HI(0),       REG_64BIT_HI_DW,         0,    0,   "XXX_REG"},
+ *     1. data_type: Indicates whether the entry is the low or high dword of a
+ *        64 bit register. A 64 bit register define requires 2 consecutive
+ *        entries, with the low dword first and the high dword second.
+ *     2. Register name: NULL for the incomplete (low dword) entry
+ */
+#define COMMON_XELP_BASE_GLOBAL \
+	{ FORCEWAKE_GT,		REG_32BIT,	0,	0,	"FORCEWAKE_GT"}
+
+#define COMMON_BASE_ENGINE_INSTANCE \
+	{ RING_HWSTAM(0),		REG_32BIT,	0,	0,	"HWSTAM"}, \
+	{ RING_HWS_PGA(0),		REG_32BIT,	0,	0,	"RING_HWS_PGA"}, \
+	{ RING_HEAD(0),			REG_32BIT,	0,	0,	"RING_HEAD"}, \
+	{ RING_TAIL(0),			REG_32BIT,	0,	0,	"RING_TAIL"}, \
+	{ RING_CTL(0),			REG_32BIT,	0,	0,	"RING_CTL"}, \
+	{ RING_MI_MODE(0),		REG_32BIT,	0,	0,	"RING_MI_MODE"}, \
+	{ RING_MODE(0),			REG_32BIT,	0,	0,	"RING_MODE"}, \
+	{ RING_ESR(0),			REG_32BIT,	0,	0,	"RING_ESR"}, \
+	{ RING_EMR(0),			REG_32BIT,	0,	0,	"RING_EMR"}, \
+	{ RING_EIR(0),			REG_32BIT,	0,	0,	"RING_EIR"}, \
+	{ RING_IMR(0),			REG_32BIT,	0,	0,	"RING_IMR"}, \
+	{ RING_IPEHR(0),		REG_32BIT,	0,	0,	"IPEHR"}, \
+	{ RING_INSTDONE(0),		REG_32BIT,	0,	0,	"RING_INSTDONE"}, \
+	{ INDIRECT_RING_STATE(0),	REG_32BIT,	0,	0,	"INDIRECT_RING_STATE"}, \
+	{ RING_ACTHD(0),		REG_64BIT_LOW_DW, 0,	0,	NULL}, \
+	{ RING_ACTHD_UDW(0),		REG_64BIT_HI_DW, 0,	0,	"ACTHD"}, \
+	{ RING_BBADDR(0),		REG_64BIT_LOW_DW, 0,	0,	NULL}, \
+	{ RING_BBADDR_UDW(0),		REG_64BIT_HI_DW, 0,	0,	"RING_BBADDR"}, \
+	{ RING_START(0),		REG_64BIT_LOW_DW, 0,	0,	NULL}, \
+	{ RING_START_UDW(0),		REG_64BIT_HI_DW, 0,	0,	"RING_START"}, \
+	{ RING_DMA_FADD(0),		REG_64BIT_LOW_DW, 0,	0,	NULL}, \
+	{ RING_DMA_FADD_UDW(0),		REG_64BIT_HI_DW, 0,	0,	"RING_DMA_FADD"}, \
+	{ RING_EXECLIST_STATUS_LO(0),	REG_64BIT_LOW_DW, 0,	0,	NULL}, \
+	{ RING_EXECLIST_STATUS_HI(0),	REG_64BIT_HI_DW, 0,	0,	"RING_EXECLIST_STATUS"}, \
+	{ RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0,	NULL}, \
+	{ RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0,	0,	"RING_EXECLIST_SQ_CONTENTS"}
+
+#define COMMON_XELP_RC_CLASS \
+	{ RCU_MODE,		REG_32BIT,	0,	0,	"RCU_MODE"}
+
+#define COMMON_XELP_RC_CLASS_INSTDONE \
+	{ SC_INSTDONE,		REG_32BIT,	0,	0,	"SC_INSTDONE"}, \
+	{ SC_INSTDONE_EXTRA,	REG_32BIT,	0,	0,	"SC_INSTDONE_EXTRA"}, \
+	{ SC_INSTDONE_EXTRA2,	REG_32BIT,	0,	0,	"SC_INSTDONE_EXTRA2"}
+
+#define XELP_VEC_CLASS_REGS \
+	{ SFC_DONE(0),		0,	0,	0,	"SFC_DONE[0]"}, \
+	{ SFC_DONE(1),		0,	0,	0,	"SFC_DONE[1]"}, \
+	{ SFC_DONE(2),		0,	0,	0,	"SFC_DONE[2]"}, \
+	{ SFC_DONE(3),		0,	0,	0,	"SFC_DONE[3]"}
+
+/* XE_LP Global */
+static const struct
__guc_mmio_reg_descr xe_lp_global_regs[] = {
+	COMMON_XELP_BASE_GLOBAL,
+};
+
+/* Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_rc_inst_regs[] = {
+	COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Render / Compute Engine-Class */
+static const struct __guc_mmio_reg_descr xe_rc_class_regs[] = {
+	COMMON_XELP_RC_CLASS,
+	COMMON_XELP_RC_CLASS_INSTDONE,
+};
+
+/* Render / Compute Engine-Class for xehpg */
+static const struct __guc_mmio_reg_descr xe_hpg_rc_class_regs[] = {
+	COMMON_XELP_RC_CLASS,
+};
+
+/* Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_vd_inst_regs[] = {
+	COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Video Enhancement Engine-Class */
+static const struct __guc_mmio_reg_descr xe_vec_class_regs[] = {
+	XELP_VEC_CLASS_REGS,
+};
+
+/* Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_vec_inst_regs[] = {
+	COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_blt_inst_regs[] = {
+	COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* XE_LP - GSC Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
+	COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/*
+ * Empty list to prevent warnings about unknown class/instance types
+ * as not all class/instance types have entries on all platforms.
+ */
+static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+};
+
+#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
+#define TO_GCAP_DEF_TYPE(x) (GUC_STATE_CAPTURE_TYPE_##x)
+#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
+	{ \
+		regslist, \
+		ARRAY_SIZE(regslist), \
+		TO_GCAP_DEF_OWNER(regsowner), \
+		TO_GCAP_DEF_TYPE(regstype), \
+		class \
+	}
+
+/* List of lists for legacy graphics product version < 1255 */
+static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
+	MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+	MAKE_REGLIST(xe_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+	MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+	MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+	MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+	MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+	MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+	MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+	{}
+};
+
+/* List of lists for graphics product version >= 1255 */
+static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = {
+	MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+	MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+	MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+	MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+	MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+	MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+	MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+	MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+	{}
+};
+
+static const char * const capture_list_type_names[] = {
+	"Global",
+	"Class",
+	"Instance",
+};
+
+static const char * const capture_engine_class_names[] = {
+	"Render/Compute",
+	"Video",
+	"VideoEnhance",
+	"Blitter",
+	"GSC-Other",
+};
+
+struct __guc_capture_ads_cache {
+	bool is_valid;
+	void *ptr;
+	size_t size;
+	int status;
+};
+
+struct xe_guc_state_capture {
+	const struct __guc_mmio_reg_descr_group *reglists;
+	/**
+	 * NOTE: steered registers have multiple instances depending on the HW
+	 * configuration (slices or dual-sub-slices) and thus depend on the HW
+	 * fuses discovered
+	 */
+	struct __guc_mmio_reg_descr_group *extlists;
+	struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
+						[GUC_STATE_CAPTURE_TYPE_MAX]
+						[GUC_CAPTURE_LIST_CLASS_MAX];
+	void *ads_null_cache;
+	struct list_head cachelist;
+#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
+#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+
+	int max_mmio_per_node;
+	struct list_head outlist;
+};
+
+static void
+guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
+					   struct __guc_capture_parsed_output *node);
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct xe_device *xe)
+{
+	if (GRAPHICS_VERx100(xe) >= 1255)
+		return xe_hpg_lists;
+	else
+		return xe_lp_lists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+			 u32 owner, u32 type, enum guc_capture_list_class_type capture_class)
+{
+	int i;
+
+	if (!reglists)
+		return NULL;
+
+	for (i = 0; reglists[i].list; ++i) {
+		if (reglists[i].owner == owner && reglists[i].type == type &&
+		    (reglists[i].engine == capture_class ||
+		     reglists[i].type == GUC_STATE_CAPTURE_TYPE_GLOBAL))
+			return &reglists[i];
+	}
+
+	return NULL;
+}
+
+const struct __guc_mmio_reg_descr_group *
+xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
+				 enum guc_capture_list_class_type capture_class, bool is_ext)
+{
+	const struct __guc_mmio_reg_descr_group *reglists;
+
+	if (is_ext) {
+		struct xe_guc *guc = &gt->uc.guc;
+
+		reglists = guc->capture->extlists;
+	} else {
+		reglists = guc_capture_get_device_reglist(gt_to_xe(gt));
+	}
+	return guc_capture_get_one_list(reglists, owner, type, capture_class);
+}
+
+struct __ext_steer_reg {
+	const char *name;
+	struct xe_reg_mcr reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+	{"SAMPLER_INSTDONE",		SAMPLER_INSTDONE},
+	{"ROW_INSTDONE",		ROW_INSTDONE}
+};
+
+static const struct __ext_steer_reg xehpg_extregs[] = {
+	{"SC_INSTDONE",			XEHPG_SC_INSTDONE},
+	{"SC_INSTDONE_EXTRA",		XEHPG_SC_INSTDONE_EXTRA},
+	{"SC_INSTDONE_EXTRA2",		XEHPG_SC_INSTDONE_EXTRA2},
+	{"INSTDONE_GEOM_SVGUNIT",	XEHPG_INSTDONE_GEOM_SVGUNIT}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+			   const struct __ext_steer_reg *extlist,
+			   int slice_id, int subslice_id)
+{
+	if (!ext || !extlist)
+		return;
+
+	ext->reg = XE_REG(extlist->reg.__reg.addr);
+	ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
+	ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+	ext->flags |=
FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+	ext->regname = extlist->name;
+}
+
+static int
+__alloc_ext_regs(struct drm_device *drm, struct __guc_mmio_reg_descr_group *newlist,
+		 const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+	struct __guc_mmio_reg_descr *list;
+
+	list = drmm_kzalloc(drm, num_regs * sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+
+	newlist->list = list;
+	newlist->num_regs = num_regs;
+	newlist->owner = rootlist->owner;
+	newlist->engine = rootlist->engine;
+	newlist->type = rootlist->type;
+
+	return 0;
+}
+
+static int guc_capture_get_steer_reg_num(struct xe_device *xe)
+{
+	int num = ARRAY_SIZE(xe_extregs);
+
+	if (GRAPHICS_VERx100(xe) >= 1255)
+		num += ARRAY_SIZE(xehpg_extregs);
+
+	return num;
+}
+
+static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
+{
+	struct xe_gt *gt = guc_to_gt(guc);
+	u16 slice, subslice;
+	int iter, i, total = 0;
+	const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
+	const struct __guc_mmio_reg_descr_group *list;
+	struct __guc_mmio_reg_descr_group *extlists;
+	struct __guc_mmio_reg_descr *extarray;
+	bool has_xehpg_extregs = GRAPHICS_VERx100(gt_to_xe(gt)) >= 1255;
+	struct drm_device *drm = &gt_to_xe(gt)->drm;
+	bool has_rcs_ccs = false;
+	struct xe_hw_engine *hwe;
+	enum xe_hw_engine_id id;
+
+	/*
+	 * If the GT has no rcs/ccs engines, there is no need to allocate a
+	 * steered list. Currently only rcs/ccs have steering registers; if
+	 * other engine types gain steering registers in the future, this
+	 * check will need to be extended.
+	 */
+	for_each_hw_engine(hwe, gt, id) {
+		if (xe_engine_class_to_guc_capture_class(hwe->class) ==
+		    GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+			has_rcs_ccs = true;
+			break;
+		}
+	}
+
+	if (!has_rcs_ccs)
+		return;
+
+	/* steered registers currently only exist for the render-class */
+	list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+					GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+					GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
+	/*
+	 * Skip if this platform has no engine class registers or if extlists
+	 * was previously allocated
+	 */
+	if (!list || guc->capture->extlists)
+		return;
+
+	total = bitmap_weight(gt->fuse_topo.g_dss_mask, sizeof(gt->fuse_topo.g_dss_mask) * 8) *
+		guc_capture_get_steer_reg_num(guc_to_xe(guc));
+
+	if (!total)
+		return;
+
+	/* allocate an extra for an end marker */
+	extlists = drmm_kzalloc(drm, 2 * sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+	if (!extlists)
+		return;
+
+	if (__alloc_ext_regs(drm, &extlists[0], list, total)) {
+		drmm_kfree(drm, extlists);
+		return;
+	}
+
+	/* For steering registers, the list is generated at run-time */
+	extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
+	for_each_dss_steering(iter, gt, slice, subslice) {
+		for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
+			__fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+			++extarray;
+		}
+
+		if (has_xehpg_extregs)
+			for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
+				__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+				++extarray;
+			}
+	}
+
+	extlists[0].num_regs = total;
+
+	xe_gt_dbg(guc_to_gt(guc), "capture found %d ext-regs.\n", total);
+	guc->capture->extlists = extlists;
+}
+
+static int
+guc_capture_list_init(struct xe_guc *guc, u32 owner, u32 type,
+		      enum guc_capture_list_class_type capture_class, struct guc_mmio_reg *ptr,
+		      u16 num_entries)
+{
+	u32 ptr_idx = 0, list_idx = 0;
+	const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+	struct __guc_mmio_reg_descr_group
*extlists = guc->capture->extlists;
+	const struct __guc_mmio_reg_descr_group *match;
+	u32 list_num;
+
+	if (!reglists)
+		return -ENODEV;
+
+	match = guc_capture_get_one_list(reglists, owner, type, capture_class);
+	if (!match)
+		return -ENODATA;
+
+	list_num = match->num_regs;
+	for (list_idx = 0; ptr_idx < num_entries && list_idx < list_num; ++list_idx, ++ptr_idx) {
+		ptr[ptr_idx].offset = match->list[list_idx].reg.addr;
+		ptr[ptr_idx].value = 0xDEADF00D;
+		ptr[ptr_idx].flags = match->list[list_idx].flags;
+		ptr[ptr_idx].mask = match->list[list_idx].mask;
+	}
+
+	match = guc_capture_get_one_list(extlists, owner, type, capture_class);
+	if (match)
+		for (ptr_idx = list_num, list_idx = 0;
+		     ptr_idx < num_entries && list_idx < match->num_regs;
+		     ++ptr_idx, ++list_idx) {
+			ptr[ptr_idx].offset = match->list[list_idx].reg.addr;
+			ptr[ptr_idx].value = 0xDEADF00D;
+			ptr[ptr_idx].flags = match->list[list_idx].flags;
+			ptr[ptr_idx].mask = match->list[list_idx].mask;
+		}
+
+	if (ptr_idx < num_entries)
+		xe_gt_dbg(guc_to_gt(guc), "Got short capture reglist init: %d out of %d.\n",
+			  ptr_idx, num_entries);
+
+	return 0;
+}
+
+static int
+guc_cap_list_num_regs(struct xe_guc *guc, u32 owner, u32 type,
+		      enum guc_capture_list_class_type capture_class)
+{
+	const struct __guc_mmio_reg_descr_group *match;
+	int num_regs = 0;
+
+	match = guc_capture_get_one_list(guc->capture->reglists, owner, type, capture_class);
+	if (match)
+		num_regs = match->num_regs;
+
+	match = guc_capture_get_one_list(guc->capture->extlists, owner, type, capture_class);
+	if (match)
+		num_regs += match->num_regs;
+	else
+		/*
+		 * If a caller wants the full register dump size but we have
+		 * not yet got the hw-config, which is before max_mmio_per_node
+		 * is initialized, then provide a worst-case number for
+		 * extlists based on max dss fuse bits, but only ever for
+		 * render/compute
+		 */
+		if (owner == GUC_CAPTURE_LIST_INDEX_PF &&
+		    type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+		    capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE &&
+		    !guc->capture->max_mmio_per_node)
+			num_regs += guc_capture_get_steer_reg_num(guc_to_xe(guc)) *
+				    XE_MAX_DSS_FUSE_BITS;
+
+	return num_regs;
+}
+
+static int
+guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+			enum guc_capture_list_class_type capture_class,
+			size_t *size, bool is_purpose_est)
+{
+	struct xe_guc_state_capture *gc = guc->capture;
+	struct xe_gt *gt = guc_to_gt(guc);
+	struct __guc_capture_ads_cache *cache;
+	int num_regs;
+
+	xe_gt_assert(gt, type < GUC_STATE_CAPTURE_TYPE_MAX);
+	xe_gt_assert(gt, capture_class < GUC_CAPTURE_LIST_CLASS_MAX);
+
+	cache = &gc->ads_cache[owner][type][capture_class];
+	if (!gc->reglists) {
+		xe_gt_warn(gt, "No capture reglist for this device\n");
+		return -ENODEV;
+	}
+
+	if (cache->is_valid) {
+		*size = cache->size;
+		return cache->status;
+	}
+
+	if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
+	    !guc_capture_get_one_list(gc->reglists, owner, type, capture_class)) {
+		if (type == GUC_STATE_CAPTURE_TYPE_GLOBAL)
+			xe_gt_warn(gt, "Missing capture reglist: global!\n");
+		else
+			xe_gt_warn(gt, "Missing capture reglist: %s(%u):%s(%u)!\n",
+				   capture_list_type_names[type], type,
+				   capture_engine_class_names[capture_class], capture_class);
+		return -ENODEV;
+	}
+
+	num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class);
+	/* intentional empty lists can exist depending on hw config */
+	if (!num_regs)
+		return -ENODATA;
+
+	if (size)
+		*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+				   (num_regs * sizeof(struct
guc_mmio_reg)));
+
+	return 0;
+}
+
+/**
+ * xe_guc_capture_getlistsize - Get list size for owner/type/class combination
+ * @guc: The GuC object
+ * @owner: PF/VF owner
+ * @type: GuC capture register type
+ * @capture_class: GuC capture engine class id
+ * @size: Pointer to the size
+ *
+ * This function will get the list for the owner/type/class combination, and
+ * return the page-aligned list size.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+			   enum guc_capture_list_class_type capture_class, size_t *size)
+{
+	return guc_capture_getlistsize(guc, owner, type, capture_class, size, false);
+}
+
+/**
+ * xe_guc_capture_getlist - Get register capture list for owner/type/class
+ * combination
+ * @guc: The GuC object
+ * @owner: PF/VF owner
+ * @type: GuC capture register type
+ * @capture_class: GuC capture engine class id
+ * @outptr: Pointer to the cached register capture list
+ *
+ * This function will get the register capture list for the owner/type/class
+ * combination.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type,
+		       enum guc_capture_list_class_type capture_class, void **outptr)
+{
+	struct xe_guc_state_capture *gc = guc->capture;
+	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][capture_class];
+	struct guc_debug_capture_list *listnode;
+	int ret, num_regs;
+	u8 *caplist, *tmp;
+	size_t size = 0;
+
+	if (!gc->reglists)
+		return -ENODEV;
+
+	if (cache->is_valid) {
+		*outptr = cache->ptr;
+		return cache->status;
+	}
+
+	ret = xe_guc_capture_getlistsize(guc, owner, type, capture_class, &size);
+	if (ret) {
+		cache->is_valid = true;
+		cache->ptr = NULL;
+		cache->size = 0;
+		cache->status = ret;
+		return ret;
+	}
+
+	caplist = drmm_kzalloc(guc_to_drm(guc), size, GFP_KERNEL);
+	if (!caplist)
+		return -ENOMEM;
+
+	/* populate capture list header */
+	tmp = caplist;
+	num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class);
+	listnode = (struct guc_debug_capture_list *)tmp;
+	listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
+
+	/* populate list of register descriptors */
+	tmp += sizeof(struct guc_debug_capture_list);
+	guc_capture_list_init(guc, owner, type, capture_class,
+			      (struct guc_mmio_reg *)tmp, num_regs);
+
+	/* cache this list */
+	cache->is_valid = true;
+	cache->ptr = caplist;
+	cache->size = size;
+	cache->status = 0;
+
+	*outptr = caplist;
+
+	return 0;
+}
+
+/**
+ * xe_guc_capture_getnullheader - Get a null list for register capture
+ * @guc: The GuC object
+ * @outptr: Pointer to the cached null capture list
+ * @size: Pointer to the size
+ *
+ * This function will allocate a null list for register capture.
+ *
+ * Returns: 0 on success or a negative error code on failure.
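+ *
+ * The null header is a zeroed 4-dword block that acts as an empty
+ * capture list; ADS slots for engines that are absent or fused off
+ * point at it (see guc_capture_prep_lists()).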
+ */ +int +xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size) +{ + struct xe_guc_state_capture *gc = guc->capture; + int tmp = sizeof(u32) * 4; + void *null_header; + + if (gc->ads_null_cache) { + *outptr = gc->ads_null_cache; + *size = tmp; + return 0; + } + + null_header = drmm_kzalloc(guc_to_drm(guc), tmp, GFP_KERNEL); + if (!null_header) + return -ENOMEM; + + gc->ads_null_cache = null_header; + *outptr = null_header; + *size = tmp; + + return 0; +} + +/** + * xe_guc_capture_ads_input_worst_size - Calculate the worst-case size for GuC register capture + * @guc: pointer to the xe_guc structure + * + * Calculate the worst-case size for GuC register capture by including all possible engine classes. + * + * Returns: Calculated size + */ +size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc) +{ + size_t total_size, class_size, instance_size, global_size; + int i, j; + + /* + * This function calculates the worst-case register list size by + * including all possible engine classes. It is called during the + * first of a two-phase GuC (and ADS-population) initialization + * sequence, that is, during the pre-hwconfig phase before we have + * the exact engine fusing info. + */ + total_size = PAGE_SIZE; /* Pad a page in front for empty lists */ + for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) { + for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) { + if (xe_guc_capture_getlistsize(guc, i, + GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + j, &class_size) < 0) + class_size = 0; + if (xe_guc_capture_getlistsize(guc, i, + GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE, + j, &instance_size) < 0) + instance_size = 0; + total_size += class_size + instance_size; + } + if (xe_guc_capture_getlistsize(guc, i, + GUC_STATE_CAPTURE_TYPE_GLOBAL, + 0, &global_size) < 0) + global_size = 0; + total_size += global_size; + } + + return PAGE_ALIGN(total_size); +} + +static int guc_capture_output_size_est(struct xe_guc *guc) +{ + struct xe_gt *gt = guc_to_gt(guc); + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + + int capture_size = 0; + size_t tmp = 0; + + if (!guc->capture) + return -ENODEV; + + /* + * If every single engine-instance suffered a failure in quick succession but + * were all unrelated, then a burst of multiple error-capture events would dump + * registers for every one engine instance, one at a time. In this case, GuC + * would even dump the global-registers repeatedly. + * + * For each engine instance, there would be 1 x guc_state_capture_group_t output + * followed by 3 x guc_state_capture_t lists. The latter is how the register + * dumps are split across different register types (where the '3' are global vs class + * vs instance).
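 *
 * Editorial sketch (not from the patch): the per-engine worst case summed
 * below therefore works out to roughly:
 *
 *	one_engine = sizeof(struct guc_state_capture_group_header_t) +
 *		     3 * sizeof(struct guc_state_capture_header_t) +
 *		     global_size + class_size + instance_size;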
+ */ + for_each_hw_engine(hwe, gt, id) { + enum guc_capture_list_class_type capture_class; + + capture_class = xe_engine_class_to_guc_capture_class(hwe->class); + capture_size += sizeof(struct guc_state_capture_group_header_t) + + (3 * sizeof(struct guc_state_capture_header_t)); + + if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_GLOBAL, + 0, &tmp, true)) + capture_size += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + capture_class, &tmp, true)) + capture_size += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE, + capture_class, &tmp, true)) + capture_size += tmp; + } + + return capture_size; +} + +/* + * Add on a 3x multiplier to allow for multiple back-to-back captures occurring + * before Xe can read the data out and process it + */ +#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3 + +static void check_guc_capture_size(struct xe_guc *guc) +{ + int capture_size = guc_capture_output_size_est(guc); + int spare_size = capture_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER; + u32 buffer_size = xe_guc_log_section_size_capture(&guc->log); + + /* + * NOTE: capture_size is much smaller than the capture region + * allocation (DG2: <80K vs 1MB). + * Additionally, it's based on the space needed to fit all engines getting + * reset at once within the same G2H handler task slot. This is very + * unlikely. However, if GuC really does run out of space for whatever + * reason, we will see a separate warning message when processing the + * G2H event capture-notification, search for: + * xe_guc_STATE_CAPTURE_EVENT_STATUS_NOSPACE. + */ + if (capture_size < 0) + xe_gt_dbg(guc_to_gt(guc), + "Failed to calculate error state capture buffer minimum size: %d!\n", + capture_size); + if (capture_size > buffer_size) + xe_gt_dbg(guc_to_gt(guc), "Error state capture buffer may be too small: %d < %d\n", + buffer_size, capture_size); + else if (spare_size > buffer_size) + xe_gt_dbg(guc_to_gt(guc), + "Error state capture buffer lacks spare size: %d < %d (min = %d)\n", + buffer_size, spare_size, capture_size); +} + +static void +guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node, + struct list_head *list) +{ + list_add(&node->link, list); +} + +static void +guc_capture_add_node_to_outlist(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + guc_capture_remove_stale_matches_from_list(gc, node); + guc_capture_add_node_to_list(node, &gc->outlist); +} + +static void +guc_capture_add_node_to_cachelist(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + guc_capture_add_node_to_list(node, &gc->cachelist); +} + +static void +guc_capture_free_outlist_node(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *n) +{ + if (n) { + n->locked = 0; + list_del(&n->link); + /* put node back to cache list */ + guc_capture_add_node_to_cachelist(gc, n); + } +} + +static void +guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + struct __guc_capture_parsed_output *n, *ntmp; + int guc_id = node->guc_id; + + list_for_each_entry_safe(n, ntmp, &gc->outlist, link) { + if (n != node && !n->locked && n->guc_id == guc_id) + guc_capture_free_outlist_node(gc, n); + } +} + +static void +guc_capture_init_node(struct xe_guc *guc, struct __guc_capture_parsed_output *node) +{ + struct guc_mmio_reg *tmp[GUC_STATE_CAPTURE_TYPE_MAX]; + int i; + + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + tmp[i] = node->reginfo[i].regs;
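		/*
		 * Editorial note: the pre-allocated register arrays are kept
		 * across recycling: their pointers are parked in tmp[] here,
		 * the arrays and the node are zeroed below, and the pointers
		 * are then restored into the freshly cleared node.
		 */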
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) * + guc->capture->max_mmio_per_node); + } + memset(node, 0, sizeof(*node)); + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) + node->reginfo[i].regs = tmp[i]; + + INIT_LIST_HEAD(&node->link); +} + +/** + * DOC: Init, G2H-event and reporting flows for GuC-error-capture + * + * KMD Init time flows: + * -------------------- + * --> alloc A: GuC input capture regs lists (registered to GuC via ADS). + * xe_guc_ads acquires the register lists by calling + * xe_guc_capture_getlistsize and xe_guc_capture_getlist 'n' times, + * where n = 1 for global-reg-list + + * num_engine_classes for class-reg-list + + * num_engine_classes for instance-reg-list + * (since all instances of the same engine-class type + * have an identical engine-instance register-list). + * ADS module also calls separately for PF vs VF. + * + * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param)) + * Size = #define CAPTURE_BUFFER_SIZE (warns if too small) + * Note: sized 'x 3' to hold multiple capture groups + * + * GUC Runtime notify capture: + * -------------------------- + * --> G2H STATE_CAPTURE_NOTIFICATION + * L--> xe_guc_capture_process + * L--> Loop through B (head..tail) and for each engine instance's + * err-state-captured register-list we find, we alloc 'C': + * --> alloc C: A capture-output-node structure that includes misc capture info along + * with 3 register list dumps (global, engine-class and engine-instance) + * This node is created from a pre-allocated list of blank nodes in + * guc->capture->cachelist and populated with the error-capture + * data from GuC and then it's added into guc->capture->outlist linked + * list. This list is used for matchup and printout by xe_devcoredump_read + * and xe_engine_snapshot_print (when the user invokes the devcoredump sysfs). + * + * GUC --> notify context reset: + * ----------------------------- + * --> guc_exec_queue_timedout_job + * L--> xe_devcoredump + * L--> devcoredump_snapshot + * --> xe_hw_engine_snapshot_capture + * --> xe_engine_manual_capture (for manual capture) + * + * User Sysfs / Debugfs + * -------------------- + * --> xe_devcoredump_read-> + * L--> xxx_snapshot_print + * L--> xe_engine_snapshot_print + * Print the register list values saved in + * guc->capture->outlist + * + */ + +static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf) +{ + if (buf->wr >= buf->rd) + return (buf->wr - buf->rd); + return (buf->size - buf->rd) + buf->wr; +} + +static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf) +{ + if (buf->rd > buf->wr) + return (buf->size - buf->rd); + return (buf->wr - buf->rd); +} + +/* + * GuC's error-capture output is a ring buffer populated in a byte-stream fashion: + * + * The GuC Log buffer region for error-capture is managed like a ring buffer. + * The GuC firmware dumps error capture logs into this ring in a byte-stream flow. + * Additionally, for the current and foreseeable future, all packed error- + * capture output structures are dword aligned. + * + * That said, if the GuC firmware is in the midst of writing a structure that is larger + * than one dword but the tail end of the err-capture buffer-region has less space left, + * we need to extract that structure one dword at a time, straddling the end of the + * ring onto its start. + * + * The function below, guc_capture_log_remove_bytes(), is a helper for that.
All callers of this + * function would typically do a straight-up memcpy from the ring contents and will only + * call this helper if their structure-extraction is straddling across the end of the + * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease + * scalability for future expansion of output data types without requiring a redesign + * of the flow controls. + */ +static int +guc_capture_log_remove_bytes(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + void *out, int bytes_needed) +{ +#define GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX 3 + + int fill_size = 0, tries = GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX; + int copy_size, avail; + + xe_assert(guc_to_xe(guc), bytes_needed % sizeof(u32) == 0); + + if (bytes_needed > guc_capture_buf_cnt(buf)) + return -1; + + while (bytes_needed > 0 && tries--) { + int misaligned; + + avail = guc_capture_buf_cnt_to_end(buf); + misaligned = avail % sizeof(u32); + /* wrap if at end */ + if (!avail) { + /* output stream clipped */ + if (!buf->rd) + return fill_size; + buf->rd = 0; + continue; + } + + /* Only copy to u32 aligned data */ + copy_size = avail < bytes_needed ? avail - misaligned : bytes_needed; + xe_map_memcpy_from(guc_to_xe(guc), out + fill_size, &guc->log.bo->vmap, + buf->data_offset + buf->rd, copy_size); + buf->rd += copy_size; + fill_size += copy_size; + bytes_needed -= copy_size; + + if (misaligned) + xe_gt_warn(guc_to_gt(guc), + "Bytes extraction not dword aligned, clipping.\n"); + } + + return fill_size; +} + +static int +guc_capture_log_get_group_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_state_capture_group_header_t *ghdr) +{ + int fullsize = sizeof(struct guc_state_capture_group_header_t); + + if (guc_capture_log_remove_bytes(guc, buf, ghdr, fullsize) != fullsize) + return -1; + return 0; +} + +static int +guc_capture_log_get_data_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_state_capture_header_t *hdr) +{ + int fullsize = sizeof(struct guc_state_capture_header_t); + + if (guc_capture_log_remove_bytes(guc, buf, hdr, fullsize) != fullsize) + return -1; + return 0; +} + +static int +guc_capture_log_get_register(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_mmio_reg *reg) +{ + int fullsize = sizeof(struct guc_mmio_reg); + + if (guc_capture_log_remove_bytes(guc, buf, reg, fullsize) != fullsize) + return -1; + return 0; +} + +static struct __guc_capture_parsed_output * +guc_capture_get_prealloc_node(struct xe_guc *guc) +{ + struct __guc_capture_parsed_output *found = NULL; + + if (!list_empty(&guc->capture->cachelist)) { + struct __guc_capture_parsed_output *n, *ntmp; + + /* get first avail node from the cache list */ + list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) { + found = n; + break; + } + } else { + struct __guc_capture_parsed_output *n, *ntmp; + + /* + * traverse reversed and steal back the oldest node already + * allocated + */ + list_for_each_entry_safe_reverse(n, ntmp, &guc->capture->outlist, link) { + if (!n->locked) + found = n; + } + } + if (found) { + list_del(&found->link); + guc_capture_init_node(guc, found); + } + + return found; +} + +static struct __guc_capture_parsed_output * +guc_capture_clone_node(struct xe_guc *guc, struct __guc_capture_parsed_output *original, + u32 keep_reglist_mask) +{ + struct __guc_capture_parsed_output *new; + int i; + + new = guc_capture_get_prealloc_node(guc); + if (!new) + return NULL; + if (!original) + return new; + + new->is_partial = original->is_partial; + 
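	/*
	 * Editorial note: keep_reglist_mask is a bitmask over the
	 * GUC_STATE_CAPTURE_TYPE_* indices; only the selected register lists
	 * are copied into the clone. The extraction loop further below uses,
	 * for example:
	 *
	 *	node = guc_capture_clone_node(guc, node,
	 *				      GCAP_PARSED_REGLIST_INDEX_GLOBAL);
	 *
	 * to carry just the global dump over when a class-level list repeats.
	 */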
+ /* copy reg-lists that we want to clone */ + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + if (keep_reglist_mask & BIT(i)) { + XE_WARN_ON(original->reginfo[i].num_regs > + guc->capture->max_mmio_per_node); + + memcpy(new->reginfo[i].regs, original->reginfo[i].regs, + original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg)); + + new->reginfo[i].num_regs = original->reginfo[i].num_regs; + new->reginfo[i].vfid = original->reginfo[i].vfid; + + if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS) { + new->eng_class = original->eng_class; + } else if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { + new->eng_inst = original->eng_inst; + new->guc_id = original->guc_id; + new->lrca = original->lrca; + } + } + } + + return new; +} + +static int +guc_capture_extract_reglists(struct xe_guc *guc, struct __guc_capture_bufstate *buf) +{ + struct xe_gt *gt = guc_to_gt(guc); + struct guc_state_capture_group_header_t ghdr = {0}; + struct guc_state_capture_header_t hdr = {0}; + struct __guc_capture_parsed_output *node = NULL; + struct guc_mmio_reg *regs = NULL; + int i, numlists, numregs, ret = 0; + enum guc_state_capture_type datatype; + struct guc_mmio_reg tmp; + bool is_partial = false; + + i = guc_capture_buf_cnt(buf); + if (!i) + return -ENODATA; + + if (i % sizeof(u32)) { + xe_gt_warn(gt, "Got mis-aligned register capture entries\n"); + ret = -EIO; + goto bailout; + } + + /* first get the capture group header */ + if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) { + ret = -EIO; + goto bailout; + } + /* + * we would typically expect a layout as below where n would be expected to be + * anywhere between 3 to n where n > 3 if we are seeing multiple dependent engine + * instances being reset together. + * ____________________________________________ + * | Capture Group | + * | ________________________________________ | + * | | Capture Group Header: | | + * | | - num_captures = 5 | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture1: | | + * | | Hdr: GLOBAL, numregs=a | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... rega | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture2: | | + * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regb | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture3: | | + * | | Hdr: INSTANCE=RCS, numregs=c | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regc | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture4: | | + * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regd | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture5: | | + * | | Hdr: INSTANCE=CCS0, numregs=e | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... 
rege | | | + * | | |__________________________________| | | + * | |______________________________________| | + * |__________________________________________| + */ + is_partial = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE, ghdr.info); + numlists = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES, ghdr.info); + + while (numlists--) { + if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) { + ret = -EIO; + break; + } + + datatype = FIELD_GET(GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE, hdr.info); + if (datatype > GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { + /* unknown capture type - skip over to next capture set */ + numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES, + hdr.num_mmio_entries); + while (numregs--) { + if (guc_capture_log_get_register(guc, buf, &tmp)) { + ret = -EIO; + break; + } + } + continue; + } else if (node) { + /* + * Based on the current capture type and what we have so far, + * decide if we should add the current node into the internal + * linked list for match-up when xe_devcoredump calls later + * (and alloc a blank node for the next set of reglists) + * or continue with the same node or clone the current node + * but only retain the global or class registers (such as the + * case of dependent engine resets). + */ + if (datatype == GUC_STATE_CAPTURE_TYPE_GLOBAL) { + guc_capture_add_node_to_outlist(guc->capture, node); + node = NULL; + } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS && + node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS].num_regs) { + /* Add to list, clone node and duplicate global list */ + guc_capture_add_node_to_outlist(guc->capture, node); + node = guc_capture_clone_node(guc, node, + GCAP_PARSED_REGLIST_INDEX_GLOBAL); + } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE && + node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE].num_regs) { + /* Add to list, clone node and duplicate global + class lists */ + guc_capture_add_node_to_outlist(guc->capture, node); + node = guc_capture_clone_node(guc, node, + (GCAP_PARSED_REGLIST_INDEX_GLOBAL | + GCAP_PARSED_REGLIST_INDEX_ENGCLASS)); + } + } + + if (!node) { + node = guc_capture_get_prealloc_node(guc); + if (!node) { + ret = -ENOMEM; + break; + } + if (datatype != GUC_STATE_CAPTURE_TYPE_GLOBAL) + xe_gt_dbg(gt, "Register capture missing global dump: %08x!\n", + datatype); + } + node->is_partial = is_partial; + node->reginfo[datatype].vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr.owner); + node->source = XE_ENGINE_CAPTURE_SOURCE_GUC; + node->type = datatype; + + switch (datatype) { + case GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE: + node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, + hdr.info); + node->eng_inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE, + hdr.info); + node->lrca = hdr.lrca; + node->guc_id = hdr.guc_id; + break; + case GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS: + node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, + hdr.info); + break; + default: + break; + } + + numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES, + hdr.num_mmio_entries); + if (numregs > guc->capture->max_mmio_per_node) { + xe_gt_dbg(gt, "Register capture list extraction clipped by prealloc!\n"); + numregs = guc->capture->max_mmio_per_node; + } + node->reginfo[datatype].num_regs = numregs; + regs = node->reginfo[datatype].regs; + i = 0; + while (numregs--) { + if (guc_capture_log_get_register(guc, buf, &regs[i++])) { + ret = -EIO; + break; + } + } + } + +bailout: + if (node) { + /* If we have data, add to linked list for match-up 
when xe_devcoredump calls */ + for (i = GUC_STATE_CAPTURE_TYPE_GLOBAL; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + if (node->reginfo[i].regs) { + guc_capture_add_node_to_outlist(guc->capture, node); + node = NULL; + break; + } + } + if (node) /* else return it back to cache list */ + guc_capture_add_node_to_cachelist(guc->capture, node); + } + return ret; +} + +static int __guc_capture_flushlog_complete(struct xe_guc *guc) +{ + u32 action[] = { + XE_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE, + GUC_LOG_BUFFER_CAPTURE + }; + + return xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action)); +} + +static void __guc_capture_process_output(struct xe_guc *guc) +{ + unsigned int buffer_size, read_offset, write_offset, full_count; + struct xe_uc *uc = container_of(guc, typeof(*uc), guc); + struct guc_log_buffer_state log_buf_state_local; + struct __guc_capture_bufstate buf; + bool new_overflow; + int ret, tmp; + u32 log_buf_state_offset; + u32 src_data_offset; + + log_buf_state_offset = sizeof(struct guc_log_buffer_state) * GUC_LOG_BUFFER_CAPTURE; + src_data_offset = xe_guc_get_log_buffer_offset(&guc->log, GUC_LOG_BUFFER_CAPTURE); + + /* + * Make a copy of the state structure, inside GuC log buffer + * (which is uncached mapped), on the stack to avoid reading + * from it multiple times. + */ + xe_map_memcpy_from(guc_to_xe(guc), &log_buf_state_local, &guc->log.bo->vmap, + log_buf_state_offset, sizeof(struct guc_log_buffer_state)); + + buffer_size = xe_guc_get_log_buffer_size(&guc->log, GUC_LOG_BUFFER_CAPTURE); + read_offset = log_buf_state_local.read_ptr; + write_offset = log_buf_state_local.sampled_write_ptr; + full_count = FIELD_GET(GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT, log_buf_state_local.flags); + + /* Bookkeeping stuff */ + tmp = FIELD_GET(GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE, log_buf_state_local.flags); + guc->log.stats[GUC_LOG_BUFFER_CAPTURE].flush += tmp; + new_overflow = xe_guc_check_log_buf_overflow(&guc->log, GUC_LOG_BUFFER_CAPTURE, + full_count); + + /* Now copy the actual logs. 
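 *
 * Editorial sketch: from here on, buf describes the window [rd, wr) inside
 * the capture region; wr < rd means the stream has wrapped, which
 * guc_capture_buf_cnt() accounts for as (size - rd) + wr.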
*/ + if (unlikely(new_overflow)) { + /* copy the whole buffer in case of overflow */ + read_offset = 0; + write_offset = buffer_size; + } else if (unlikely((read_offset > buffer_size) || + (write_offset > buffer_size))) { + xe_gt_err(guc_to_gt(guc), + "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n", + read_offset, buffer_size); + /* copy whole buffer as offsets are unreliable */ + read_offset = 0; + write_offset = buffer_size; + } + + buf.size = buffer_size; + buf.rd = read_offset; + buf.wr = write_offset; + buf.data_offset = src_data_offset; + + if (!xe_guc_read_stopped(guc)) { + do { + ret = guc_capture_extract_reglists(guc, &buf); + if (ret && ret != -ENODATA) + xe_gt_dbg(guc_to_gt(guc), "Capture extraction failed:%d\n", ret); + } while (ret >= 0); + } + + /* Update the read pointer in the log buffer err-cap state */ + xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap, + log_buf_state_offset + offsetof(struct guc_log_buffer_state, read_ptr), u32, + write_offset); + + /* + * Clear flush_to_file in the local copy first (the local copy was loaded + * by the xe_map_memcpy_from() above), then write the updated local copy + * back out through xe_map_wr() + */ + log_buf_state_local.flags &= ~GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE; + xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap, + log_buf_state_offset + offsetof(struct guc_log_buffer_state, flags), u32, + log_buf_state_local.flags); + __guc_capture_flushlog_complete(guc); +} + +/* + * xe_guc_capture_process - Process GuC register captured data + * @guc: The GuC object + * + * When GuC capture data is ready, GuC sends the message + * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION to the host, and this function is + * called to process the data that comes with the message. + * + * Returns: None + */ +void xe_guc_capture_process(struct xe_guc *guc) +{ + if (guc->capture) + __guc_capture_process_output(guc); +} + +static struct __guc_capture_parsed_output * +guc_capture_alloc_one_node(struct xe_guc *guc) +{ + struct drm_device *drm = guc_to_drm(guc); + struct __guc_capture_parsed_output *new; + int i; + + new = drmm_kzalloc(drm, sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + new->reginfo[i].regs = drmm_kzalloc(drm, guc->capture->max_mmio_per_node * + sizeof(struct guc_mmio_reg), GFP_KERNEL); + if (!new->reginfo[i].regs) { + while (i) + drmm_kfree(drm, new->reginfo[--i].regs); + drmm_kfree(drm, new); + return NULL; + } + } + guc_capture_init_node(guc, new); + + return new; +} + +static void +__guc_capture_create_prealloc_nodes(struct xe_guc *guc) +{ + struct __guc_capture_parsed_output *node = NULL; + int i; + + for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) { + node = guc_capture_alloc_one_node(guc); + if (!node) { + xe_gt_warn(guc_to_gt(guc), "Register capture pre-alloc-cache failure\n"); + /* don't free the prior allocations; use what we got and clean up at shutdown */ + return; + } + guc_capture_add_node_to_cachelist(guc->capture, node); + } +} + +static int +guc_get_max_reglist_count(struct xe_guc *guc) +{ + int i, j, k, tmp, maxregcount = 0; + + for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) { + for (j = 0; j < GUC_STATE_CAPTURE_TYPE_MAX; ++j) { + for (k = 0; k < GUC_CAPTURE_LIST_CLASS_MAX; ++k) { + const struct __guc_mmio_reg_descr_group *match; + + if (j == GUC_STATE_CAPTURE_TYPE_GLOBAL && k > 0) + continue; + + tmp = 0; + match = guc_capture_get_one_list(guc->capture->reglists, i, j, k); + if (match) + tmp = match->num_regs; + + match = guc_capture_get_one_list(guc->capture->extlists, i, j, k); + if (match) + tmp += 
match->num_regs; + + if (tmp > maxregcount) + maxregcount = tmp; + } + } + } + if (!maxregcount) + maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS; + + return maxregcount; +} + +static void +guc_capture_create_prealloc_nodes(struct xe_guc *guc) +{ + /* skip if we've already done the pre-alloc */ + if (guc->capture->max_mmio_per_node) + return; + + guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc); + __guc_capture_create_prealloc_nodes(guc); +} + +static void +read_reg_to_node(struct xe_hw_engine *hwe, const struct __guc_mmio_reg_descr_group *list, + struct guc_mmio_reg *regs) +{ + int i; + + if (!list || !list->list || list->num_regs == 0) + return; + + if (!regs) + return; + + for (i = 0; i < list->num_regs; i++) { + struct __guc_mmio_reg_descr desc = list->list[i]; + u32 value; + + if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { + value = xe_hw_engine_mmio_read32(hwe, desc.reg); + } else { + if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS && + FIELD_GET(GUC_REGSET_STEERING_NEEDED, desc.flags)) { + int group, instance; + + group = FIELD_GET(GUC_REGSET_STEERING_GROUP, desc.flags); + instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, desc.flags); + value = xe_gt_mcr_unicast_read(hwe->gt, XE_REG_MCR(desc.reg.addr), + group, instance); + } else { + value = xe_mmio_read32(&hwe->gt->mmio, desc.reg); + } + } + + regs[i].value = value; + regs[i].offset = desc.reg.addr; + regs[i].flags = desc.flags; + regs[i].mask = desc.mask; + } +} + +/** + * xe_engine_manual_capture - Take a manual engine snapshot. + * @hwe: Xe HW Engine. + * @snapshot: The engine snapshot + * + * Take an engine snapshot by reading the engine registers directly. + * + * Returns: None + */ +void +xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot) +{ + struct xe_gt *gt = hwe->gt; + struct xe_device *xe = gt_to_xe(gt); + struct xe_guc *guc = &gt->uc.guc; + struct xe_devcoredump *devcoredump = &xe->devcoredump; + enum guc_capture_list_class_type capture_class; + const struct __guc_mmio_reg_descr_group *list; + struct __guc_capture_parsed_output *new; + enum guc_state_capture_type type; + u16 guc_id = 0; + u32 lrca = 0; + + if (IS_SRIOV_VF(xe)) + return; + + new = guc_capture_get_prealloc_node(guc); + if (!new) + return; + + capture_class = xe_engine_class_to_guc_capture_class(hwe->class); + for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) { + struct gcap_reg_list_info *reginfo = &new->reginfo[type]; + /* + * reginfo->regs is allocated based on guc->capture->max_mmio_per_node, + * which is derived from the descriptor lists driving the population, + * so it should not overflow + */ + + /* Get register list for the type/class */ + list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type, + capture_class, false); + if (!list) { + xe_gt_dbg(gt, "Empty GuC capture register descriptor for %s", + hwe->name); + continue; + } + + read_reg_to_node(hwe, list, reginfo->regs); + reginfo->num_regs = list->num_regs; + + /* Capture steering registers for rcs/ccs */ + if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) { + list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, + type, capture_class, true); + if (list) { + read_reg_to_node(hwe, list, &reginfo->regs[reginfo->num_regs]); + reginfo->num_regs += list->num_regs; + } + } + } + + if (devcoredump && devcoredump->captured) { + struct xe_guc_submit_exec_queue_snapshot *ge = devcoredump->snapshot.ge; + + if (ge) { + guc_id = ge->guc.id; + if (ge->lrc[0]) + lrca = 
ge->lrc[0]->context_desc; + } + } + + new->eng_class = xe_engine_class_to_guc_class(hwe->class); + new->eng_inst = hwe->instance; + new->guc_id = guc_id; + new->lrca = lrca; + new->is_partial = 0; + new->locked = 1; + new->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL; + + guc_capture_add_node_to_outlist(guc->capture, new); + devcoredump->snapshot.matched_node = new; +} + +static struct guc_mmio_reg * +guc_capture_find_reg(struct gcap_reg_list_info *reginfo, u32 addr, u32 flags) +{ + int i; + + if (reginfo && reginfo->num_regs > 0) { + struct guc_mmio_reg *regs = reginfo->regs; + + if (regs) + for (i = 0; i < reginfo->num_regs; i++) + if (regs[i].offset == addr && regs[i].flags == flags) + return &regs[i]; + } + + return NULL; +} + +static void +snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p, + u32 type, const struct __guc_mmio_reg_descr_group *list) +{ + struct xe_gt *gt = snapshot->hwe->gt; + struct xe_device *xe = gt_to_xe(gt); + struct xe_guc *guc = &gt->uc.guc; + struct xe_devcoredump *devcoredump = &xe->devcoredump; + struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot; + struct gcap_reg_list_info *reginfo = NULL; + u32 last_value, i; + bool is_ext; + + if (!list || list->num_regs == 0) + return; + XE_WARN_ON(!devcore_snapshot->matched_node); + + is_ext = list == guc->capture->extlists; + reginfo = &devcore_snapshot->matched_node->reginfo[type]; + + /* + * Loop through the descriptor list first and look each register up in + * the captured node. This is more maintainable for developers, as it + * ensures the printout matches the ordering of the static descriptor + * table-of-lists. + */ + for (i = 0; i < list->num_regs; i++) { + const struct __guc_mmio_reg_descr *reg_desc = &list->list[i]; + struct guc_mmio_reg *reg; + u32 value; + + reg = guc_capture_find_reg(reginfo, reg_desc->reg.addr, reg_desc->flags); + if (!reg) + continue; + + value = reg->value; + if (reg_desc->data_type == REG_64BIT_LOW_DW) { + last_value = value; + /* Low 32-bit dword saved; continue for the high 32 bits */ + continue; + } else if (reg_desc->data_type == REG_64BIT_HI_DW) { + u64 value_qw = ((u64)value << 32) | last_value; + + drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw); + continue; + } + + if (is_ext) { + int dss, group, instance; + + group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags); + instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags); + dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance); + + drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value); + } else { + drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value); + } + } +} + +/** + * xe_engine_snapshot_print - Print out a given Xe HW Engine snapshot. + * @snapshot: Xe HW Engine snapshot object. + * @p: drm_printer where it will be printed out. + * + * This function prints out a given Xe HW Engine snapshot object.
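 *
 * Editorial aside: 64-bit registers arrive as two descriptor entries;
 * snapshot_print_by_list_order() above stitches them back together by
 * latching the REG_64BIT_LOW_DW value and printing once the matching
 * REG_64BIT_HI_DW entry arrives, as ((u64)hi << 32) | lo.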
+ */ +void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p) +{ + const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = { + "full-capture", + "partial-capture" + }; + int type; + const struct __guc_mmio_reg_descr_group *list; + enum guc_capture_list_class_type capture_class; + + struct xe_gt *gt; + struct xe_device *xe; + struct xe_devcoredump *devcoredump; + struct xe_devcoredump_snapshot *devcore_snapshot; + + if (!snapshot) + return; + + gt = snapshot->hwe->gt; + xe = gt_to_xe(gt); + devcoredump = &xe->devcoredump; + devcore_snapshot = &devcoredump->snapshot; + + if (!devcore_snapshot->matched_node) + return; + + xe_gt_assert(gt, snapshot->source <= XE_ENGINE_CAPTURE_SOURCE_GUC); + xe_gt_assert(gt, snapshot->hwe); + + capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class); + + drm_printf(p, "%s (physical), logical instance=%d\n", + snapshot->name ? snapshot->name : "", + snapshot->logical_instance); + drm_printf(p, "\tCapture_source: %s\n", + snapshot->source == XE_ENGINE_CAPTURE_SOURCE_GUC ? "GuC" : "Manual"); + drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]); + drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n", + snapshot->forcewake.domain, snapshot->forcewake.ref); + drm_printf(p, "\tReserved: %s\n", + str_yes_no(snapshot->kernel_reserved)); + + for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) { + list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type, + capture_class, false); + snapshot_print_by_list_order(snapshot, p, type, list); + } + + if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) { + list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, + GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + capture_class, true); + snapshot_print_by_list_order(snapshot, p, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + list); + } + + drm_puts(p, "\n"); +} + +/** + * xe_guc_capture_get_matching_and_lock - Find and lock the matching GuC capture node for the job. + * @job: The job object. + * + * Search the capture outlist for this job; this can also be used to check + * whether a GuC capture is ready for the job. + * If found, the node's locked flag is set. + * + * Returns: the found guc-capture node pointer, else NULL + */ +struct __guc_capture_parsed_output * +xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job) +{ + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + struct xe_exec_queue *q; + struct xe_device *xe; + u16 guc_class = GUC_LAST_ENGINE_CLASS + 1; + struct xe_devcoredump_snapshot *ss; + + if (!job) + return NULL; + + q = job->q; + if (!q || !q->gt) + return NULL; + + xe = gt_to_xe(q->gt); + if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe) || IS_SRIOV_VF(xe)) + return NULL; + + ss = &xe->devcoredump.snapshot; + if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC) + return ss->matched_node; + + /* Find hwe for the job */ + for_each_hw_engine(hwe, q->gt, id) { + if (hwe != q->hwe) + continue; + guc_class = xe_engine_class_to_guc_class(hwe->class); + break; + } + + if (guc_class <= GUC_LAST_ENGINE_CLASS) { + struct __guc_capture_parsed_output *n, *ntmp; + struct xe_guc *guc = &q->gt->uc.guc; + u16 guc_id = q->guc->id; + u32 lrca = xe_lrc_ggtt_addr(q->lrc[0]); + + /* + * Look for a matching GuC-reported error capture node in the + * internal output link-list, based on engine class/instance, + * guc id and lrca info.
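 * A node only matches when the engine class and instance, guc_id and
 * lrca all agree and the node was produced by GuC rather than by a
 * manual capture (editorial summary of the test below).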
+ */ + list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) { + if (n->eng_class == guc_class && n->eng_inst == hwe->instance && + n->guc_id == guc_id && n->lrca == lrca && + n->source == XE_ENGINE_CAPTURE_SOURCE_GUC) { + n->locked = 1; + return n; + } + } + } + return NULL; +} + +/** + * xe_engine_snapshot_capture_for_job - Take snapshot of associated engine + * @job: The job object + * + * Take a snapshot of the associated HW engine + * + * Returns: None. + */ +void +xe_engine_snapshot_capture_for_job(struct xe_sched_job *job) +{ + struct xe_exec_queue *q = job->q; + struct xe_device *xe = gt_to_xe(q->gt); + struct xe_devcoredump *coredump = &xe->devcoredump; + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + u32 adj_logical_mask = q->logical_mask; + + if (IS_SRIOV_VF(xe)) + return; + + for_each_hw_engine(hwe, q->gt, id) { + if (hwe->class != q->hwe->class || + !(BIT(hwe->logical_instance) & adj_logical_mask)) { + coredump->snapshot.hwe[id] = NULL; + continue; + } + + if (!coredump->snapshot.hwe[id]) { + coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe, job); + } else { + struct __guc_capture_parsed_output *new; + + new = xe_guc_capture_get_matching_and_lock(job); + if (new) { + struct xe_guc *guc = &q->gt->uc.guc; + + /* + * If we are in here, it means we found a fresh + * GuC-err-capture node for this engine after + * previously failing to find a match in the + * early part of guc_exec_queue_timedout_job. + * Thus we must free the manually captured node. + */ + guc_capture_free_outlist_node(guc->capture, + coredump->snapshot.matched_node); + coredump->snapshot.matched_node = new; + } + } + + break; + } +} + +/* + * xe_guc_capture_put_matched_nodes - Cleanup matched nodes + * @guc: The GuC object + * + * Free the matched node and all nodes with the same guc_id from the + * GuC capture outlist + */ +void xe_guc_capture_put_matched_nodes(struct xe_guc *guc) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_devcoredump *devcoredump = &xe->devcoredump; + struct __guc_capture_parsed_output *n = devcoredump->snapshot.matched_node; + + if (n) { + guc_capture_remove_stale_matches_from_list(guc->capture, n); + guc_capture_free_outlist_node(guc->capture, n); + devcoredump->snapshot.matched_node = NULL; + } +} + +/* + * xe_guc_capture_steered_list_init - Init steering register list + * @guc: The GuC object + * + * Init the steering register list for GuC register capture and create the + * pre-allocated nodes + */ +void xe_guc_capture_steered_list_init(struct xe_guc *guc) +{ + /* + * For certain engine classes, there are slice and subslice + * level registers requiring steering. We allocate and populate + * these based on hw config and add them as an extension list at + * the end of the pre-populated render list. + */ + guc_capture_alloc_steered_lists(guc); + check_guc_capture_size(guc); + guc_capture_create_prealloc_nodes(guc); +} + +/* + * xe_guc_capture_init - Init for GuC register capture + * @guc: The GuC object + * + * Init for GuC register capture: allocate memory for the capture data structure. + * + * Returns: 0 on success,
+ * -ENOMEM if out of memory + */ +int xe_guc_capture_init(struct xe_guc *guc) +{ + guc->capture = drmm_kzalloc(guc_to_drm(guc), sizeof(*guc->capture), GFP_KERNEL); + if (!guc->capture) + return -ENOMEM; + + guc->capture->reglists = guc_capture_get_device_reglist(guc_to_xe(guc)); + + INIT_LIST_HEAD(&guc->capture->outlist); + INIT_LIST_HEAD(&guc->capture->cachelist); + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_guc_capture.h b/drivers/gpu/drm/xe/xe_guc_capture.h new file mode 100644 index 000000000000..97a795d13dd1 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_capture.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021-2024 Intel Corporation + */ + +#ifndef _XE_GUC_CAPTURE_H +#define _XE_GUC_CAPTURE_H + +#include <linux/types.h> +#include "abi/guc_capture_abi.h" +#include "xe_guc.h" +#include "xe_guc_fwif.h" + +struct xe_guc; +struct xe_hw_engine; +struct xe_hw_engine_snapshot; +struct xe_sched_job; + +static inline enum guc_capture_list_class_type xe_guc_class_to_capture_class(u16 class) +{ + switch (class) { + case GUC_RENDER_CLASS: + case GUC_COMPUTE_CLASS: + return GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE; + case GUC_GSC_OTHER_CLASS: + return GUC_CAPTURE_LIST_CLASS_GSC_OTHER; + case GUC_VIDEO_CLASS: + case GUC_VIDEOENHANCE_CLASS: + case GUC_BLITTER_CLASS: + return class; + default: + XE_WARN_ON(class); + return GUC_CAPTURE_LIST_CLASS_MAX; + } +} + +static inline enum guc_capture_list_class_type +xe_engine_class_to_guc_capture_class(enum xe_engine_class class) +{ + return xe_guc_class_to_capture_class(xe_engine_class_to_guc_class(class)); +} + +void xe_guc_capture_process(struct xe_guc *guc); +int xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, void **outptr); +int xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, size_t *size); +int xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size); +size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc); +const struct __guc_mmio_reg_descr_group * +xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, bool is_ext); +struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job); +void xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot); +void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p); +void xe_engine_snapshot_capture_for_job(struct xe_sched_job *job); +void xe_guc_capture_steered_list_init(struct xe_guc *guc); +void xe_guc_capture_put_matched_nodes(struct xe_guc *guc); +int xe_guc_capture_init(struct xe_guc *guc); + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h new file mode 100644 index 000000000000..2057125b1bfa --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021-2024 Intel Corporation + */ + +#ifndef _XE_GUC_CAPTURE_TYPES_H +#define _XE_GUC_CAPTURE_TYPES_H + +#include <linux/types.h> +#include "regs/xe_reg_defs.h" + +struct xe_guc; + +/* data type of the register in register list */ +enum capture_register_data_type { + REG_32BIT = 0, + REG_64BIT_LOW_DW, + REG_64BIT_HI_DW, +}; + +/** + * struct __guc_mmio_reg_descr - GuC mmio register descriptor + * + * xe_guc_capture module uses these structures to define a register + * 
(offsets, names, flags,...) that are used at the ADS registration + * time as well as during runtime processing and reporting of error- + * capture states generated by GuC just prior to engine reset events. + */ +struct __guc_mmio_reg_descr { + /** @reg: the register */ + struct xe_reg reg; + /** + * @data_type: data type of the register + * Can be 32 bit, or the low or high dword of a 64-bit value; see + * enum capture_register_data_type + */ + enum capture_register_data_type data_type; + /** @flags: Flags for the register */ + u32 flags; + /** @mask: The mask to apply */ + u32 mask; + /** @regname: Name of the register */ + const char *regname; +}; + +/** + * struct __guc_mmio_reg_descr_group - A group of register descriptors + * + * xe_guc_capture module uses these structures to maintain static + * tables (per unique platform) that consist of lists of registers + * (offsets, names, flags,...) that are used at the ADS registration + * time as well as during runtime processing and reporting of error- + * capture states generated by GuC just prior to engine reset events. + */ +struct __guc_mmio_reg_descr_group { + /** @list: The register list */ + const struct __guc_mmio_reg_descr *list; + /** @num_regs: Count of registers in the list */ + u32 num_regs; + /** @owner: PF/VF owner, see enum guc_capture_list_index_type */ + u32 owner; + /** @type: Capture register type, see enum guc_state_capture_type */ + u32 type; + /** @engine: The engine class, see enum guc_capture_list_class_type */ + u32 engine; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 9c505d3517cd..8aeb1789805c 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -8,6 +8,7 @@ #include <linux/bitfield.h> #include <linux/circ_buf.h> #include <linux/delay.h> +#include <linux/fault-inject.h> #include <kunit/static_stub.h> @@ -17,6 +18,7 @@ #include "abi/guc_actions_sriov_abi.h" #include "abi/guc_klvs_abi.h" #include "xe_bo.h" +#include "xe_devcoredump.h" #include "xe_device.h" #include "xe_gt.h" #include "xe_gt_pagefault.h" @@ -25,12 +27,48 @@ #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_tlb_invalidation.h" #include "xe_guc.h" +#include "xe_guc_log.h" #include "xe_guc_relay.h" #include "xe_guc_submit.h" #include "xe_map.h" #include "xe_pm.h" #include "xe_trace_guc.h" +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +enum { + /* Internal states, not error conditions */ + CT_DEAD_STATE_REARM, /* 0x0001 */ + CT_DEAD_STATE_CAPTURE, /* 0x0002 */ + + /* Error conditions */ + CT_DEAD_SETUP, /* 0x0004 */ + CT_DEAD_H2G_WRITE, /* 0x0008 */ + CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */ + CT_DEAD_G2H_READ, /* 0x0020 */ + CT_DEAD_G2H_RECV, /* 0x0040 */ + CT_DEAD_G2H_RELEASE, /* 0x0080 */ + CT_DEAD_DEADLOCK, /* 0x0100 */ + CT_DEAD_PROCESS_FAILED, /* 0x0200 */ + CT_DEAD_FAST_G2H, /* 0x0400 */ + CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */ + CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */ + CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */ + CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */ +}; + +static void ct_dead_worker_func(struct work_struct *w); +static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code); + +#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code) +#else +#define CT_DEAD(ct, ctb, reason) \ + do { \ + struct guc_ctb *_ctb = (ctb); \ + if (_ctb) \ + _ctb->info.broken = true; \ + } while (0) +#endif + /* Used when a CT send wants to block and / or receive data */ struct g2h_fence { u32 *response_buffer; @@ -175,14 +213,18 @@ int xe_guc_ct_init(struct 
xe_guc_ct *ct) xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE)); - ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0); + ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM); if (!ct->g2h_wq) return -ENOMEM; spin_lock_init(&ct->fast_lock); xa_init(&ct->fence_lookup); INIT_WORK(&ct->g2h_worker, g2h_worker_func); - INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); + INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + spin_lock_init(&ct->dead.lock); + INIT_WORK(&ct->dead.worker, ct_dead_worker_func); +#endif init_waitqueue_head(&ct->wq); init_waitqueue_head(&ct->g2h_fence_wq); @@ -209,6 +251,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) ct->state = XE_GUC_CT_STATE_DISABLED; return 0; } +ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */ #define desc_read(xe_, guc_ctb__, field_) \ xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \ @@ -395,6 +438,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) xe_gt_assert(gt, !xe_guc_ct_enabled(ct)); + xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size); guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); @@ -419,10 +463,22 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) if (ct_needs_safe_mode(ct)) ct_enter_safe_mode(ct); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + /* + * The CT has now been reset so the dumper can be re-armed + * after any existing dead state has been dumped. + */ + spin_lock_irq(&ct->dead.lock); + if (ct->dead.reason) + ct->dead.reason |= (1 << CT_DEAD_STATE_REARM); + spin_unlock_irq(&ct->dead.lock); +#endif + return 0; err_out: xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err)); + CT_DEAD(ct, NULL, SETUP); return err; } @@ -466,6 +522,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len) if (cmd_len > h2g->info.space) { h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); + + if (h2g->info.head > h2g->info.size) { + struct xe_device *xe = ct_to_xe(ct); + u32 desc_status = desc_read(xe, h2g, status); + + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + + xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n", + h2g->info.head, h2g->info.size); + CT_DEAD(ct, h2g, H2G_HAS_ROOM); + return false; + } + h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head, h2g->info.size) - h2g->info.resv_space; @@ -521,10 +590,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) { + bool bad = false; + lockdep_assert_held(&ct->fast_lock); - xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <= - ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); - xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding); + + bad = ct->ctbs.g2h.info.space + g2h_len > + ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space; + bad |= !ct->g2h_outstanding; + + if (bad) { + xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n", + ct->ctbs.g2h.info.space, g2h_len, + ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space, + ct->ctbs.g2h.info.space + g2h_len, + ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space, + ct->g2h_outstanding); + CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE); + return; + } ct->ctbs.g2h.info.space += g2h_len; if (!--ct->g2h_outstanding) @@ -551,12 +634,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 full_len; struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds, tail * sizeof(u32)); + 
u32 desc_status; full_len = len + GUC_CTB_HDR_LEN; lockdep_assert_held(&ct->lock); xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN); - xe_gt_assert(gt, tail <= h2g->info.size); + + desc_status = desc_read(xe, h2g, status); + if (desc_status) { + xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status); + goto corrupted; + } + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + u32 desc_tail = desc_read(xe, h2g, tail); + u32 desc_head = desc_read(xe, h2g, head); + + if (tail != desc_tail) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH); + xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail); + goto corrupted; + } + + if (tail > h2g->info.size) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n", + tail, h2g->info.size); + goto corrupted; + } + + if (desc_head >= h2g->info.size) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n", + desc_head, h2g->info.size); + goto corrupted; + } + } /* Command will wrap, zero fill (NOPs), return and check credits again */ if (tail + full_len > h2g->info.size) { @@ -609,6 +723,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, desc_read(xe, h2g, head), h2g->info.tail); return 0; + +corrupted: + CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE); + return -EPIPE; } /* @@ -716,7 +834,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, { struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); - struct drm_printer p = xe_gt_info_printer(gt); unsigned int sleep_period_ms = 1; int ret; @@ -769,8 +886,13 @@ try_again: goto broken; #undef g2h_avail - if (dequeue_one_g2h(ct) < 0) + ret = dequeue_one_g2h(ct); + if (ret < 0) { + if (ret != -ECANCELED) + xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)", + ERR_PTR(ret)); goto broken; + } goto try_again; } @@ -779,8 +901,7 @@ try_again: broken: xe_gt_err(gt, "No forward process on H2G, reset required\n"); - xe_guc_ct_print(ct, &p, true); - ct->ctbs.h2g.info.broken = true; + CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK); return -EDEADLK; } @@ -848,7 +969,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret) #define ct_alive(ct) \ (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \ !ct->ctbs.g2h.info.broken) - if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) + if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) return false; #undef ct_alive @@ -890,7 +1011,7 @@ retry_same_fence: goto retry_same_fence; if (!g2h_fence_needs_alloc(&g2h_fence)) - xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno); + xa_erase(&ct->fence_lookup, g2h_fence.seqno); return ret; } @@ -916,7 +1037,7 @@ retry_same_fence: if (!ret) { xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s", g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done)); - xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno); + xa_erase(&ct->fence_lookup, g2h_fence.seqno); mutex_unlock(&ct->lock); return -ETIME; } @@ -1028,6 +1149,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) else xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n", type, fence); + CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE); return -EPROTO; } @@ -1035,6 +1157,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) g2h_fence = xa_erase(&ct->fence_lookup, fence); if (unlikely(!g2h_fence)) { /* Don't tear down channel, as send could've timed out 
*/ + /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */ xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence); g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); return 0; @@ -1079,7 +1202,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) { xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n", origin); - ct->ctbs.g2h.info.broken = true; + CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN); return -EPROTO; } @@ -1097,7 +1220,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) default: xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n", type); - ct->ctbs.g2h.info.broken = true; + CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE); ret = -EOPNOTSUPP; } @@ -1140,6 +1263,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) /* Selftest only at the moment */ break; case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION: + ret = xe_guc_error_capture_handler(guc, payload, adj_len); + break; case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE: /* FIXME: Handle this */ break; @@ -1174,9 +1299,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action); } - if (ret) + if (ret) { xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", action, ERR_PTR(ret)); + CT_DEAD(ct, NULL, PROCESS_FAILED); + } return 0; } @@ -1186,7 +1313,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); struct guc_ctb *g2h = &ct->ctbs.g2h; - u32 tail, head, len; + u32 tail, head, len, desc_status; s32 avail; u32 action; u32 *hxg; @@ -1205,6 +1332,63 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) xe_gt_assert(gt, xe_guc_ct_enabled(ct)); + desc_status = desc_read(xe, g2h, status); + if (desc_status) { + if (desc_status & GUC_CTB_STATUS_DISABLED) { + /* + * Potentially valid if a CLIENT_RESET request resulted in + * contexts/engines being reset. But should never happen as + * no contexts should be active when CLIENT_RESET is sent. + */ + xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n"); + desc_status &= ~GUC_CTB_STATUS_DISABLED; + } + + if (desc_status) { + xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status); + goto corrupted; + } + } + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + u32 desc_tail = desc_read(xe, g2h, tail); + /* + u32 desc_head = desc_read(xe, g2h, head); + + * info.head and desc_head are updated back-to-back at the end of + * this function and nowhere else. Hence, they cannot be different + * unless two g2h_read calls are running concurrently, which is not + * possible because the path is guarded by ct->fast_lock. And yet, some + * discrete platforms are regularly hitting this error :(. + * + * desc_head rolling backwards shouldn't cause any noticeable + * problems - just a delay in GuC being allowed to proceed past that + * point in the queue. So for now, just disable the error until it + * can be root caused. 
+ * + if (g2h->info.head != desc_head) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH); + xe_gt_err(gt, "CT read: head was modified %u != %u\n", + desc_head, g2h->info.head); + goto corrupted; + } + */ + + if (g2h->info.head > g2h->info.size) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT read: head out of range: %u vs %u\n", + g2h->info.head, g2h->info.size); + goto corrupted; + } + + if (desc_tail >= g2h->info.size) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n", + desc_tail, g2h->info.size); + goto corrupted; + } + } + /* Calculate DW available to read */ tail = desc_read(xe, g2h, tail); avail = tail - g2h->info.head; @@ -1221,9 +1405,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) if (len > avail) { xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n", avail, len); - g2h->info.broken = true; - - return -EPROTO; + goto corrupted; } head = (g2h->info.head + 1) % g2h->info.size; @@ -1269,6 +1451,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) action, len, g2h->info.head, tail); return len; + +corrupted: + CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ); + return -EPROTO; } static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) @@ -1295,9 +1481,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) xe_gt_warn(gt, "NOT_POSSIBLE"); } - if (ret) + if (ret) { xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", action, ERR_PTR(ret)); + CT_DEAD(ct, NULL, FAST_G2H); + } } /** @@ -1357,7 +1545,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct) static void receive_g2h(struct xe_guc_ct *ct) { - struct xe_gt *gt = ct_to_gt(ct); bool ongoing; int ret; @@ -1394,9 +1581,8 @@ static void receive_g2h(struct xe_guc_ct *ct) mutex_unlock(&ct->lock); if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) { - struct drm_printer p = xe_gt_info_printer(gt); - - xe_guc_ct_print(ct, &p, false); + xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret); + CT_DEAD(ct, NULL, G2H_RECV); kick_reset(ct); } } while (ret == 1); @@ -1412,49 +1598,34 @@ static void g2h_worker_func(struct work_struct *w) receive_g2h(ct); } -static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb, - struct guc_ctb_snapshot *snapshot, - bool atomic) +static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic, + bool want_ctb) { - u32 head, tail; - - xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0, - sizeof(struct guc_ct_buffer_desc)); - memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info)); + struct xe_guc_ct_snapshot *snapshot; - snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32), - atomic ? GFP_ATOMIC : GFP_KERNEL); + snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL); + if (!snapshot) + return NULL; - if (!snapshot->cmds) { - drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n"); - return; + if (ct->bo && want_ctb) { + snapshot->ctb_size = ct->bo->size; + snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? 
GFP_ATOMIC : GFP_KERNEL); } - head = snapshot->desc.head; - tail = snapshot->desc.tail; - - if (head != tail) { - struct iosys_map map = - IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32)); - - while (head != tail) { - snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32); - ++head; - if (head == ctb->info.size) { - head = 0; - map = ctb->cmds; - } else { - iosys_map_incr(&map, sizeof(u32)); - } - } - } + return snapshot; +} + +static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb, + struct guc_ctb_snapshot *snapshot) +{ + xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0, + sizeof(struct guc_ct_buffer_desc)); + memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info)); } static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot, struct drm_printer *p) { - u32 head, tail; - drm_printf(p, "\tsize: %d\n", snapshot->info.size); drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space); drm_printf(p, "\thead: %d\n", snapshot->info.head); @@ -1464,63 +1635,46 @@ static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot, drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head); drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail); drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status); +} - if (!snapshot->cmds) - return; +static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic, + bool want_ctb) +{ + struct xe_device *xe = ct_to_xe(ct); + struct xe_guc_ct_snapshot *snapshot; - head = snapshot->desc.head; - tail = snapshot->desc.tail; + snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb); + if (!snapshot) { + xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n"); + return NULL; + } - while (head != tail) { - drm_printf(p, "\tcmd[%d]: 0x%08x\n", head, - snapshot->cmds[head]); - ++head; - if (head == snapshot->info.size) - head = 0; + if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) { + snapshot->ct_enabled = true; + snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); + guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g); + guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h); } -} -static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot) -{ - kfree(snapshot->cmds); + if (ct->bo && snapshot->ctb) + xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size); + + return snapshot; } /** * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state. * @ct: GuC CT object. - * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. * * This can be printed out in a later stage like during dev_coredump - * analysis. + * analysis. This is safe to be called during atomic context. * * Returns: a GuC CT snapshot object that must be freed by the caller * by using `xe_guc_ct_snapshot_free`. */ -struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, - bool atomic) +struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct) { - struct xe_device *xe = ct_to_xe(ct); - struct xe_guc_ct_snapshot *snapshot; - - snapshot = kzalloc(sizeof(*snapshot), - atomic ? 
GFP_ATOMIC : GFP_KERNEL); - - if (!snapshot) { - drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n"); - return NULL; - } - - if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) { - snapshot->ct_enabled = true; - snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); - guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, - &snapshot->h2g, atomic); - guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, - &snapshot->g2h, atomic); - } - - return snapshot; + return guc_ct_snapshot_capture(ct, true, true); } /** @@ -1540,11 +1694,13 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, drm_puts(p, "H2G CTB (all sizes in DW):\n"); guc_ctb_snapshot_print(&snapshot->h2g, p); - drm_puts(p, "\nG2H CTB (all sizes in DW):\n"); + drm_puts(p, "G2H CTB (all sizes in DW):\n"); guc_ctb_snapshot_print(&snapshot->g2h, p); - drm_printf(p, "\tg2h outstanding: %d\n", snapshot->g2h_outstanding); + + if (snapshot->ctb) + xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size); } else { drm_puts(p, "CT disabled\n"); } @@ -1562,8 +1718,7 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) if (!snapshot) return; - guc_ctb_snapshot_free(&snapshot->h2g); - guc_ctb_snapshot_free(&snapshot->g2h); + kfree(snapshot->ctb); kfree(snapshot); } @@ -1571,16 +1726,121 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) * xe_guc_ct_print - GuC CT Print. * @ct: GuC CT. * @p: drm_printer where it will be printed out. - * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. + * @want_ctb: Should the full CTB content be dumped (vs just the headers) * - * This function quickly capture a snapshot and immediately print it out. + * This function will quickly capture a snapshot of the CT state + * and immediately print it out. 
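 *
 * A minimal usage sketch (hypothetical caller, not part of this patch; it
 * assumes a valid @ct and reuses the xe_gt_info_printer() seen elsewhere in
 * this file)::
 *
 *	struct drm_printer p = xe_gt_info_printer(ct_to_gt(ct));
 *
 *	xe_guc_ct_print(ct, &p, false);
 *
 * Passing want_ctb = true additionally dumps the raw CTB contents, as the
 * guc_ctb debugfs node added below does.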
*/ -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic) +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb) { struct xe_guc_ct_snapshot *snapshot; - snapshot = xe_guc_ct_snapshot_capture(ct, atomic); + snapshot = guc_ct_snapshot_capture(ct, false, want_ctb); xe_guc_ct_snapshot_print(snapshot, p); xe_guc_ct_snapshot_free(snapshot); } + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code) +{ + struct xe_guc_log_snapshot *snapshot_log; + struct xe_guc_ct_snapshot *snapshot_ct; + struct xe_guc *guc = ct_to_guc(ct); + unsigned long flags; + bool have_capture; + + if (ctb) + ctb->info.broken = true; + + /* Ignore further errors after the first dump until a reset */ + if (ct->dead.reported) + return; + + spin_lock_irqsave(&ct->dead.lock, flags); + + /* And only capture one dump at a time */ + have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE); + ct->dead.reason |= (1 << reason_code) | + (1 << CT_DEAD_STATE_CAPTURE); + + spin_unlock_irqrestore(&ct->dead.lock, flags); + + if (have_capture) + return; + + snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true); + snapshot_ct = xe_guc_ct_snapshot_capture((ct)); + + spin_lock_irqsave(&ct->dead.lock, flags); + + if (ct->dead.snapshot_log || ct->dead.snapshot_ct) { + xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n"); + xe_guc_log_snapshot_free(snapshot_log); + xe_guc_ct_snapshot_free(snapshot_ct); + } else { + ct->dead.snapshot_log = snapshot_log; + ct->dead.snapshot_ct = snapshot_ct; + } + + spin_unlock_irqrestore(&ct->dead.lock, flags); + + queue_work(system_unbound_wq, &(ct)->dead.worker); +} + +static void ct_dead_print(struct xe_dead_ct *dead) +{ + struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead); + struct xe_device *xe = ct_to_xe(ct); + struct xe_gt *gt = ct_to_gt(ct); + static int g_count; + struct drm_printer ip = xe_gt_info_printer(gt); + struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count); + + if (!dead->reason) { + xe_gt_err(gt, "CTB is dead for no reason!?\n"); + return; + } + + drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason); + + /* Can't generate a genuine core dump at this point, so just do the good bits */ + drm_puts(&lp, "**** Xe Device Coredump ****\n"); + xe_device_snapshot_print(xe, &lp); + + drm_printf(&lp, "**** GT #%d ****\n", gt->info.id); + drm_printf(&lp, "\tTile: %d\n", gt->tile->id); + + drm_puts(&lp, "**** GuC Log ****\n"); + xe_guc_log_snapshot_print(dead->snapshot_log, &lp); + + drm_puts(&lp, "**** GuC CT ****\n"); + xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp); + + drm_puts(&lp, "Done.\n"); +} + +static void ct_dead_worker_func(struct work_struct *w) +{ + struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker); + + if (!ct->dead.reported) { + ct->dead.reported = true; + ct_dead_print(&ct->dead); + } + + spin_lock_irq(&ct->dead.lock); + + xe_guc_log_snapshot_free(ct->dead.snapshot_log); + ct->dead.snapshot_log = NULL; + xe_guc_ct_snapshot_free(ct->dead.snapshot_ct); + ct->dead.snapshot_ct = NULL; + + if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) { + /* A reset has occurred so re-arm the error reporting */ + ct->dead.reason = 0; + ct->dead.reported = false; + } + + spin_unlock_irq(&ct->dead.lock); +} +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 190202fce2d0..82c4ae458dda 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -9,6 
+9,7 @@ #include "xe_guc_ct_types.h" struct drm_printer; +struct xe_device; int xe_guc_ct_init(struct xe_guc_ct *ct); int xe_guc_ct_enable(struct xe_guc_ct *ct); @@ -16,12 +17,10 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct); void xe_guc_ct_stop(struct xe_guc_ct *ct); void xe_guc_ct_fast_path(struct xe_guc_ct *ct); -struct xe_guc_ct_snapshot * -xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic); -void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, - struct drm_printer *p); +struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct); +void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p); void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic); +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb); static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct) { diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h index 761cb9031298..8e1b9d981d61 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h @@ -52,8 +52,6 @@ struct guc_ctb { struct guc_ctb_snapshot { /** @desc: snapshot of the CTB descriptor */ struct guc_ct_buffer_desc desc; - /** @cmds: snapshot of the CTB commands */ - u32 *cmds; /** @info: snapshot of the CTB info */ struct guc_ctb_info info; }; @@ -70,6 +68,10 @@ struct xe_guc_ct_snapshot { struct guc_ctb_snapshot g2h; /** @h2g: H2G CTB snapshot */ struct guc_ctb_snapshot h2g; + /** @ctb_size: size of the snapshot of the CTB */ + size_t ctb_size; + /** @ctb: snapshot of the entire CTB */ + u32 *ctb; }; /** @@ -86,6 +88,24 @@ enum xe_guc_ct_state { XE_GUC_CT_STATE_ENABLED, }; +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +/** struct xe_dead_ct - Information for debugging a dead CT */ +struct xe_dead_ct { + /** @lock: protects memory allocation/free operations, and @reason updates */ + spinlock_t lock; + /** @reason: bit mask of CT_DEAD_* reason codes */ + unsigned int reason; + /** @reported: for preventing multiple dumps per error sequence */ + bool reported; + /** @worker: worker thread to get out of interrupt context before dumping */ + struct work_struct worker; + /** @snapshot_ct: copy of CT state and CTB content at point of error */ + struct xe_guc_ct_snapshot *snapshot_ct; + /** @snapshot_log: copy of GuC log at point of error */ + struct xe_guc_log_snapshot *snapshot_log; +}; +#endif + /** * struct xe_guc_ct - GuC command transport (CT) layer * @@ -128,6 +148,11 @@ struct xe_guc_ct { u32 msg[GUC_CTB_MSG_MAX_LEN]; /** @fast_msg: Message buffer */ u32 fast_msg[GUC_CTB_MSG_MAX_LEN]; + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + /** @dead: information for debugging dead CTs */ + struct xe_dead_ct dead; +#endif }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c index d3822cbea273..995b306aced7 100644 --- a/drivers/gpu/drm/xe/xe_guc_debugfs.c +++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c @@ -47,9 +47,23 @@ static int guc_log(struct seq_file *m, void *data) return 0; } +static int guc_ctb(struct seq_file *m, void *data) +{ + struct xe_guc *guc = node_to_guc(m->private); + struct xe_device *xe = guc_to_xe(guc); + struct drm_printer p = drm_seq_file_printer(m); + + xe_pm_runtime_get(xe); + xe_guc_ct_print(&guc->ct, &p, true); + xe_pm_runtime_put(xe); + + return 0; +} + static const struct drm_info_list debugfs_list[] = { {"guc_info", guc_info, 0}, {"guc_log", guc_log, 0}, + {"guc_ctb",
guc_ctb, 0}, }; void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent) diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index 19ee71aeaf17..08ffe59f22fa 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -8,7 +8,9 @@ #include <linux/bits.h> +#include "abi/guc_capture_abi.h" #include "abi/guc_klvs_abi.h" +#include "xe_hw_engine_types.h" #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4 #define G2H_LEN_DW_DEREGISTER_CONTEXT 3 @@ -103,6 +105,7 @@ struct guc_update_exec_queue_policy { #define GUC_CTL_FEATURE 2 #define GUC_CTL_ENABLE_SLPC BIT(2) +#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) #define GUC_CTL_DEBUG 3 @@ -157,24 +160,6 @@ struct guc_policies { u32 reserved[4]; } __packed; -/* GuC MMIO reg state struct */ -struct guc_mmio_reg { - u32 offset; - u32 value; - u32 flags; - u32 mask; -#define GUC_REGSET_MASKED BIT(0) -#define GUC_REGSET_MASKED_WITH_VALUE BIT(2) -#define GUC_REGSET_RESTORE_ONLY BIT(3) -} __packed; - -/* GuC register sets */ -struct guc_mmio_reg_set { - u32 address; - u16 count; - u16 reserved; -} __packed; - /* Generic GT SysInfo data types */ #define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0 #define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1 @@ -188,12 +173,6 @@ struct guc_gt_system_info { u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX]; } __packed; -enum { - GUC_CAPTURE_LIST_INDEX_PF = 0, - GUC_CAPTURE_LIST_INDEX_VF = 1, - GUC_CAPTURE_LIST_INDEX_MAX = 2, -}; - /* GuC Additional Data Struct */ struct guc_ads { struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; diff --git a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h index da0fedbbdbaf..da10cf0389cb 100644 --- a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h +++ b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h @@ -18,6 +18,13 @@ MAKE_GUC_KLV_KEY(CONCATENATE(VF_CFG_THRESHOLD_, TAG)) /** + * MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN - Prepare the name of the KLV length constant. + * @TAG: unique tag of the GuC threshold KLV key. + */ +#define MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) \ + MAKE_GUC_KLV_LEN(CONCATENATE(VF_CFG_THRESHOLD_, TAG)) + +/** * xe_guc_klv_threshold_key_to_index - Find index of the tracked GuC threshold. * @key: GuC threshold KLV key. * diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index a37ee3419428..df4cfb698cdb 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -5,13 +5,26 @@ #include "xe_guc_log.h" +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> +#include "regs/xe_guc_regs.h" #include "xe_bo.h" +#include "xe_devcoredump.h" +#include "xe_force_wake.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_map.h" +#include "xe_mmio.h" #include "xe_module.h" +static struct xe_guc * +log_to_guc(struct xe_guc_log *log) +{ + return container_of(log, struct xe_guc, log); +} + static struct xe_gt * log_to_gt(struct xe_guc_log *log) { @@ -49,32 +62,194 @@ static size_t guc_log_size(void) CAPTURE_BUFFER_SIZE; } -void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p) +#define GUC_LOG_CHUNK_SIZE SZ_2M + +static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *log, bool atomic) +{ + struct xe_guc_log_snapshot *snapshot; + size_t remain; + int i; + + snapshot = kzalloc(sizeof(*snapshot), atomic ? 
GFP_ATOMIC : GFP_KERNEL); + if (!snapshot) + return NULL; + + /* + * NB: kmalloc has a hard limit well below the maximum GuC log buffer size. + * Also, can't use vmalloc as might be called from atomic context. So need + * to break the buffer up into smaller chunks that can be allocated. + */ + snapshot->size = log->bo->size; + snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE); + + snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy), + atomic ? GFP_ATOMIC : GFP_KERNEL); + if (!snapshot->copy) + goto fail_snap; + + remain = snapshot->size; + for (i = 0; i < snapshot->num_chunks; i++) { + size_t size = min(GUC_LOG_CHUNK_SIZE, remain); + + snapshot->copy[i] = kmalloc(size, atomic ? GFP_ATOMIC : GFP_KERNEL); + if (!snapshot->copy[i]) + goto fail_copy; + remain -= size; + } + + return snapshot; + +fail_copy: + for (i = 0; i < snapshot->num_chunks; i++) + kfree(snapshot->copy[i]); + kfree(snapshot->copy); +fail_snap: + kfree(snapshot); + return NULL; +} + +/** + * xe_guc_log_snapshot_free - free a previously captured GuC log snapshot + * @snapshot: GuC log snapshot structure to free; may be NULL + */ +void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot) +{ + int i; + + if (!snapshot) + return; + + if (snapshot->copy) { + for (i = 0; i < snapshot->num_chunks; i++) + kfree(snapshot->copy[i]); + kfree(snapshot->copy); + } + + kfree(snapshot); +} + +/** + * xe_guc_log_snapshot_capture - create a new snapshot copy of the GuC log for later dumping + * @log: GuC log structure + * @atomic: is the call inside an atomic section of some kind? + * + * Return: pointer to a newly allocated snapshot object or null if out of memory. Caller is + * responsible for calling xe_guc_log_snapshot_free when done with the snapshot.
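 *
 * A minimal usage sketch, assuming a valid @log and a drm_printer @p (this is
 * the same capture/print/free sequence that xe_guc_log_print() uses below)::
 *
 *	struct xe_guc_log_snapshot *snapshot;
 *
 *	snapshot = xe_guc_log_snapshot_capture(log, false);
 *	xe_guc_log_snapshot_print(snapshot, p);
 *	xe_guc_log_snapshot_free(snapshot);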
+ */ +struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic) { + struct xe_guc_log_snapshot *snapshot; struct xe_device *xe = log_to_xe(log); - size_t size; - int i, j; + struct xe_guc *guc = log_to_guc(log); + struct xe_gt *gt = log_to_gt(log); + unsigned int fw_ref; + size_t remain; + int i; - xe_assert(xe, log->bo); + if (!log->bo) { + xe_gt_err(gt, "GuC log buffer not allocated\n"); + return NULL; + } - size = log->bo->size; + snapshot = xe_guc_log_snapshot_alloc(log, atomic); + if (!snapshot) { + xe_gt_err(gt, "GuC log snapshot not allocated\n"); + return NULL; + } -#define DW_PER_READ 128 - xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32)))); - for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) { - u32 read[DW_PER_READ]; + remain = snapshot->size; + for (i = 0; i < snapshot->num_chunks; i++) { + size_t size = min(GUC_LOG_CHUNK_SIZE, remain); - xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32), - DW_PER_READ * sizeof(u32)); -#define DW_PER_PRINT 4 - for (j = 0; j < DW_PER_READ / DW_PER_PRINT; ++j) { - u32 *print = read + j * DW_PER_PRINT; + xe_map_memcpy_from(xe, snapshot->copy[i], &log->bo->vmap, + i * GUC_LOG_CHUNK_SIZE, size); + remain -= size; + } - drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n", - *(print + 0), *(print + 1), - *(print + 2), *(print + 3)); - } + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) { + snapshot->stamp = ~0ULL; + } else { + snapshot->stamp = xe_mmio_read64_2x32(&gt->mmio, GUC_PMTIMESTAMP_LO); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } + snapshot->ktime = ktime_get_boottime_ns(); + snapshot->level = log->level; + snapshot->ver_found = guc->fw.versions.found[XE_UC_FW_VER_RELEASE]; + snapshot->ver_want = guc->fw.versions.wanted; + snapshot->path = guc->fw.path; + + return snapshot; +} + +/** + * xe_guc_log_snapshot_print - dump a previously saved copy of the GuC log to some useful location + * @snapshot: a snapshot of the GuC log + * @p: the printer object to output to + */ +void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p) +{ + size_t remain; + int i; + + if (!snapshot) { + drm_printf(p, "GuC log snapshot not allocated!\n"); + return; + } + + drm_printf(p, "GuC firmware: %s\n", snapshot->path); + drm_printf(p, "GuC version: %u.%u.%u (wanted %u.%u.%u)\n", + snapshot->ver_found.major, snapshot->ver_found.minor, snapshot->ver_found.patch, + snapshot->ver_want.major, snapshot->ver_want.minor, snapshot->ver_want.patch); + drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", snapshot->ktime, snapshot->ktime); + drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp); + drm_printf(p, "Log level: %u\n", snapshot->level); + + remain = snapshot->size; + for (i = 0; i < snapshot->num_chunks; i++) { + size_t size = min(GUC_LOG_CHUNK_SIZE, remain); + + xe_print_blob_ascii85(p, i ?
NULL : "Log data", snapshot->copy[i], 0, size); + remain -= size; + } +} + +/** + * xe_guc_log_print_dmesg - dump a copy of the GuC log to dmesg + * @log: GuC log structure + */ +void xe_guc_log_print_dmesg(struct xe_guc_log *log) +{ + struct xe_gt *gt = log_to_gt(log); + static int g_count; + struct drm_printer ip = xe_gt_info_printer(gt); + struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count); + + drm_printf(&lp, "Dumping GuC log for %ps...\n", __builtin_return_address(0)); + + xe_guc_log_print(log, &lp); + + drm_printf(&lp, "Done.\n"); +} + +/** + * xe_guc_log_print - dump a copy of the GuC log to some useful location + * @log: GuC log structure + * @p: the printer object to output to + */ +void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p) +{ + struct xe_guc_log_snapshot *snapshot; + + drm_printf(p, "**** GuC Log ****\n"); + + snapshot = xe_guc_log_snapshot_capture(log, false); + drm_printf(p, "CS reference clock: %u\n", log_to_gt(log)->info.reference_clock); + xe_guc_log_snapshot_print(snapshot, p); + xe_guc_log_snapshot_free(snapshot); } int xe_guc_log_init(struct xe_guc_log *log) @@ -96,3 +271,105 @@ int xe_guc_log_init(struct xe_guc_log *log) return 0; } + +ALLOW_ERROR_INJECTION(xe_guc_log_init, ERRNO); /* See xe_pci_probe() */ + +static u32 xe_guc_log_section_size_crash(struct xe_guc_log *log) +{ + return CRASH_BUFFER_SIZE; +} + +static u32 xe_guc_log_section_size_debug(struct xe_guc_log *log) +{ + return DEBUG_BUFFER_SIZE; +} + +/** + * xe_guc_log_section_size_capture - Get capture buffer size within log sections. + * @log: The log object. + * + * This function will return the capture buffer size within log sections. + * + * Return: capture buffer size. + */ +u32 xe_guc_log_section_size_capture(struct xe_guc_log *log) +{ + return CAPTURE_BUFFER_SIZE; +} + +/** + * xe_guc_get_log_buffer_size - Get log buffer size for a type. + * @log: The log object. + * @type: The log buffer type + * + * Return: buffer size. + */ +u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type) +{ + switch (type) { + case GUC_LOG_BUFFER_CRASH_DUMP: + return xe_guc_log_section_size_crash(log); + case GUC_LOG_BUFFER_DEBUG: + return xe_guc_log_section_size_debug(log); + case GUC_LOG_BUFFER_CAPTURE: + return xe_guc_log_section_size_capture(log); + } + return 0; +} + +/** + * xe_guc_get_log_buffer_offset - Get offset in log buffer for a type. + * @log: The log object. + * @type: The log buffer type + * + * This function will return the offset in the log buffer for a type. + * Return: buffer offset. + */ +u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type) +{ + enum guc_log_buffer_type i; + u32 offset = PAGE_SIZE; /* for the log_buffer_states */ + + for (i = GUC_LOG_BUFFER_CRASH_DUMP; i < GUC_LOG_BUFFER_TYPE_MAX; ++i) { + if (i == type) + break; + offset += xe_guc_get_log_buffer_size(log, i); + } + + return offset; +} + +/** + * xe_guc_check_log_buf_overflow - Check if log buffer overflowed + * @log: The log object. + * @type: The log buffer type + * @full_cnt: The count of buffer full + * + * This function checks the current buffer-full count against the previously + * sampled one; a mismatch indicates an overflow. + * It updates the sampled_overflow counter and, because buffer_full_cnt is only + * a 4 bit hardware counter, adds 16 whenever the counter has wrapped around. + * + * Return: True if overflowed.
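 *
 * Worked example with illustrative numbers: if the previously sampled value
 * was 14 and the 4 bit hardware counter has since wrapped around to 2, the
 * raw delta 2 - 14 = -12 is corrected by adding 16, so sampled_overflow
 * grows by 4, matching the four buffer-full events that actually occurred.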
+ */ +bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log, enum guc_log_buffer_type type, + unsigned int full_cnt) +{ + unsigned int prev_full_cnt = log->stats[type].sampled_overflow; + bool overflow = false; + + if (full_cnt != prev_full_cnt) { + overflow = true; + + log->stats[type].overflow = full_cnt; + log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; + + if (full_cnt < prev_full_cnt) { + /* buffer_full_cnt is a 4 bit counter */ + log->stats[type].sampled_overflow += 16; + } + xe_gt_notice(log_to_gt(log), "log buffer overflow\n"); + } + + return overflow; +} diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h index 2d25ab28b4b3..5b896f5fafaf 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.h +++ b/drivers/gpu/drm/xe/xe_guc_log.h @@ -7,8 +7,10 @@ #define _XE_GUC_LOG_H_ #include "xe_guc_log_types.h" +#include "abi/guc_log_abi.h" struct drm_printer; +struct xe_device; #if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER) #define CRASH_BUFFER_SIZE SZ_1M @@ -17,7 +19,7 @@ struct drm_printer; #else #define CRASH_BUFFER_SIZE SZ_8K #define DEBUG_BUFFER_SIZE SZ_64K -#define CAPTURE_BUFFER_SIZE SZ_16K +#define CAPTURE_BUFFER_SIZE SZ_1M #endif /* * While we're using plain log level in i915, GuC controls are much more... @@ -38,6 +40,10 @@ struct drm_printer; int xe_guc_log_init(struct xe_guc_log *log); void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p); +void xe_guc_log_print_dmesg(struct xe_guc_log *log); +struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic); +void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p); +void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot); static inline u32 xe_guc_log_get_level(struct xe_guc_log *log) @@ -45,4 +51,11 @@ xe_guc_log_get_level(struct xe_guc_log *log) return log->level; } +u32 xe_guc_log_section_size_capture(struct xe_guc_log *log); +u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type); +u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type); +bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log, + enum guc_log_buffer_type type, + unsigned int full_cnt); + #endif diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h index 125080d138a7..b3d5c72ac752 100644 --- a/drivers/gpu/drm/xe/xe_guc_log_types.h +++ b/drivers/gpu/drm/xe/xe_guc_log_types.h @@ -7,10 +7,38 @@ #define _XE_GUC_LOG_TYPES_H_ #include <linux/types.h> +#include "abi/guc_log_abi.h" + +#include "xe_uc_fw_types.h" struct xe_bo; /** + * struct xe_guc_log_snapshot: + * Capture of the GuC log plus various state useful for decoding the log + */ +struct xe_guc_log_snapshot { + /** @size: Size in bytes of the @copy allocation */ + size_t size; + /** @copy: Host memory copy of the log buffer for later dumping, split into chunks */ + void **copy; + /** @num_chunks: Number of chunks within @copy */ + int num_chunks; + /** @ktime: Kernel time the snapshot was taken */ + u64 ktime; + /** @stamp: GuC timestamp at which the snapshot was taken */ + u64 stamp; + /** @level: GuC log verbosity level */ + u32 level; + /** @ver_found: GuC firmware version */ + struct xe_uc_fw_version ver_found; + /** @ver_want: GuC firmware version that driver expected */ + struct xe_uc_fw_version ver_want; + /** @path: Path of GuC firmware blob */ + const char *path; +}; + +/** * struct xe_guc_log - GuC log */ struct xe_guc_log { @@ -18,6 +46,12 @@ struct xe_guc_log { u32 level; /** 
@bo: XE BO for GuC log */ struct xe_bo *bo; + /** @stats: logging related stats */ + struct { + u32 sampled_overflow; + u32 overflow; + u32 flush; + } stats[GUC_LOG_BUFFER_TYPE_MAX]; }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 034b29984d5e..e8b9faeaef64 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -262,7 +262,7 @@ static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable) u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE; /* Allow/Disallow punit to process software freq requests */ - xe_mmio_write32(gt, RP_CONTROL, state); + xe_mmio_write32(&gt->mmio, RP_CONTROL, state); } static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq) @@ -274,7 +274,7 @@ static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq) /* Req freq is in units of 16.66 MHz */ rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq)); - xe_mmio_write32(gt, RPNSWREQ, rpnswreq); + xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq); /* Sleep for a small time to allow pcode to respond */ usleep_range(100, 300); @@ -334,9 +334,9 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc) u32 reg; if (xe_gt_is_media_type(gt)) - reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY); + reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY); else - reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY); + reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY); pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg)); } @@ -353,9 +353,9 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc) * PCODE at a different register */ if (xe->info.platform == XE_PVC) - reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP); else - reg = xe_mmio_read32(gt, FREQ_INFO_REC); + reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC); pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } @@ -392,10 +392,10 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) /* When in RC6, actual frequency reported will be 0. */ if (GRAPHICS_VERx100(xe) >= 1270) { - freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1); + freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1); freq = REG_FIELD_GET(MTL_CAGF_MASK, freq); } else { - freq = xe_mmio_read32(gt, GT_PERF_STATUS); + freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS); freq = REG_FIELD_GET(CAGF_MASK, freq); } @@ -415,22 +415,24 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) { struct xe_gt *gt = pc_to_gt(pc); - int ret; + unsigned int fw_ref; /* * GuC SLPC plays with cur freq request when GuCRC is enabled * Block RC6 for a more reliable read.
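 * Note (editorial, inferred from the calls added in this patch): the
 * reworked xe_force_wake_get() below returns a reference mask rather than
 * an error code, so success is checked with xe_force_wake_ref_has_domain()
 * against XE_FORCEWAKE_ALL, and even a partial reference must still be
 * released via xe_force_wake_put() on the failure path.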
*/ - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } - *freq = xe_mmio_read32(gt, RPNSWREQ); + *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ); *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq); *freq = decode_freq(*freq); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; } @@ -480,6 +482,7 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) { struct xe_gt *gt = pc_to_gt(pc); + unsigned int fw_ref; int ret; mutex_lock(&pc->freq_lock); @@ -493,9 +496,11 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) * GuC SLPC plays with min freq request when GuCRC is enabled * Block RC6 for a more reliable read. */ - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + ret = -ETIMEDOUT; + goto fw; + } ret = pc_action_query_task_state(pc); if (ret) @@ -504,7 +509,7 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) *freq = pc_get_min_freq(pc); fw: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); out: mutex_unlock(&pc->freq_lock); return ret; @@ -612,10 +617,10 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc) u32 reg, gt_c_state; if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { - reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1); + reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1); gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg); } else { - reg = xe_mmio_read32(gt, GT_CORE_STATUS); + reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS); gt_c_state = REG_FIELD_GET(RCN_MASK, reg); } @@ -638,7 +643,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u32 reg; - reg = xe_mmio_read32(gt, GT_GFX_RC6); + reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6); return reg; } @@ -652,7 +657,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u64 reg; - reg = xe_mmio_read32(gt, MTL_MEDIA_MC6); + reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6); return reg; } @@ -665,9 +670,9 @@ static void mtl_init_fused_rp_values(struct xe_guc_pc *pc) xe_device_assert_mem_access(pc_to_xe(pc)); if (xe_gt_is_media_type(gt)) - reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP); else - reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP); pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg)); @@ -683,9 +688,9 @@ static void tgl_init_fused_rp_values(struct xe_guc_pc *pc) xe_device_assert_mem_access(pc_to_xe(pc)); if (xe->info.platform == XE_PVC) - reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP); else - reg = xe_mmio_read32(gt, RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP); pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER; pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } @@ -855,6 +860,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) { struct xe_device *xe = pc_to_xe(pc); struct xe_gt *gt = pc_to_gt(pc); + unsigned int fw_ref; int ret = 0; if (xe->info.skip_guc_pc) @@ -864,13 +870,15 @@ int
xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) if (ret) return ret; - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } xe_gt_idle_disable_c6(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; } @@ -956,13 +964,16 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) struct xe_device *xe = pc_to_xe(pc); struct xe_gt *gt = pc_to_gt(pc); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + unsigned int fw_ref; int ret; xe_gt_assert(gt, xe_device_uc_enabled(xe)); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } if (xe->info.skip_guc_pc) { if (xe->info.platform != XE_PVC) @@ -1005,7 +1016,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL); out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return ret; } @@ -1037,18 +1048,19 @@ static void xe_guc_pc_fini_hw(void *arg) { struct xe_guc_pc *pc = arg; struct xe_device *xe = pc_to_xe(pc); + unsigned int fw_ref; if (xe_device_wedged(xe)) return; - XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL)); + fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); xe_guc_pc_gucrc_disable(pc); XE_WARN_ON(xe_guc_pc_stop(pc)); /* Bind requested freq to mert_freq_cap before unload */ pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq)); - xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref); } /** diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c index ade6162dc259..8f62de026724 100644 --- a/drivers/gpu/drm/xe/xe_guc_relay.c +++ b/drivers/gpu/drm/xe/xe_guc_relay.c @@ -5,6 +5,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> +#include <linux/fault-inject.h> #include <drm/drm_managed.h> @@ -355,6 +356,7 @@ int xe_guc_relay_init(struct xe_guc_relay *relay) return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay); } +ALLOW_ERROR_INJECTION(xe_guc_relay_init, ERRNO); /* See xe_pci_probe() */ static u32 to_relay_error(int err) { diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 4f5d00aea716..9a8564ea06b5 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -27,6 +27,7 @@ #include "xe_gt_clock.h" #include "xe_gt_printk.h" #include "xe_guc.h" +#include "xe_guc_capture.h" #include "xe_guc_ct.h" #include "xe_guc_exec_queue_types.h" #include "xe_guc_id_mgr.h" @@ -716,6 +717,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_device *xe = guc_to_xe(guc); + struct dma_fence *fence = NULL; bool lr = xe_exec_queue_is_lr(q); xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || @@ -733,12 +735,12 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) if (lr) { xe_sched_job_set_error(job, -EOPNOTSUPP); - return NULL; - } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) { - return job->fence; + 
dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */ } else { - return dma_fence_get(job->fence); + fence = job->fence; } + + return fence; } static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) @@ -749,7 +751,7 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) xe_sched_job_put(job); } -static int guc_read_stopped(struct xe_guc *guc) +int xe_guc_read_stopped(struct xe_guc *guc) { return atomic_read(&guc->submission_state.stopped); } @@ -771,7 +773,7 @@ static void disable_scheduling_deregister(struct xe_guc *guc, set_min_preemption_timeout(guc, q); smp_rmb(); ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc), HZ * 5); if (!ret) { struct xe_gpu_scheduler *sched = &q->guc->sched; @@ -897,7 +899,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) */ ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_disable(q) || - guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc), HZ * 5); if (!ret) { drm_warn(&xe->drm, "Schedule disable failed to respond"); xe_sched_submission_start(sched); @@ -975,8 +977,8 @@ static void enable_scheduling(struct xe_exec_queue *q) ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - guc_read_stopped(guc), HZ * 5); - if (!ret || guc_read_stopped(guc)) { + xe_guc_read_stopped(guc), HZ * 5); + if (!ret || xe_guc_read_stopped(guc)) { xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond"); set_exec_queue_banned(q); xe_gt_reset_async(q->gt); @@ -1031,6 +1033,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_guc *guc = exec_queue_to_guc(q); const char *process_name = "no process"; + struct xe_device *xe = guc_to_xe(guc); + unsigned int fw_ref; int err = -ETIME; pid_t pid = -1; int i = 0; @@ -1058,6 +1062,22 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) exec_queue_destroyed(q); /* + * If devcoredump not captured and GuC capture for the job is not ready + * do manual capture first and decide later if we need to use it + */ + if (!exec_queue_killed(q) && !xe->devcoredump.captured && + !xe_guc_capture_get_matching_and_lock(job)) { + /* take force wake before engine register manual capture */ + fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n"); + + xe_engine_snapshot_capture_for_job(job); + + xe_force_wake_put(gt_to_fw(q->gt), fw_ref); + } + + /* * XXX: Sampling timeout doesn't work in wedged mode as we have to * modify scheduling state to read timestamp. 
We could read the * timestamp from a register to accumulate current running time but this @@ -1080,8 +1100,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) */ ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - guc_read_stopped(guc), HZ * 5); - if (!ret || guc_read_stopped(guc)) + xe_guc_read_stopped(guc), HZ * 5); + if (!ret || xe_guc_read_stopped(guc)) goto trigger_reset; /* @@ -1105,8 +1125,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) smp_rmb(); ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_disable(q) || - guc_read_stopped(guc), HZ * 5); - if (!ret || guc_read_stopped(guc)) { + xe_guc_read_stopped(guc), HZ * 5); + if (!ret || xe_guc_read_stopped(guc)) { trigger_reset: if (!ret) xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond"); @@ -1295,7 +1315,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q) struct xe_device *xe = guc_to_xe(guc); xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) || - guc_read_stopped(guc)); + xe_guc_read_stopped(guc)); xe_assert(xe, q->guc->suspend_pending); __suspend_fence_signal(q); @@ -1309,9 +1329,9 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg) if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && exec_queue_enabled(q)) { wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING || - guc_read_stopped(guc)); + xe_guc_read_stopped(guc)); - if (!guc_read_stopped(guc)) { + if (!xe_guc_read_stopped(guc)) { s64 since_resume_ms = ktime_ms_delta(ktime_get(), q->guc->resume_time); @@ -1435,7 +1455,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) q->entity = &ge->entity; - if (guc_read_stopped(guc)) + if (xe_guc_read_stopped(guc)) xe_sched_stop(sched); mutex_unlock(&guc->submission_state.lock); @@ -1591,7 +1611,7 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) ret = wait_event_interruptible_timeout(q->guc->suspend_wait, !READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || - guc_read_stopped(guc), + xe_guc_read_stopped(guc), HZ * 5); if (!ret) { @@ -1717,7 +1737,7 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc) void xe_guc_submit_reset_wait(struct xe_guc *guc) { wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) || - !guc_read_stopped(guc)); + !xe_guc_read_stopped(guc)); } void xe_guc_submit_stop(struct xe_guc *guc) @@ -1726,7 +1746,7 @@ void xe_guc_submit_stop(struct xe_guc *guc) unsigned long index; struct xe_device *xe = guc_to_xe(guc); - xe_assert(xe, guc_read_stopped(guc) == 1); + xe_assert(xe, xe_guc_read_stopped(guc) == 1); mutex_lock(&guc->submission_state.lock); @@ -1770,7 +1790,7 @@ int xe_guc_submit_start(struct xe_guc *guc) unsigned long index; struct xe_device *xe = guc_to_xe(guc); - xe_assert(xe, guc_read_stopped(guc) == 1); + xe_assert(xe, xe_guc_read_stopped(guc) == 1); mutex_lock(&guc->submission_state.lock); atomic_dec(&guc->submission_state.stopped); @@ -1949,8 +1969,6 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len) xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d", xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); - /* FIXME: Do error capture, most likely async */ - trace_xe_exec_queue_reset(q); /* @@ -1966,6 +1984,36 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len) return 0; } +/* + * xe_guc_error_capture_handler - Handler of GuC captured message + * @guc: The GuC object + * @msg: Point to the message + * @len: The message length + * + * When 
the GuC capture data is ready, the GuC sends the + * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host; this function is + * called first to check the status before processing the data that comes with + * the message. + * + * Returns: 0 on success, negative error code on failure + */ +int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len) +{ + u32 status; + + if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN)) { + xe_gt_dbg(guc_to_gt(guc), "Invalid length %u", len); + return -EPROTO; + } + + status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK; + if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE) + xe_gt_warn(guc_to_gt(guc), "G2H-Error capture no space"); + + xe_guc_capture_process(guc); + + return 0; +} + int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, u32 len) { @@ -2180,7 +2228,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps if (!snapshot) return; - drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id); + drm_printf(p, "GuC ID: %d\n", snapshot->guc.id); drm_printf(p, "\tName: %s\n", snapshot->name); drm_printf(p, "\tClass: %d\n", snapshot->class); drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h index bdf8c9f3d24a..9b71a986c6ca 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.h +++ b/drivers/gpu/drm/xe/xe_guc_submit.h @@ -20,12 +20,14 @@ void xe_guc_submit_stop(struct xe_guc *guc); int xe_guc_submit_start(struct xe_guc *guc); void xe_guc_submit_wedge(struct xe_guc *guc); +int xe_guc_read_stopped(struct xe_guc *guc); int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len); +int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len); struct xe_guc_submit_exec_queue_snapshot * xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q); diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index ed150fc09ad0..fa75f57bf5da 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -58,6 +58,8 @@ struct xe_guc { struct xe_guc_ads ads; /** @ct: GuC ct */ struct xe_guc_ct ct; + /** @capture: the error-state-capture module's data and objects */ + struct xe_guc_state_capture *capture; /** @pc: GuC Power Conservation */ struct xe_guc_pc pc; /** @dbm: GuC Doorbell Manager */ diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index f5459f97af23..6a846e4cb221 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -229,7 +229,7 @@ bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type) { struct xe_gt *gt = huc_to_gt(huc); - return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val; + return xe_mmio_read32(&gt->mmio, huc_auth_modes[type].reg) & huc_auth_modes[type].val; } int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type) @@ -268,7 +268,7 @@ int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type) goto fail; } - ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val, + ret = xe_mmio_wait32(&gt->mmio, huc_auth_modes[type].reg, huc_auth_modes[type].val, huc_auth_modes[type].val, 100000, NULL, false); if (ret)
{ xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret)); @@ -296,19 +296,19 @@ void xe_huc_sanitize(struct xe_huc *huc) void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p) { struct xe_gt *gt = huc_to_gt(huc); - int err; + unsigned int fw_ref; xe_uc_fw_print(&huc->fw, p); if (!xe_uc_fw_is_enabled(&huc->fw)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; drm_printf(p, "\nHuC status: 0x%08x\n", - xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO)); + xe_mmio_read32(&gt->mmio, HUC_KERNEL_LOAD_INFO)); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index c9c3beb3ce8d..1557acee3523 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -12,6 +12,7 @@ #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" +#include "regs/xe_irq_regs.h" #include "xe_assert.h" #include "xe_bo.h" #include "xe_device.h" @@ -23,6 +24,7 @@ #include "xe_gt_printk.h" #include "xe_gt_mcr.h" #include "xe_gt_topology.h" +#include "xe_guc_capture.h" #include "xe_hw_engine_group.h" #include "xe_hw_fence.h" #include "xe_irq.h" @@ -295,7 +297,7 @@ void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe, reg.addr += hwe->mmio_base; - xe_mmio_write32(hwe->gt, reg, val); + xe_mmio_write32(&hwe->gt->mmio, reg, val); } /** @@ -315,7 +317,7 @@ u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg) reg.addr += hwe->mmio_base; - return xe_mmio_read32(hwe->gt, reg); + return xe_mmio_read32(&hwe->gt->mmio, reg); } void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) @@ -324,7 +326,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) u32 ccs_mask = xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE); if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask) - xe_mmio_write32(hwe->gt, RCU_MODE, + xe_mmio_write32(&hwe->gt->mmio, RCU_MODE, _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE)); xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0); @@ -354,7 +356,7 @@ static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt, hwe->class != XE_ENGINE_CLASS_RENDER) return false; - return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE; + return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE; } void @@ -460,6 +462,30 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe) xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr); } +static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance) +{ + const struct engine_info *info; + enum xe_hw_engine_id id; + + for (id = 0; id < XE_NUM_HW_ENGINES; ++id) { + info = &engine_infos[id]; + if (info->class == class && info->instance == instance) + return info; + } + + return NULL; +} + +static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class) +{ + /* For MSI-X, hw engines report to offset of engine instance zero */ + const struct engine_info *info = find_engine_info(class, 0); + + xe_gt_assert(gt, info); + + return info ? info->irq_offset : 0; +} + static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, enum xe_hw_engine_id id) { @@ -479,7 +505,9 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, hwe->class = info->class; hwe->instance = info->instance; hwe->mmio_base = info->mmio_base; - hwe->irq_offset = info->irq_offset; + hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
+ get_msix_irq_offset(gt, info->class) : + info->irq_offset; hwe->domain = info->domain; hwe->name = info->name; hwe->fence_irq = &gt->fence_irq[info->class]; @@ -612,7 +640,7 @@ static void read_media_fuses(struct xe_gt *gt) xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE); + media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE); /* * Pre-Xe_HP platforms had register bits representing absent engines, @@ -657,7 +685,7 @@ static void read_copy_fuses(struct xe_gt *gt) xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3); + bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3); bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask); /* BCS0 is always present; only BCS1-BCS8 may be fused off */ @@ -704,7 +732,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); u32 ccs_mask; - ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4); + ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4); ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask); for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) { @@ -742,8 +770,8 @@ static void check_gsc_availability(struct xe_gt *gt) gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0); /* interrupts were previously enabled, so turn them off */ - xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, 0); - xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~0); + xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0); + xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0); drm_info(&xe->drm, "gsccs disabled due to lack of FW\n"); } @@ -798,60 +826,10 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec) xe_hw_fence_irq_run(hwe->fence_irq); } -static bool -is_slice_common_per_gslice(struct xe_device *xe) -{ - return GRAPHICS_VERx100(xe) >= 1255; -} - -static void -xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe, - struct xe_hw_engine_snapshot *snapshot) -{ - struct xe_gt *gt = hwe->gt; - struct xe_device *xe = gt_to_xe(gt); - unsigned int dss; - u16 group, instance; - - snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0)); - - if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER) - return; - - if (is_slice_common_per_gslice(xe) == false) { - snapshot->reg.instdone.slice_common[0] = - xe_mmio_read32(gt, SC_INSTDONE); - snapshot->reg.instdone.slice_common_extra[0] = - xe_mmio_read32(gt, SC_INSTDONE_EXTRA); - snapshot->reg.instdone.slice_common_extra2[0] = - xe_mmio_read32(gt, SC_INSTDONE_EXTRA2); - } else { - for_each_geometry_dss(dss, gt, group, instance) { - snapshot->reg.instdone.slice_common[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE, group, instance); - snapshot->reg.instdone.slice_common_extra[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA, group, instance); - snapshot->reg.instdone.slice_common_extra2[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA2, group, instance); - } - } - - for_each_geometry_dss(dss, gt, group, instance) { - snapshot->reg.instdone.sampler[dss] = - xe_gt_mcr_unicast_read(gt, SAMPLER_INSTDONE, group, instance); - snapshot->reg.instdone.row[dss] = - xe_gt_mcr_unicast_read(gt, ROW_INSTDONE, group, instance); - - if (GRAPHICS_VERx100(xe) >= 1255) - snapshot->reg.instdone.geom_svg[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_INSTDONE_GEOM_SVGUNIT, - group, instance); - } -} - /** * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine. * @hwe: Xe HW Engine. + * @job: The job object.
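 *	(Inferred from the code below: when @job is non-NULL the snapshot first
 *	tries to adopt a matching GuC error-capture node via
 *	xe_guc_capture_get_matching_and_lock() and only falls back to a manual
 *	register capture otherwise.)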
* * This can be printed out in a later stage like during dev_coredump * analysis. @@ -860,11 +838,10 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe, * caller, using `xe_hw_engine_snapshot_free`. */ struct xe_hw_engine_snapshot * -xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) +xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job) { struct xe_hw_engine_snapshot *snapshot; - size_t len; - u64 val; + struct __guc_capture_parsed_output *node; if (!xe_hw_engine_is_valid(hwe)) return NULL; @@ -874,28 +851,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) if (!snapshot) return NULL; - /* Because XE_MAX_DSS_FUSE_BITS is defined in xe_gt_types.h and it - * includes xe_hw_engine_types.h the length of this 3 registers can't be - * set in struct xe_hw_engine_snapshot, so here doing additional - * allocations. - */ - len = (XE_MAX_DSS_FUSE_BITS * sizeof(u32)); - snapshot->reg.instdone.slice_common = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.slice_common_extra = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.slice_common_extra2 = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.sampler = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.row = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.geom_svg = kzalloc(len, GFP_ATOMIC); - if (!snapshot->reg.instdone.slice_common || - !snapshot->reg.instdone.slice_common_extra || - !snapshot->reg.instdone.slice_common_extra2 || - !snapshot->reg.instdone.sampler || - !snapshot->reg.instdone.row || - !snapshot->reg.instdone.geom_svg) { - xe_hw_engine_snapshot_free(snapshot); - return NULL; - } - snapshot->name = kstrdup(hwe->name, GFP_ATOMIC); snapshot->hwe = hwe; snapshot->logical_instance = hwe->logical_instance; @@ -903,157 +858,32 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt), hwe->domain); snapshot->mmio_base = hwe->mmio_base; + snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe); /* no more VF accessible data below this point */ if (IS_SRIOV_VF(gt_to_xe(hwe->gt))) return snapshot; - snapshot->reg.ring_execlist_status = - xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)); - snapshot->reg.ring_execlist_status |= val << 32; - - snapshot->reg.ring_execlist_sq_contents = - xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0)); - snapshot->reg.ring_execlist_sq_contents |= val << 32; - - snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)); - snapshot->reg.ring_acthd |= val << 32; - - snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)); - snapshot->reg.ring_bbaddr |= val << 32; - - snapshot->reg.ring_dma_fadd = - xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)); - snapshot->reg.ring_dma_fadd |= val << 32; - - snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0)); - snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0)); - snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0)); - if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) { - val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0)); - snapshot->reg.ring_start |= val << 32; - } - if (xe_gt_has_indirect_ring_state(hwe->gt)) { - 
snapshot->reg.indirect_ring_state = - xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0)); - } - - snapshot->reg.ring_head = - xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR; - snapshot->reg.ring_tail = - xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR; - snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0)); - snapshot->reg.ring_mi_mode = - xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); - snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0)); - snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0)); - snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0)); - snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0)); - snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0)); - snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0)); - xe_hw_engine_snapshot_instdone_capture(hwe, snapshot); - - if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE) - snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE); - - return snapshot; -} - -static void -xe_hw_engine_snapshot_instdone_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p) -{ - struct xe_gt *gt = snapshot->hwe->gt; - struct xe_device *xe = gt_to_xe(gt); - u16 group, instance; - unsigned int dss; - - drm_printf(p, "\tRING_INSTDONE: 0x%08x\n", snapshot->reg.instdone.ring); - - if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER) - return; - - if (is_slice_common_per_gslice(xe) == false) { - drm_printf(p, "\tSC_INSTDONE[0]: 0x%08x\n", - snapshot->reg.instdone.slice_common[0]); - drm_printf(p, "\tSC_INSTDONE_EXTRA[0]: 0x%08x\n", - snapshot->reg.instdone.slice_common_extra[0]); - drm_printf(p, "\tSC_INSTDONE_EXTRA2[0]: 0x%08x\n", - snapshot->reg.instdone.slice_common_extra2[0]); - } else { - for_each_geometry_dss(dss, gt, group, instance) { - drm_printf(p, "\tSC_INSTDONE[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.slice_common[dss]); - drm_printf(p, "\tSC_INSTDONE_EXTRA[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.slice_common_extra[dss]); - drm_printf(p, "\tSC_INSTDONE_EXTRA2[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.slice_common_extra2[dss]); + if (job) { + /* If got guc capture, set source to GuC */ + node = xe_guc_capture_get_matching_and_lock(job); + if (node) { + struct xe_device *xe = gt_to_xe(hwe->gt); + struct xe_devcoredump *coredump = &xe->devcoredump; + + coredump->snapshot.matched_node = node; + snapshot->source = XE_ENGINE_CAPTURE_SOURCE_GUC; + xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node"); + return snapshot; } } - for_each_geometry_dss(dss, gt, group, instance) { - drm_printf(p, "\tSAMPLER_INSTDONE[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.sampler[dss]); - drm_printf(p, "\tROW_INSTDONE[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.row[dss]); - - if (GRAPHICS_VERx100(xe) >= 1255) - drm_printf(p, "\tINSTDONE_GEOM_SVGUNIT[%u]: 0x%08x\n", - dss, snapshot->reg.instdone.geom_svg[dss]); - } -} + /* otherwise, do manual capture */ + xe_engine_manual_capture(hwe, snapshot); + snapshot->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL; + xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot"); -/** - * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot. - * @snapshot: Xe HW Engine snapshot object. - * @p: drm_printer where it will be printed out. - * - * This function prints out a given Xe HW Engine snapshot object. 
- */ -void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, - struct drm_printer *p) -{ - if (!snapshot) - return; - - drm_printf(p, "%s (physical), logical instance=%d\n", - snapshot->name ? snapshot->name : "", - snapshot->logical_instance); - drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n", - snapshot->forcewake.domain, snapshot->forcewake.ref); - drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam); - drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga); - drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n", - snapshot->reg.ring_execlist_status); - drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n", - snapshot->reg.ring_execlist_sq_contents); - drm_printf(p, "\tRING_START: 0x%016llx\n", snapshot->reg.ring_start); - drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head); - drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail); - drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl); - drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode); - drm_printf(p, "\tRING_MODE: 0x%08x\n", - snapshot->reg.ring_mode); - drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr); - drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr); - drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr); - drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir); - drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd); - drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr); - drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd); - drm_printf(p, "\tINDIRECT_RING_STATE: 0x%08x\n", - snapshot->reg.indirect_ring_state); - drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr); - xe_hw_engine_snapshot_instdone_print(snapshot, p); - - if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE) - drm_printf(p, "\tRCU_MODE: 0x%08x\n", - snapshot->reg.rcu_mode); - drm_puts(p, "\n"); + return snapshot; } /** @@ -1065,15 +895,18 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, */ void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot) { + struct xe_gt *gt; if (!snapshot) return; - kfree(snapshot->reg.instdone.slice_common); - kfree(snapshot->reg.instdone.slice_common_extra); - kfree(snapshot->reg.instdone.slice_common_extra2); - kfree(snapshot->reg.instdone.sampler); - kfree(snapshot->reg.instdone.row); - kfree(snapshot->reg.instdone.geom_svg); + gt = snapshot->hwe->gt; + /* + * xe_guc_capture_put_matched_nodes is called here and from + * xe_devcoredump_snapshot_free, to cover the 2 calling paths + * of hw_engines - debugfs and devcoredump free. 
+ */ + xe_guc_capture_put_matched_nodes(&gt->uc.guc); + kfree(snapshot->name); kfree(snapshot); } @@ -1089,8 +922,8 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p) { struct xe_hw_engine_snapshot *snapshot; - snapshot = xe_hw_engine_snapshot_capture(hwe); - xe_hw_engine_snapshot_print(snapshot, p); + snapshot = xe_hw_engine_snapshot_capture(hwe, NULL); + xe_engine_snapshot_print(snapshot, p); xe_hw_engine_snapshot_free(snapshot); } @@ -1150,7 +983,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class) u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe) { - return xe_mmio_read64_2x32(hwe->gt, RING_TIMESTAMP(hwe->mmio_base)); + return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base)); } enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe) diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h index 022819a4a8eb..da0a6922a26f 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.h +++ b/drivers/gpu/drm/xe/xe_hw_engine.h @@ -11,6 +11,7 @@ struct drm_printer; struct drm_xe_engine_class_instance; struct xe_device; +struct xe_sched_job; #ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN #define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN @@ -54,12 +55,9 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec); void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe); u32 xe_hw_engine_mask_per_class(struct xe_gt *gt, enum xe_engine_class engine_class); - struct xe_hw_engine_snapshot * -xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe); +xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job); void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot); -void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, - struct drm_printer *p); void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p); void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe); diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h index 8be6d420ece4..719f27ef00a5 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_types.h +++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h @@ -152,6 +152,11 @@ struct xe_hw_engine { struct xe_hw_engine_group *hw_engine_group; }; +enum xe_hw_engine_snapshot_source_id { + XE_ENGINE_CAPTURE_SOURCE_MANUAL, + XE_ENGINE_CAPTURE_SOURCE_GUC +}; + /** * struct xe_hw_engine_snapshot - Hardware engine snapshot * * @@ -160,6 +165,8 @@ struct xe_hw_engine { struct xe_hw_engine_snapshot { /** @name: name of the hw engine */ char *name; + /** @source: Data source, either manual or GuC */ + enum xe_hw_engine_snapshot_source_id source; /** @hwe: hw engine */ struct xe_hw_engine *hwe; /** @logical_instance: logical instance of this hw engine */ @@ -173,65 +180,8 @@ struct xe_hw_engine_snapshot { } forcewake; /** @mmio_base: MMIO base address of this hw engine*/ u32 mmio_base; - /** @reg: Useful MMIO register snapshot */ - struct { - /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */ - u64 ring_execlist_status; - /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */ - u64 ring_execlist_sq_contents; - /** @reg.ring_acthd: RING_ACTHD */ - u64 ring_acthd; - /** @reg.ring_bbaddr: RING_BBADDR */ - u64 ring_bbaddr; - /** @reg.ring_dma_fadd: RING_DMA_FADD */ - u64 ring_dma_fadd; - /** @reg.ring_hwstam: RING_HWSTAM */ - u32 ring_hwstam; - /** @reg.ring_hws_pga: RING_HWS_PGA */ - u32 ring_hws_pga; - /** @reg.ring_start: RING_START */ - u64 ring_start; - /** @reg.ring_head:
RING_HEAD */ - u32 ring_head; - /** @reg.ring_tail: RING_TAIL */ - u32 ring_tail; - /** @reg.ring_ctl: RING_CTL */ - u32 ring_ctl; - /** @reg.ring_mi_mode: RING_MI_MODE */ - u32 ring_mi_mode; - /** @reg.ring_mode: RING_MODE */ - u32 ring_mode; - /** @reg.ring_imr: RING_IMR */ - u32 ring_imr; - /** @reg.ring_esr: RING_ESR */ - u32 ring_esr; - /** @reg.ring_emr: RING_EMR */ - u32 ring_emr; - /** @reg.ring_eir: RING_EIR */ - u32 ring_eir; - /** @reg.indirect_ring_state: INDIRECT_RING_STATE */ - u32 indirect_ring_state; - /** @reg.ipehr: IPEHR */ - u32 ipehr; - /** @reg.rcu_mode: RCU_MODE */ - u32 rcu_mode; - struct { - /** @reg.instdone.ring: RING_INSTDONE */ - u32 ring; - /** @reg.instdone.slice_common: SC_INSTDONE */ - u32 *slice_common; - /** @reg.instdone.slice_common_extra: SC_INSTDONE_EXTRA */ - u32 *slice_common_extra; - /** @reg.instdone.slice_common_extra2: SC_INSTDONE_EXTRA2 */ - u32 *slice_common_extra2; - /** @reg.instdone.sampler: SAMPLER_INSTDONE */ - u32 *sampler; - /** @reg.instdone.row: ROW_INSTDONE */ - u32 *row; - /** @reg.instdone.geom_svg: INSTDONE_GEOM_SVGUNIT */ - u32 *geom_svg; - } instdone; - } reg; + /** @kernel_reserved: Engine reserved, can't be used by userspace */ + bool kernel_reserved; }; #endif diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index aa11728e7e79..fde56dad3ab7 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -149,7 +149,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v u64 reg_val, min, max; struct xe_device *xe = hwmon->xe; struct xe_reg rapl_limit, pkg_power_sku; - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); @@ -190,7 +190,7 @@ unlock: static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); int ret = 0; u64 reg_val; struct xe_reg rapl_limit; @@ -222,7 +222,7 @@ unlock: static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); u64 reg_val; @@ -259,7 +259,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l static void xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); struct xe_hwmon_energy_info *ei = &hwmon->ei[channel]; u64 reg_val; @@ -282,7 +282,7 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at char *buf) { struct xe_hwmon *hwmon = dev_get_drvdata(dev); - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u32 x, y, x_w = 2; /* 2 bits */ u64 r, tau4, out; int sensor_index = to_sensor_dev_attr(attr)->index; @@ -323,7 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a const char *buf, size_t count) { struct xe_hwmon *hwmon = dev_get_drvdata(dev); - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u32 x, y, rxy, x_w = 2; /* 2 bits */ u64 tau4, r, max_win; unsigned long val; @@ -498,7 
+498,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u64 reg_val; reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel)); @@ -781,7 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = { static void xe_hwmon_get_preregistration_info(struct xe_device *xe) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct xe_hwmon *hwmon = xe->hwmon; long energy; u64 val_sku_unit = 0; diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 5f2c368c35ad..b7995ebd54ab 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -10,8 +10,7 @@ #include <drm/drm_managed.h> #include "display/xe_display.h" -#include "regs/xe_gt_regs.h" -#include "regs/xe_regs.h" +#include "regs/xe_irq_regs.h" #include "xe_device.h" #include "xe_drv.h" #include "xe_gsc_proxy.h" @@ -30,14 +29,14 @@ #define IIR(offset) XE_REG(offset + 0x8) #define IER(offset) XE_REG(offset + 0xc) -static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg) +static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg) { u32 val = xe_mmio_read32(mmio, reg); if (val == 0) return; - drm_WARN(&gt_to_xe(mmio)->drm, 1, + drm_WARN(&mmio->tile->xe->drm, 1, "Interrupt register 0x%x is not zero: 0x%08x\n", reg.addr, val); xe_mmio_write32(mmio, reg, 0xffffffff); @@ -52,7 +51,7 @@ static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg) */ static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; /* * If we're just enabling an interrupt now, it shouldn't already @@ -70,7 +69,7 @@ static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits) /* Mask and disable all interrupts.
*/ static void mask_and_disable(struct xe_tile *tile, u32 irqregs) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; xe_mmio_write32(mmio, IMR(irqregs), ~0); /* Posting read */ @@ -87,7 +86,7 @@ static void mask_and_disable(struct xe_tile *tile, u32 irqregs) static u32 xelp_intr_disable(struct xe_device *xe) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0); @@ -103,7 +102,7 @@ static u32 xelp_intr_disable(struct xe_device *xe) static u32 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); u32 iir; if (!(master_ctl & GU_MISC_IRQ)) @@ -118,7 +117,7 @@ gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) static inline void xelp_intr_enable(struct xe_device *xe, bool stall) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ); if (stall) @@ -129,12 +128,13 @@ static inline void xelp_intr_enable(struct xe_device *xe, bool stall) void xe_irq_enable_hwe(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + struct xe_mmio *mmio = &gt->mmio; u32 ccs_mask, bcs_mask; u32 irqs, dmask, smask; u32 gsc_mask = 0; u32 heci_mask = 0; - if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) + if (xe_device_uses_memirq(xe)) return; if (xe_device_uc_enabled(xe)) { @@ -155,35 +155,35 @@ void xe_irq_enable_hwe(struct xe_gt *gt) if (!xe_gt_is_media_type(gt)) { /* Enable interrupts for each engine class */ - xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask); + xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask); if (ccs_mask) - xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask); + xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask); /* Unmask interrupts for each engine instance */ - xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask); - xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask); + xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask); + xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask); if (bcs_mask & (BIT(1)|BIT(2))) - xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); if (bcs_mask & (BIT(3)|BIT(4))) - xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); if (bcs_mask & (BIT(5)|BIT(6))) - xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); if (bcs_mask & (BIT(7)|BIT(8))) - xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); if (ccs_mask & (BIT(0)|BIT(1))) - xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask); } if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { /* Enable interrupts for each engine class */ - xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask); + xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask); /* Unmask interrupts for each engine instance */ - xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask); - xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask); - xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,
~dmask); /* * the heci2 interrupt is enabled via the same register as the @@ -197,17 +197,17 @@ void xe_irq_enable_hwe(struct xe_gt *gt) } if (gsc_mask) { - xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); - xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask); + xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); + xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask); } if (heci_mask) - xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); + xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); } } static u32 gt_engine_identity(struct xe_device *xe, - struct xe_gt *mmio, + struct xe_mmio *mmio, const unsigned int bank, const unsigned int bit) { @@ -279,7 +279,7 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile, return tile->media_gt; default: break; - }; + } fallthrough; default: return tile->primary_gt; @@ -291,7 +291,7 @@ static void gt_irq_handler(struct xe_tile *tile, u32 *identity) { struct xe_device *xe = tile_to_xe(tile); - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; unsigned int bank, bit; u16 instance, intr_vec; enum xe_engine_class class; @@ -376,7 +376,7 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg) static u32 dg1_intr_disable(struct xe_device *xe) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); u32 val; /* First disable interrupts */ @@ -394,7 +394,7 @@ static u32 dg1_intr_disable(struct xe_device *xe) static void dg1_intr_enable(struct xe_device *xe, bool stall) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); if (stall) @@ -431,7 +431,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) } for_each_tile(tile, xe, id) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0) continue; @@ -474,7 +474,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) static void gt_irq_reset(struct xe_tile *tile) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, XE_ENGINE_CLASS_COMPUTE); @@ -504,7 +504,7 @@ static void gt_irq_reset(struct xe_tile *tile) if (ccs_mask & (BIT(0)|BIT(1))) xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); if ((tile->media_gt && xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) || @@ -547,7 +547,7 @@ static void dg1_irq_reset(struct xe_tile *tile) static void dg1_irq_reset_mstr(struct xe_tile *tile) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0); } @@ -566,7 +566,7 @@ static void vf_irq_reset(struct xe_device *xe) for_each_tile(tile, xe, id) { if (xe_device_has_memirq(xe)) - xe_memirq_reset(&tile->sriov.vf.memirq); + xe_memirq_reset(&tile->memirq); else gt_irq_reset(tile); } @@ -609,7 +609,7 @@ static void vf_irq_postinstall(struct xe_device *xe) for_each_tile(tile, xe, id) if (xe_device_has_memirq(xe)) - xe_memirq_postinstall(&tile->sriov.vf.memirq); + xe_memirq_postinstall(&tile->memirq); if (GRAPHICS_VERx100(xe) < 1210) xelp_intr_enable(xe, true); @@ -652,7 +652,7 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg) spin_unlock(&xe->irq.lock); for_each_tile(tile, xe, id) - 
xe_memirq_handler(&tile->sriov.vf.memirq); + xe_memirq_handler(&tile->memirq); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 8999ac511555..a60ceae4c6dd 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -193,7 +193,7 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt) lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo)); lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K)); - xe_mmio_write32(tile->primary_gt, + xe_mmio_write32(&tile->mmio, GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG, LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K)); } diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index aec7db39c061..4f64c7f4e68d 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -38,24 +38,6 @@ #define LRC_INDIRECT_RING_STATE_SIZE SZ_4K -struct xe_lrc_snapshot { - struct xe_bo *lrc_bo; - void *lrc_snapshot; - unsigned long lrc_size, lrc_offset; - - u32 context_desc; - u32 indirect_context_desc; - u32 head; - struct { - u32 internal; - u32 memory; - } tail; - u32 start_seqno; - u32 seqno; - u32 ctx_timestamp; - u32 ctx_job_timestamp; -}; - static struct xe_device * lrc_to_xe(struct xe_lrc *lrc) { @@ -599,10 +581,10 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe) static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) { - struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->sriov.vf.memirq; + struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq; struct xe_device *xe = gt_to_xe(hwe->gt); - if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe)) + if (!xe_device_uses_memirq(xe)) return; regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM | @@ -613,9 +595,9 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED; regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr; - regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq); + regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe); regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr; - regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq); + regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe); } static int lrc_ring_mi_mode(struct xe_hw_engine *hwe) diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index c24542e89318..40d8f6906d3e 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -17,9 +17,26 @@ enum xe_engine_class; struct xe_gt; struct xe_hw_engine; struct xe_lrc; -struct xe_lrc_snapshot; struct xe_vm; +struct xe_lrc_snapshot { + struct xe_bo *lrc_bo; + void *lrc_snapshot; + unsigned long lrc_size, lrc_offset; + + u32 context_desc; + u32 indirect_context_desc; + u32 head; + struct { + u32 internal; + u32 memory; + } tail; + u32 start_seqno; + u32 seqno; + u32 ctx_timestamp; + u32 ctx_job_timestamp; +}; + #define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4) struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm, diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c index 95b6e9d7b7db..f833da88150a 100644 --- a/drivers/gpu/drm/xe/xe_memirq.c +++ b/drivers/gpu/drm/xe/xe_memirq.c @@ -5,8 +5,8 @@ #include <drm/drm_managed.h> -#include "regs/xe_gt_regs.h" #include "regs/xe_guc_regs.h" +#include "regs/xe_irq_regs.h" #include "regs/xe_regs.h" #include "xe_assert.h" @@ -19,15 +19,25 @@ #include "xe_hw_engine.h" #include "xe_map.h"
#include "xe_memirq.h" -#include "xe_sriov.h" -#include "xe_sriov_printk.h" #define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition) -#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg) +#define memirq_printk(m, _level, _fmt, ...) \ + drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \ + memirq_to_tile(m)->id, ##__VA_ARGS__) + +#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ +#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__) +#else +#define memirq_debug(...) +#endif + +#define memirq_err(m, _fmt, ...) memirq_printk(m, err, _fmt, ##__VA_ARGS__) +#define memirq_err_ratelimited(m, _fmt, ...) \ + memirq_printk(m, err_ratelimited, _fmt, ##__VA_ARGS__) static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq) { - return container_of(memirq, struct xe_tile, sriov.vf.memirq); + return container_of(memirq, struct xe_tile, memirq); } static struct xe_device *memirq_to_xe(struct xe_memirq *memirq) @@ -105,6 +115,44 @@ static const char *guc_name(struct xe_guc *guc) * | | * | | * +-----------+ + * + * + * MSI-X use case + * + * When using MSI-X, hw engines report interrupt status and source to engine + * instance 0. For this scenario, in order to differentiate between the + * engines, we need to pass different status/source pointers in the LRC. + * + * The requirements on those pointers are: + * - Interrupt status should be 4KiB aligned + * - Interrupt source should be 64 bytes aligned + * + * To accommodate this, we duplicate the memirq page layout above - + * allocating a page for each engine instance and pass this page in the LRC. + * Note that the same page can be reused for different engine types. + * For example, an LRC executing on CCS #x will have pointers to page #x, + * and an LRC executing on BCS #x will have the same pointers. + * + * :: + * + * 0x0000 +==============================+ <== page for instance 0 (BCS0, CCS0, etc.) + * | Interrupt Status Report Page | + * 0x0400 +==============================+ + * | Interrupt Source Report Page | + * 0x0440 +==============================+ + * | Interrupt Enable Mask | + * +==============================+ + * | Not used | + * 0x1000 +==============================+ <== page for instance 1 (BCS1, CCS1, etc.) + * | Interrupt Status Report Page | + * 0x1400 +==============================+ + * | Interrupt Source Report Page | + * 0x1440 +==============================+ + * | Not used | + * 0x2000 +==============================+ <== page for instance 2 (BCS2, CCS2, etc.) + * | ... | + * +==============================+ + * */ static void __release_xe_bo(struct drm_device *drm, void *arg) @@ -114,18 +162,30 @@ static void __release_xe_bo(struct drm_device *drm, void *arg) xe_bo_unpin_map_no_vm(bo); } +static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq) +{ + /* + * When the HW engines are configured to use MSI-X, + * they report interrupt status and source to the offset of + * engine instance 0. + */ + return xe_device_has_msix(memirq_to_xe(memirq)); +} + static int memirq_alloc_pages(struct xe_memirq *memirq) { struct xe_device *xe = memirq_to_xe(memirq); struct xe_tile *tile = memirq_to_tile(memirq); + size_t bo_size = hw_reports_to_instance_zero(memirq) ? 
+ XE_HW_ENGINE_MAX_INSTANCE * SZ_4K : SZ_4K; struct xe_bo *bo; int err; - BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64)); - BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K)); + BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64)); + BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K)); /* XXX: convert to managed bo */ - bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, + bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size, ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | @@ -140,25 +200,25 @@ static int memirq_alloc_pages(struct xe_memirq *memirq) memirq_assert(memirq, !xe_bo_is_vram(bo)); memirq_assert(memirq, !memirq->bo); - iosys_map_memset(&bo->vmap, 0, 0, SZ_4K); + iosys_map_memset(&bo->vmap, 0, 0, bo_size); memirq->bo = bo; - memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET); - memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET); + memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0)); + memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0)); memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET); memirq_assert(memirq, !memirq->source.is_iomem); memirq_assert(memirq, !memirq->status.is_iomem); memirq_assert(memirq, !memirq->mask.is_iomem); - memirq_debug(memirq, "page offsets: source %#x status %#x\n", - xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq)); + memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n", + xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0), + XE_MEMIRQ_STATUS_OFFSET(0)); return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo); out: - xe_sriov_err(memirq_to_xe(memirq), - "Failed to allocate memirq page (%pe)\n", ERR_PTR(err)); + memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err)); return err; } @@ -178,9 +238,7 @@ static void memirq_set_enable(struct xe_memirq *memirq, bool enable) * * These allocations are managed and will be implicitly released on unload. * - * Note: This function shall be called only by the VF driver. - * - * If this function fails then VF driver won't be able to operate correctly. + * If this function fails then the driver won't be able to operate correctly. * If `Memory Based Interrupts`_ are not used this function will return 0. * * Return: 0 on success or a negative error code on failure. @@ -190,9 +248,7 @@ int xe_memirq_init(struct xe_memirq *memirq) struct xe_device *xe = memirq_to_xe(memirq); int err; - memirq_assert(memirq, IS_SRIOV_VF(xe)); - - if (!xe_device_has_memirq(xe)) + if (!xe_device_uses_memirq(xe)) return 0; err = memirq_alloc_pages(memirq); @@ -205,55 +261,70 @@ int xe_memirq_init(struct xe_memirq *memirq) return 0; } +static u32 __memirq_source_page(struct xe_memirq *memirq, u16 instance) +{ + memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE); + memirq_assert(memirq, memirq->bo); + + instance = hw_reports_to_instance_zero(memirq) ? instance : 0; + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET(instance); +} + /** * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_. * @memirq: the &xe_memirq to query + * @hwe: the hw engine for which we want the report page * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: GGTT's offset of the `Interrupt Source Report Page`_. 
*/ -u32 xe_memirq_source_ptr(struct xe_memirq *memirq) +u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); + + return __memirq_source_page(memirq, hwe->instance); +} + +static u32 __memirq_status_page(struct xe_memirq *memirq, u16 instance) +{ + memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE); memirq_assert(memirq, memirq->bo); - return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET; + instance = hw_reports_to_instance_zero(memirq) ? instance : 0; + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET(instance); } /** * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_. * @memirq: the &xe_memirq to query + * @hwe: the hw engine for which we want the report page * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: GGTT's offset of the `Interrupt Status Report Page`_. */ -u32 xe_memirq_status_ptr(struct xe_memirq *memirq) +u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); - memirq_assert(memirq, memirq->bo); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); - return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET; + return __memirq_status_page(memirq, hwe->instance); } /** * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask. * @memirq: the &xe_memirq to query * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: GGTT's offset of the Interrupt Enable Mask. */ u32 xe_memirq_enable_ptr(struct xe_memirq *memirq) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); memirq_assert(memirq, memirq->bo); return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET; @@ -267,7 +338,7 @@ u32 xe_memirq_enable_ptr(struct xe_memirq *memirq) * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_ * to be used by the GuC when `Memory Based Interrupts`_ are required. * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: 0 on success or a negative error code on failure. 
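For readers tracing the new per-engine report-page lookup, the offset arithmetic behind __memirq_source_page() and __memirq_status_page() can be sketched as self-contained C. The 4KiB stride and the 0x0/0x400 page offsets come from the XE_MEMIRQ_STATUS_OFFSET()/XE_MEMIRQ_SOURCE_OFFSET() macros reworked in xe_memirq_types.h below; the helper names and the has_msix flag here are illustrative only, not driver API.

/* Illustrative sketch, not driver code: mirrors the memirq page math. */
#include <stdint.h>

#define SZ_4K 0x1000u
#define MEMIRQ_STATUS_OFFSET(inst) ((inst) * SZ_4K + 0x0)   /* ISR page */
#define MEMIRQ_SOURCE_OFFSET(inst) ((inst) * SZ_4K + 0x400) /* IIR page */

/*
 * With MSI-X the engines report to instance 0, so each engine instance is
 * given its own 4KiB page; without MSI-X all engines fold onto page 0.
 * This mirrors hw_reports_to_instance_zero() in the hunks above.
 */
static uint32_t memirq_source_ggtt(uint32_t bo_ggtt_addr, int has_msix, uint16_t instance)
{
	uint16_t inst = has_msix ? instance : 0;

	return bo_ggtt_addr + MEMIRQ_SOURCE_OFFSET(inst);
}

static uint32_t memirq_status_ggtt(uint32_t bo_ggtt_addr, int has_msix, uint16_t instance)
{
	uint16_t inst = has_msix ? instance : 0;

	return bo_ggtt_addr + MEMIRQ_STATUS_OFFSET(inst);
}

For example, with MSI-X enabled the status page of engine instance 2 lands at bo + 0x2000, matching the page layout diagram added to xe_memirq.c earlier in this patch.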
@@ -279,12 +350,10 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc) u32 source, status; int err; - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); - memirq_assert(memirq, memirq->bo); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); - source = xe_memirq_source_ptr(memirq) + offset; - status = xe_memirq_status_ptr(memirq) + offset * SZ_16; + source = __memirq_source_page(memirq, 0) + offset; + status = __memirq_status_page(memirq, 0) + offset * SZ_16; err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY, source); @@ -299,9 +368,8 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc) return 0; failed: - xe_sriov_err(memirq_to_xe(memirq), - "Failed to setup report pages in %s (%pe)\n", - guc_name(guc), ERR_PTR(err)); + memirq_err(memirq, "Failed to setup report pages in %s (%pe)\n", + guc_name(guc), ERR_PTR(err)); return err; } @@ -311,13 +379,12 @@ failed: * * This is part of the driver IRQ setup flow. * - * This function shall only be used by the VF driver on platforms that use + * This function shall only be used on platforms that use * `Memory Based Interrupts`_. */ void xe_memirq_reset(struct xe_memirq *memirq) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); if (memirq->bo) memirq_set_enable(memirq, false); @@ -329,13 +396,12 @@ void xe_memirq_reset(struct xe_memirq *memirq) * * This is part of the driver IRQ setup flow. * - * This function shall only be used by the VF driver on platforms that use + * This function shall only be used on platforms that use * `Memory Based Interrupts`_. */ void xe_memirq_postinstall(struct xe_memirq *memirq) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); if (memirq->bo) memirq_set_enable(memirq, true); @@ -349,9 +415,9 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector, value = iosys_map_rd(vector, offset, u8); if (value) { if (value != 0xff) - xe_sriov_err_ratelimited(memirq_to_xe(memirq), - "Unexpected memirq value %#x from %s at %u\n", - value, name, offset); + memirq_err_ratelimited(memirq, + "Unexpected memirq value %#x from %s at %u\n", + value, name, offset); iosys_map_wr(vector, offset, u8, 0x00); } @@ -379,6 +445,28 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat } /** + * xe_memirq_hwe_handler - Check and process interrupts for a specific HW engine. + * @memirq: the &xe_memirq + * @hwe: the hw engine to process + * + * This function reads and dispatches `Memory Based Interrupts` for the provided HW engine. + */ +void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe) +{ + u16 offset = hwe->irq_offset; + u16 instance = hw_reports_to_instance_zero(memirq) ? hwe->instance : 0; + struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap, + XE_MEMIRQ_SOURCE_OFFSET(instance)); + + if (memirq_received(memirq, &src_offset, offset, "SRC")) { + struct iosys_map status_offset = + IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap, + XE_MEMIRQ_STATUS_OFFSET(instance) + offset * SZ_16); + memirq_dispatch_engine(memirq, &status_offset, hwe); + } +} + +/** * xe_memirq_handler - The `Memory Based Interrupts`_ Handler. 
* @memirq: the &xe_memirq * @@ -405,13 +493,8 @@ void xe_memirq_handler(struct xe_memirq *memirq) if (gt->tile != tile) continue; - for_each_hw_engine(hwe, gt, id) { - if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) { - map = IOSYS_MAP_INIT_OFFSET(&memirq->status, - hwe->irq_offset * SZ_16); - memirq_dispatch_engine(memirq, &map, hwe); - } - } + for_each_hw_engine(hwe, gt, id) + xe_memirq_hwe_handler(memirq, hwe); } /* GuC and media GuC (if present) must be checked separately */ diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h index 2d40d03c3095..06130650e9d6 100644 --- a/drivers/gpu/drm/xe/xe_memirq.h +++ b/drivers/gpu/drm/xe/xe_memirq.h @@ -9,16 +9,18 @@ #include <linux/types.h> struct xe_guc; +struct xe_hw_engine; struct xe_memirq; int xe_memirq_init(struct xe_memirq *memirq); -u32 xe_memirq_source_ptr(struct xe_memirq *memirq); -u32 xe_memirq_status_ptr(struct xe_memirq *memirq); +u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe); +u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe); u32 xe_memirq_enable_ptr(struct xe_memirq *memirq); void xe_memirq_reset(struct xe_memirq *memirq); void xe_memirq_postinstall(struct xe_memirq *memirq); +void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe); void xe_memirq_handler(struct xe_memirq *memirq); int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc); diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h index 625b6b8736cc..9d0f6c1cdb9d 100644 --- a/drivers/gpu/drm/xe/xe_memirq_types.h +++ b/drivers/gpu/drm/xe/xe_memirq_types.h @@ -11,9 +11,9 @@ struct xe_bo; /* ISR */ -#define XE_MEMIRQ_STATUS_OFFSET 0x0 +#define XE_MEMIRQ_STATUS_OFFSET(inst) ((inst) * SZ_4K + 0x0) /* IIR */ -#define XE_MEMIRQ_SOURCE_OFFSET 0x400 +#define XE_MEMIRQ_SOURCE_OFFSET(inst) ((inst) * SZ_4K + 0x400) /* IMR */ #define XE_MEMIRQ_ENABLE_OFFSET 0x440 diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 3fd462fda625..a48f239cad1c 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -36,13 +36,19 @@ static void tiles_fini(void *arg) /* * On multi-tile devices, partition the BAR space for MMIO on each tile, * possibly accounting for register override on the number of tiles available. + * tile_mmio_size contains both the tile's 4MB register space, as well as + * additional space for the GTT and other (possibly unused) regions. * Resulting memory layout is like below: * * .----------------------. <- tile_count * tile_mmio_size * | ....
| * |----------------------| <- 2 * tile_mmio_size + * | tile1 GTT + other | + * |----------------------| <- 1 * tile_mmio_size + 4MB * | tile1->mmio.regs | * |----------------------| <- 1 * tile_mmio_size + * | tile0 GTT + other | + * |----------------------| <- 4MB * | tile0->mmio.regs | * '----------------------' <- 0MB */ @@ -61,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) /* Possibly override number of tile based on configuration register */ if (!xe->info.skip_mtcfg) { - struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); u8 tile_count; u32 mtcfg; /* * Although the per-tile mmio regs are not yet initialized, this - * is fine as it's going to the root gt, that's guaranteed to be - * initialized earlier in xe_mmio_init() + * is fine as it's going to the root tile's mmio, that's + * guaranteed to be initialized earlier in xe_mmio_init() */ - mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR); + mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR); tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1; if (tile_count < xe->info.tile_count) { @@ -90,8 +96,9 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) regs = xe->mmio.regs; for_each_tile(tile, xe, id) { - tile->mmio.size = tile_mmio_size; + tile->mmio.regs_size = SZ_4M; tile->mmio.regs = regs; + tile->mmio.tile = tile; regs += tile_mmio_size; } } @@ -126,8 +133,9 @@ static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size, regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count; for_each_tile(tile, xe, id) { - tile->mmio_ext.size = tile_mmio_ext_size; + tile->mmio_ext.regs_size = tile_mmio_ext_size; tile->mmio_ext.regs = regs; + tile->mmio_ext.tile = tile; regs += tile_mmio_ext_size; } } @@ -157,137 +165,132 @@ int xe_mmio_init(struct xe_device *xe) { struct xe_tile *root_tile = xe_device_get_root_tile(xe); struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - const int mmio_bar = 0; /* * Map the entire BAR. * The first 16MB of the BAR, belong to the root tile, and include: * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB). */ - xe->mmio.size = pci_resource_len(pdev, mmio_bar); - xe->mmio.regs = pci_iomap(pdev, mmio_bar, GTTMMADR_BAR); + xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR); + xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0); if (xe->mmio.regs == NULL) { drm_err(&xe->drm, "failed to map registers\n"); return -EIO; } /* Setup first tile; other tiles (if present) will be setup later. 
*/ - root_tile->mmio.size = SZ_16M; + root_tile->mmio.regs_size = SZ_4M; root_tile->mmio.regs = xe->mmio.regs; + root_tile->mmio.tile = root_tile; return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe); } -static void mmio_flush_pending_writes(struct xe_gt *gt) +static void mmio_flush_pending_writes(struct xe_mmio *mmio) { #define DUMMY_REG_OFFSET 0x130030 - struct xe_tile *tile = gt_to_tile(gt); int i; - if (tile->xe->info.platform != XE_LUNARLAKE) + if (mmio->tile->xe->info.platform != XE_LUNARLAKE) return; /* 4 dummy writes */ for (i = 0; i < 4; i++) - writel(0, tile->mmio.regs + DUMMY_REG_OFFSET); + writel(0, mmio->regs + DUMMY_REG_OFFSET); } -u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg) +u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u8 val; /* Wa_15015404425 */ - mmio_flush_pending_writes(gt); + mmio_flush_pending_writes(mmio); - val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); - trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); + val = readb(mmio->regs + addr); + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); return val; } -u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg) +u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u16 val; /* Wa_15015404425 */ - mmio_flush_pending_writes(gt); + mmio_flush_pending_writes(mmio); - val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); - trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); + val = readw(mmio->regs + addr); + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); return val; } -void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) +void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); - trace_xe_reg_rw(gt, true, addr, val, sizeof(val)); + trace_xe_reg_rw(mmio, true, addr, val, sizeof(val)); - if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) - xe_gt_sriov_vf_write32(gt, reg, val); + if (!reg.vf && mmio->sriov_vf_gt) + xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val); else - writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); + writel(val, mmio->regs + addr); } -u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg) +u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u32 val; /* Wa_15015404425 */ - mmio_flush_pending_writes(gt); + mmio_flush_pending_writes(mmio); - if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) - val = xe_gt_sriov_vf_read32(gt, reg); + if (!reg.vf && mmio->sriov_vf_gt) + val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg); else - val = readl((reg.ext ? 
tile->mmio_ext.regs : tile->mmio.regs) + addr); + val = readl(mmio->regs + addr); - trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); return val; } -u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set) +u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set) { u32 old, reg_val; - old = xe_mmio_read32(gt, reg); + old = xe_mmio_read32(mmio, reg); reg_val = (old & ~clr) | set; - xe_mmio_write32(gt, reg, reg_val); + xe_mmio_write32(mmio, reg, reg_val); return old; } -int xe_mmio_write32_and_verify(struct xe_gt *gt, +int xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg, u32 val, u32 mask, u32 eval) { u32 reg_val; - xe_mmio_write32(gt, reg, val); - reg_val = xe_mmio_read32(gt, reg); + xe_mmio_write32(mmio, reg, val); + reg_val = xe_mmio_read32(mmio, reg); return (reg_val & mask) != eval ? -EINVAL : 0; } -bool xe_mmio_in_range(const struct xe_gt *gt, +bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg) { - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); return range && addr >= range->start && addr <= range->end; } /** * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads - * @gt: MMIO target GT + * @mmio: MMIO target * @reg: register to read value from * * Although Intel GPUs have some 64-bit registers, the hardware officially @@ -307,21 +310,21 @@ bool xe_mmio_in_range(const struct xe_gt *gt, * * Returns the value of the 64-bit register. */ -u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) +u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg) { struct xe_reg reg_udw = { .addr = reg.addr + 0x4 }; u32 ldw, udw, oldudw, retries; - reg.addr = xe_mmio_adjusted_addr(gt, reg.addr); - reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr); + reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr); + reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr); /* we shouldn't adjust just one register address */ - xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4); + xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4); - oldudw = xe_mmio_read32(gt, reg_udw); + oldudw = xe_mmio_read32(mmio, reg_udw); for (retries = 5; retries; --retries) { - ldw = xe_mmio_read32(gt, reg); - udw = xe_mmio_read32(gt, reg_udw); + ldw = xe_mmio_read32(mmio, reg); + udw = xe_mmio_read32(mmio, reg_udw); if (udw == oldudw) break; @@ -329,13 +332,13 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) oldudw = udw; } - xe_gt_WARN(gt, retries == 0, - "64-bit read of %#x did not stabilize\n", reg.addr); + drm_WARN(&mmio->tile->xe->drm, retries == 0, + "64-bit read of %#x did not stabilize\n", reg.addr); return (u64)udw << 32 | ldw; } -static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, +static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic, bool expect_match) { ktime_t cur = ktime_get_raw(); @@ -346,7 +349,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v bool check; for (;;) { - read = xe_mmio_read32(gt, reg); + read = xe_mmio_read32(mmio, reg); check = (read & mask) == val; if (!expect_match) @@ -372,7 +375,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v } if (ret != 0) { - read = xe_mmio_read32(gt, reg); + read = xe_mmio_read32(mmio, reg); check = (read & mask) == val; if 
(!expect_match) @@ -390,7 +393,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v /** * xe_mmio_wait32() - Wait for a register to match the desired masked value - * @gt: MMIO target GT + * @mmio: MMIO target * @reg: register to read value from * @mask: mask to be applied to the value read from the register * @val: desired value after applying the mask @@ -407,15 +410,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v * @timeout_us for different reasons, specially in non-atomic contexts. Thus, * it is possible that this function succeeds even after @timeout_us has passed. */ -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, +int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true); + return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true); } /** * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value - * @gt: MMIO target GT + * @mmio: MMIO target * @reg: register to read value from * @mask: mask to be applied to the value read from the register * @val: value not to be matched after applying the mask @@ -426,8 +429,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t * This function works exactly like xe_mmio_wait32() with the exception that * @val is expected not to be matched. */ -int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, +int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false); + return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false); } diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h index 26551410ecc8..8a46f4006a84 100644 --- a/drivers/gpu/drm/xe/xe_mmio.h +++ b/drivers/gpu/drm/xe/xe_mmio.h @@ -14,25 +14,30 @@ struct xe_reg; int xe_mmio_init(struct xe_device *xe); int xe_mmio_probe_tiles(struct xe_device *xe); -u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg); -u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg); -void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val); -u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg); -u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set); -int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval); -bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg); - -u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg); -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, - u32 *out_val, bool atomic); -int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, - u32 *out_val, bool atomic); - -static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr) +u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg); +u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg); +void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val); +u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg); +u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set); +int xe_mmio_write32_and_verify(struct xe_mmio 
*mmio, struct xe_reg reg, u32 val, u32 mask, u32 eval); +bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg); + +u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg); +int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, + u32 timeout_us, u32 *out_val, bool atomic); +int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, + u32 val, u32 timeout_us, u32 *out_val, bool atomic); + +static inline u32 xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr) { - if (addr < gt->mmio.adj_limit) - addr += gt->mmio.adj_offset; + if (addr < mmio->adj_limit) + addr += mmio->adj_offset; return addr; } +static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe) +{ + return &xe->tiles[0].mmio; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index 7ff0ac5b799a..54d199b5cfb2 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -278,7 +278,7 @@ static void xelp_lncf_dump(struct xe_mocs_info *info, struct xe_gt *gt, struct d if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i)); drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", j++, @@ -310,7 +310,7 @@ static void xelp_mocs_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i)); drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u ] (%#8x)\n", i, @@ -383,7 +383,7 @@ static void xehp_lncf_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i)); drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", j++, @@ -428,7 +428,7 @@ static void pvc_mocs_dump(struct xe_mocs_info *info, unsigned int flags, struct if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i)); drm_printf(p, "LNCFCMOCS[%2d] = [ %u ] (%#8x)\n", j++, @@ -510,7 +510,7 @@ static void mtl_mocs_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i)); drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u] (%#8x)\n", i, @@ -553,7 +553,7 @@ static void xe2_mocs_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i)); drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u] (%#8x)\n", i, @@ -576,6 +576,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe, memset(info, 0, sizeof(struct xe_mocs_info)); switch (xe->info.platform) { + case XE_PANTHERLAKE: case XE_LUNARLAKE: case XE_BATTLEMAGE: info->ops = &xe2_mocs_ops; @@ -690,7 +691,7 @@ static void __init_mocs_table(struct xe_gt *gt, if (regs_are_mcr(gt)) xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs); else -
xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs); + xe_mmio_write32(&gt->mmio, XELP_GLOBAL_MOCS(i), mocs); } } @@ -730,7 +731,7 @@ static void init_l3cc_table(struct xe_gt *gt, if (regs_are_mcr(gt)) xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc); else - xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc); + xe_mmio_write32(&gt->mmio, XELP_LNCFCMOCS(i), l3cc); } } @@ -773,25 +774,21 @@ void xe_mocs_init(struct xe_gt *gt) void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) { - struct xe_mocs_info table; - unsigned int flags; - u32 ret; struct xe_device *xe = gt_to_xe(gt); + struct xe_mocs_info table; + unsigned int fw_ref, flags; flags = get_mocs_settings(xe, &table); xe_pm_runtime_get_noresume(xe); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - - if (ret) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) goto err_fw; table.ops->dump(&table, flags, gt, p); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_fw: - xe_assert(xe, !ret); xe_pm_runtime_put(xe); } diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 78823f53d290..8dd55798ab31 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -36,11 +36,22 @@ #include "xe_pm.h" #include "xe_sched_job.h" #include "xe_sriov.h" +#include "xe_sync.h" #define DEFAULT_POLL_FREQUENCY_HZ 200 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) #define XE_OA_UNIT_INVALID U32_MAX +enum xe_oa_submit_deps { + XE_OA_SUBMIT_NO_DEPS, + XE_OA_SUBMIT_ADD_DEPS, +}; + +enum xe_oa_user_extn_from { + XE_OA_USER_EXTN_FROM_OPEN, + XE_OA_USER_EXTN_FROM_CONFIG, +}; + struct xe_oa_reg { struct xe_reg addr; u32 value; @@ -70,6 +81,7 @@ struct flex { }; struct xe_oa_open_param { + struct xe_file *xef; u32 oa_unit_id; bool sample; u32 metric_set; @@ -81,6 +93,9 @@ struct xe_oa_open_param { struct xe_exec_queue *exec_q; struct xe_hw_engine *hwe; bool no_preempt; + struct drm_xe_sync __user *syncs_user; + int num_syncs; + struct xe_sync_entry *syncs; }; struct xe_oa_config_bo { @@ -90,6 +105,17 @@ struct xe_oa_config_bo { struct xe_bb *bb; }; +struct xe_oa_fence { + /* @base: dma fence base */ + struct dma_fence base; + /* @lock: lock for the fence */ + spinlock_t lock; + /* @work: work to signal @base */ + struct delayed_work work; + /* @cb: callback to schedule @work */ + struct dma_fence_cb cb; +}; + #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x static const struct xe_oa_format oa_formats[] = { @@ -162,10 +188,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se return oa_config; } -static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo) +static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence) { xe_oa_config_put(oa_bo->oa_config); - xe_bb_free(oa_bo->bb, NULL); + xe_bb_free(oa_bo->bb, last_fence); kfree(oa_bo); } @@ -176,7 +202,7 @@ static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream) static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream) { - return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) & + return xe_mmio_read32(&stream->gt->mmio, __oa_regs(stream)->oa_tail_ptr) & OAG_OATAILPTR_MASK; } @@ -366,7 +392,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf, struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); - xe_mmio_write32(stream->gt, oaheadptr, + xe_mmio_write32(&stream->gt->mmio, oaheadptr, (head + gtt_offset) & OAG_OAHEADPTR_MASK); 
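/*
 * The conversion pattern above repeats throughout the rest of this series:
 * the xe_mmio_* accessors take a struct xe_mmio * (typically &gt->mmio)
 * instead of a struct xe_gt *, and xe_force_wake_get() now returns a
 * reference cookie that is checked for success and later handed back to
 * xe_force_wake_put(). A minimal sketch of the combined idiom, modeled on
 * the xe_mocs_dump() rework above (SOME_REG is a placeholder, not a real
 * register):
 *
 *	unsigned int fw_ref;
 *	u32 val;
 *
 *	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 *	if (!fw_ref)
 *		return;		// wake-up failed, nothing was acquired
 *
 *	val = xe_mmio_read32(&gt->mmio, SOME_REG);
 *
 *	xe_force_wake_put(gt_to_fw(gt), fw_ref);	// release what was acquired
 */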
stream->oa_buffer.head = head; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -377,22 +403,23 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf, static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream) { + struct xe_mmio *mmio = &stream->gt->mmio; u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT; unsigned long flags; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0); + xe_mmio_write32(mmio, __oa_regs(stream)->oa_head_ptr, gtt_offset & OAG_OAHEADPTR_MASK); stream->oa_buffer.head = 0; /* * PRM says: "This MMIO must be set before the OATAILPTR register and after the * OAHEADPTR register. This is to enable proper functionality of the overflow bit". */ - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_buffer, oa_buf); + xe_mmio_write32(mmio, __oa_regs(stream)->oa_tail_ptr, gtt_offset & OAG_OATAILPTR_MASK); /* Mark that we need updated tail pointer to read from */ @@ -444,21 +471,23 @@ static void xe_oa_enable(struct xe_oa_stream *stream) stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) val |= OAG_OACONTROL_OA_PES_DISAG_EN; - xe_mmio_write32(stream->gt, regs->oa_ctrl, val); + xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val); } static void xe_oa_disable(struct xe_oa_stream *stream) { - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0); - if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl, + struct xe_mmio *mmio = &stream->gt->mmio; + + xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0); + if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl, OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false)) drm_err(&stream->oa->xe->drm, "wait for OA to be disabled timed out\n"); if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) { /* <= XE_METEORLAKE except XE_PVC */ - xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1); - if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false)) + xe_mmio_write32(mmio, OA_TLB_INV_CR, 1); + if (xe_mmio_wait32(mmio, OA_TLB_INV_CR, 1, 0, 50000, NULL, false)) drm_err(&stream->oa->xe->drm, "wait for OA tlb invalidate timed out\n"); } @@ -481,7 +510,7 @@ static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf, size_t count, size_t *offset) { /* Only clear our bits to avoid side-effects */ - stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status, + stream->oa_status = xe_mmio_rmw32(&stream->gt->mmio, __oa_regs(stream)->oa_status, OASTATUS_RELEVANT_BITS, 0); /* * Signal to userspace that there is non-zero OA status to read via @@ -567,11 +596,11 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait) return ret; } -static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) +static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps, + struct xe_bb *bb) { struct xe_sched_job *job; struct dma_fence *fence; - long timeout; int err = 0; /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */ @@ -581,18 +610,24 @@ static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) goto exit; } + if (deps == XE_OA_SUBMIT_ADD_DEPS) { + for (int i = 
0; i < stream->num_syncs && !err; i++) + err = xe_sync_entry_add_deps(&stream->syncs[i], job); + if (err) { + drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err); + goto err_put_job; + } + } + xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); xe_sched_job_push(job); - timeout = dma_fence_wait_timeout(fence, false, HZ); - dma_fence_put(fence); - if (timeout < 0) - err = timeout; - else if (!timeout) - err = -ETIME; + return fence; +err_put_job: + xe_sched_job_put(job); exit: - return err; + return ERR_PTR(err); } static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs) @@ -636,7 +671,8 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream) xe_oa_config_put(stream->oa_config); llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) - free_oa_config_bo(oa_bo); + free_oa_config_bo(oa_bo, stream->last_fence); + dma_fence_put(stream->last_fence); } static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, @@ -656,6 +692,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc, const struct flex *flex, u32 count) { + struct dma_fence *fence; struct xe_bb *bb; int err; @@ -667,7 +704,16 @@ static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lr xe_oa_store_flex(stream, lrc, bb, flex, count); - err = xe_oa_submit_bb(stream, bb); + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; + } + xe_bb_free(bb, fence); + dma_fence_put(fence); + + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -675,6 +721,7 @@ exit: static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri) { + struct dma_fence *fence; struct xe_bb *bb; int err; @@ -686,7 +733,16 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re write_cs_mi_lri(bb, reg_lri, 1); - err = xe_oa_submit_bb(stream, bb); + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; + } + xe_bb_free(bb, fence); + dma_fence_put(fence); + + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -749,7 +805,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable) int err; /* Set ccs select to enable programming of OAC_OACONTROL */ - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream)); + xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl, + __oa_ccs_select(stream)); /* Modify stream hwe context image with regs_context */ err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0], @@ -785,6 +842,7 @@ static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool en static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) { + struct xe_mmio *mmio = &stream->gt->mmio; u32 sqcnt1; /* @@ -798,7 +856,7 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) _MASKED_BIT_DISABLE(DISABLE_DOP_GATING)); } - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug, oag_configure_mmio_trigger(stream, false)); /* disable the context save/restore or OAR counters */ @@ -806,13 +864,13 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) xe_oa_configure_oa_context(stream, false); /* Make sure we disable noa to save power. 
*/ - xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0); + xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0); sqcnt1 = SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0); /* Reset PMON Enable to save power. */ - xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0); + xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0); } static void xe_oa_stream_destroy(struct xe_oa_stream *stream) @@ -832,7 +890,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_oa_free_oa_buffer(stream); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(stream->oa->xe); /* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */ @@ -840,6 +898,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); xe_oa_free_configs(stream); + xe_file_put(stream->xef); } static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream) @@ -910,11 +969,62 @@ out: return oa_bo; } +static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence) +{ + dma_fence_put(stream->last_fence); + stream->last_fence = dma_fence_get(fence); +} + +static void xe_oa_fence_work_fn(struct work_struct *w) +{ + struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work); + + /* Signal fence to indicate new OA configuration is active */ + dma_fence_signal(&ofence->base); + dma_fence_put(&ofence->base); +} + +static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + /* Additional empirical delay needed for NOA programming after registers are written */ +#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 + + struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb); + + INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn); + queue_delayed_work(system_unbound_wq, &ofence->work, + usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US)); + dma_fence_put(fence); +} + +static const char *xe_oa_get_driver_name(struct dma_fence *fence) +{ + return "xe_oa"; +} + +static const char *xe_oa_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static const struct dma_fence_ops xe_oa_fence_ops = { + .get_driver_name = xe_oa_get_driver_name, + .get_timeline_name = xe_oa_get_timeline_name, +}; + static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config) { #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 struct xe_oa_config_bo *oa_bo; - int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US; + struct xe_oa_fence *ofence; + int i, err, num_signal = 0; + struct dma_fence *fence; + + ofence = kzalloc(sizeof(*ofence), GFP_KERNEL); + if (!ofence) { + err = -ENOMEM; + goto exit; + } oa_bo = xe_oa_alloc_config_buffer(stream, config); if (IS_ERR(oa_bo)) { @@ -922,11 +1032,50 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config goto exit; } - err = xe_oa_submit_bb(stream, oa_bo->bb); + /* Emit OA configuration batch */ + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto exit; + } - /* Additional empirical delay needed for NOA programming after registers are written */ - usleep_range(us, 2 * us); + /* Point of no return: initialize and set fence to signal */ + spin_lock_init(&ofence->lock); + dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0); + + for (i = 0; i < stream->num_syncs; i++) { + if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL) + num_signal++; + 
xe_sync_entry_signal(&stream->syncs[i], &ofence->base); + } + + /* Additional dma_fence_get in case we dma_fence_wait */ + if (!num_signal) + dma_fence_get(&ofence->base); + + /* Update last fence too before adding callback */ + xe_oa_update_last_fence(stream, fence); + + /* Add job fence callback to schedule work to signal ofence->base */ + err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb); + xe_gt_assert(stream->gt, !err || err == -ENOENT); + if (err == -ENOENT) + xe_oa_config_cb(fence, &ofence->cb); + + /* If nothing needs to be signaled we wait synchronously */ + if (!num_signal) { + dma_fence_wait(&ofence->base, false); + dma_fence_put(&ofence->base); + } + + /* Done with syncs */ + for (i = 0; i < stream->num_syncs; i++) + xe_sync_entry_cleanup(&stream->syncs[i]); + kfree(stream->syncs); + + return 0; exit: + kfree(ofence); return err; } @@ -940,6 +1089,7 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream) static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) { + struct xe_mmio *mmio = &stream->gt->mmio; u32 oa_debug, sqcnt1; int ret; @@ -966,12 +1116,12 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL | OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL; - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug, _MASKED_BIT_ENABLE(oa_debug) | oag_report_ctx_switches(stream) | oag_configure_mmio_trigger(stream, true)); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ? + xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ? (OAG_OAGLBCTXCTRL_COUNTER_RESUME | OAG_OAGLBCTXCTRL_TIMER_ENABLE | REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK, @@ -985,7 +1135,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) sqcnt1 = SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(stream->oa->xe) ? 
SQCNT1_OABPC : 0); - xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1); + xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1); /* Configure OAR/OAC */ if (stream->exec_q) { @@ -997,6 +1147,262 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) return xe_oa_emit_oa_config(stream, stream->oa_config); } +static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) +{ + u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); + u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); + u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); + u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); + int idx; + + for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { + const struct xe_oa_format *f = &oa->oa_formats[idx]; + + if (counter_size == f->counter_size && bc_report == f->bc_report && + type == f->type && counter_sel == f->counter_select) { + *name = idx; + return 0; + } + } + + return -EINVAL; +} + +static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + if (value >= oa->oa_unit_ids) { + drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); + return -EINVAL; + } + param->oa_unit_id = value; + return 0; +} + +static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->sample = value; + return 0; +} + +static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->metric_set = value; + return 0; +} + +static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + int ret = decode_oa_format(oa, value, &param->oa_format); + + if (ret) { + drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); + return ret; + } + return 0; +} + +static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ +#define OA_EXPONENT_MAX 31 + + if (value > OA_EXPONENT_MAX) { + drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); + return -EINVAL; + } + param->period_exponent = value; + return 0; +} + +static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->disabled = value; + return 0; +} + +static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->exec_queue_id = value; + return 0; +} + +static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->engine_instance = value; + return 0; +} + +static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->no_preempt = value; + return 0; +} + +static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->num_syncs = value; + return 0; +} + +static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->syncs_user = u64_to_user_ptr(value); + return 0; +} + +static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + return -EINVAL; +} + +typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param); +static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = 
xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_ext_set_property ext; + int err; + u32 idx; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) != + ARRAY_SIZE(xe_oa_set_property_funcs_config)); + + if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) || + XE_IOCTL_DBG(oa->xe, ext.pad)) + return -EINVAL; + + idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open)); + + if (from == XE_OA_USER_EXTN_FROM_CONFIG) + return xe_oa_set_property_funcs_config[idx](oa, ext.value, param); + else + return xe_oa_set_property_funcs_open[idx](oa, ext.value, param); +} + +typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param); +static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { + [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, +}; + +#define MAX_USER_EXTENSIONS 16 +static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension, + int ext_number, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_user_extension ext; + int err; + u32 idx; + + if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) + return -E2BIG; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + if (XE_IOCTL_DBG(oa->xe, ext.pad) || + XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) + return -EINVAL; + + idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); + err = xe_oa_user_extension_funcs[idx](oa, from, extension, param); + if (XE_IOCTL_DBG(oa->xe, err)) + return err; + + if (ext.next_extension) + return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param); + + return 0; +} + +static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) +{ + int ret, 
num_syncs, num_ufence = 0; + + if (param->num_syncs && !param->syncs_user) { + drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n"); + ret = -EINVAL; + goto exit; + } + + if (param->num_syncs) { + param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL); + if (!param->syncs) { + ret = -ENOMEM; + goto exit; + } + } + + for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { + ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs], + &param->syncs_user[num_syncs], 0); + if (ret) + goto err_syncs; + + if (xe_sync_is_ufence(&param->syncs[num_syncs])) + num_ufence++; + } + + if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) { + ret = -EINVAL; + goto err_syncs; + } + + return 0; + +err_syncs: + while (num_syncs--) + xe_sync_entry_cleanup(&param->syncs[num_syncs]); + kfree(param->syncs); +exit: + return ret; +} + static void xe_oa_stream_enable(struct xe_oa_stream *stream) { stream->pollin = false; @@ -1090,36 +1496,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream) static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg) { - struct drm_xe_ext_set_property ext; + struct xe_oa_open_param param = {}; long ret = stream->oa_config->id; struct xe_oa_config *config; int err; - err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext)); - if (XE_IOCTL_DBG(stream->oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) || - XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET)) - return -EINVAL; + err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param); + if (err) + return err; - config = xe_oa_get_oa_config(stream->oa, ext.value); + config = xe_oa_get_oa_config(stream->oa, param.metric_set); if (!config) return -ENODEV; - if (config != stream->oa_config) { - err = xe_oa_emit_oa_config(stream, config); - if (!err) - config = xchg(&stream->oa_config, config); - else - ret = err; + param.xef = stream->xef; + err = xe_oa_parse_syncs(stream->oa, &param); + if (err) + goto err_config_put; + + stream->num_syncs = param.num_syncs; + stream->syncs = param.syncs; + + err = xe_oa_emit_oa_config(stream, config); + if (!err) { + config = xchg(&stream->oa_config, config); + drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n", + stream->oa_config->uuid); } +err_config_put: xe_oa_config_put(config); - return ret; + return err ?: ret; } static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg) @@ -1349,6 +1757,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, { struct xe_oa_unit *u = param->hwe->oa_unit; struct xe_gt *gt = param->hwe->gt; + unsigned int fw_ref; int ret; stream->exec_q = param->exec_q; @@ -1362,6 +1771,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, stream->period_exponent = param->period_exponent; stream->no_preempt = param->no_preempt; + stream->xef = xe_file_get(param->xef); + stream->num_syncs = param->num_syncs; + stream->syncs = param->syncs; + /* * For Xe2+, when overrun mode is enabled, there are no partial reports at the end * of buffer, making the OA buffer effectively a non-power-of-2 size circular @@ -1409,7 +1822,11 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, /* Take runtime pm ref and forcewake to disable RC6 */ xe_pm_runtime_get(stream->oa->xe); - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + fw_ref = 
xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + ret = -ETIMEDOUT; + goto err_fw_put; + } ret = xe_oa_alloc_oa_buffer(stream); if (ret) @@ -1451,13 +1868,14 @@ err_put_k_exec_q: err_free_oa_buf: xe_oa_free_oa_buffer(stream); err_fw_put: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(stream->oa->xe); if (stream->override_gucrc) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); err_free_configs: xe_oa_free_configs(stream); exit: + xe_file_put(stream->xef); return ret; } @@ -1535,7 +1953,7 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt) case XE_PVC: case XE_METEORLAKE: xe_pm_runtime_get(gt_to_xe(gt)); - reg = xe_mmio_read32(gt, RPM_CONFIG0); + reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); xe_pm_runtime_put(gt_to_xe(gt)); shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg); @@ -1567,27 +1985,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type) } } -static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) -{ - u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); - u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); - u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); - u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); - int idx; - - for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { - const struct xe_oa_format *f = &oa->oa_formats[idx]; - - if (counter_size == f->counter_size && bc_report == f->bc_report && - type == f->type && counter_sel == f->counter_select) { - *name = idx; - return 0; - } - } - - return -EINVAL; -} - /** * xe_oa_unit_id - Return OA unit ID for a hardware engine * @hwe: @xe_hw_engine @@ -1634,155 +2031,6 @@ out: return ret; } -static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - if (value >= oa->oa_unit_ids) { - drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); - return -EINVAL; - } - param->oa_unit_id = value; - return 0; -} - -static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->sample = value; - return 0; -} - -static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->metric_set = value; - return 0; -} - -static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - int ret = decode_oa_format(oa, value, &param->oa_format); - - if (ret) { - drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); - return ret; - } - return 0; -} - -static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ -#define OA_EXPONENT_MAX 31 - - if (value > OA_EXPONENT_MAX) { - drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); - return -EINVAL; - } - param->period_exponent = value; - return 0; -} - -static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->disabled = value; - return 0; -} - -static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->exec_queue_id = value; - return 0; -} - -static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->engine_instance = value; - return 0; -} - -static int xe_oa_set_no_preempt(struct xe_oa *oa, 
u64 value, - struct xe_oa_open_param *param) -{ - param->no_preempt = value; - return 0; -} - -typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param); -static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = { - [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, - [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, - [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, - [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, - [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, - [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, - [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, - [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, - [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, -}; - -static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_ext_set_property ext; - int err; - u32 idx; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) || - XE_IOCTL_DBG(oa->xe, ext.pad)) - return -EINVAL; - - idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs)); - return xe_oa_set_property_funcs[idx](oa, ext.value, param); -} - -typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param); -static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { - [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, -}; - -#define MAX_USER_EXTENSIONS 16 -static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_user_extension ext; - int err; - u32 idx; - - if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) - return -E2BIG; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.pad) || - XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) - return -EINVAL; - - idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); - err = xe_oa_user_extension_funcs[idx](oa, extension, param); - if (XE_IOCTL_DBG(oa->xe, err)) - return err; - - if (ext.next_extension) - return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param); - - return 0; -} - /** * xe_oa_stream_open_ioctl - Opens an OA stream * @dev: @drm_device @@ -1808,7 +2056,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f return -ENODEV; } - ret = xe_oa_user_extensions(oa, data, 0, &param); + param.xef = xef; + ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param); if (ret) return ret; @@ -1876,11 +2125,24 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz); } + ret = xe_oa_parse_syncs(oa, &param); + if (ret) + goto err_exec_q; + mutex_lock(&param.hwe->gt->oa.gt_lock); ret = xe_oa_stream_open_ioctl_locked(oa, &param); mutex_unlock(&param.hwe->gt->oa.gt_lock); + if (ret < 0) + goto err_sync_cleanup; + + return ret; + +err_sync_cleanup: + while (param.num_syncs--) + xe_sync_entry_cleanup(&param.syncs[param.num_syncs]); + kfree(param.syncs); err_exec_q: - if (ret < 
0 && param.exec_q) + if (param.exec_q) xe_exec_queue_put(param.exec_q); return ret; } @@ -2351,7 +2613,7 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt) } /* Ensure MMIO trigger remains disabled till there is a stream */ - xe_mmio_write32(gt, u->regs.oa_debug, + xe_mmio_write32(&gt->mmio, u->regs.oa_debug, oag_configure_mmio_trigger(NULL, false)); /* Set oa_unit_ids now to ensure ids remain contiguous */ diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 8862eca73fbe..fea9d981e414 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -238,5 +238,17 @@ struct xe_oa_stream { /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */ u32 no_preempt; + + /** @xef: xe_file with which the stream was opened */ + struct xe_file *xef; + + /** @last_fence: fence to use in stream destroy when needed */ + struct dma_fence *last_fence; + + /** @num_syncs: size of @syncs array */ + u32 num_syncs; + + /** @syncs: syncs to wait on and to signal */ + struct xe_sync_entry *syncs; }; #endif diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index f291a1730024..30fdbdb9341e 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -100,6 +100,10 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = { * Reserved entries should be programmed with the maximum caching, minimum * coherency (which matches an all-0's encoding), so we can just omit them * in the table. + * + * Note: There is an implicit assumption in the driver that compression and + * coh_1way+ are mutually exclusive. If this is ever not true then userptr + * and imported dma-buf from external device will have uncleared ccs state. */ #define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \ { \ REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \ REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \ REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \ - .coh_mode = __coh_mode ? XE_COH_AT_LEAST_1WAY : XE_COH_NONE \ + .coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? 
\ + XE_COH_AT_LEAST_1WAY : XE_COH_NONE \ } static const struct xe_pat_table_entry xe2_pat_table[] = { @@ -160,7 +165,7 @@ static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[ for (int i = 0; i < n_entries; i++) { struct xe_reg reg = XE_REG(_PAT_INDEX(i)); - xe_mmio_write32(gt, reg, table[i].value); + xe_mmio_write32(&gt->mmio, reg, table[i].value); } } @@ -177,25 +182,24 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); for (i = 0; i < xe->pat.n_entries; i++) { - u32 pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i))); + u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i))); u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat); drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i, XELP_MEM_TYPE_STR_MAP[mem_type], pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xelp_pat_ops = { @@ -206,11 +210,12 @@ static const struct xe_pat_ops xelp_pat_ops = { static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -224,9 +229,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) XELP_MEM_TYPE_STR_MAP[mem_type], pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xehp_pat_ops = { @@ -237,11 +240,12 @@ static const struct xe_pat_ops xehp_pat_ops = { static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -253,9 +257,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xehpc_pat_ops = { @@ -266,11 +268,12 @@ static const struct xe_pat_ops xehpc_pat_ops = { static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -278,7 +281,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) u32 pat; if (xe_gt_is_media_type(gt)) - pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i))); + pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i))); else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i))); @@ -287,9 +290,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), 
pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /* @@ -316,27 +317,28 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry int n_entries) { program_pat(gt, table, n_entries); - xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value); + xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value); if (IS_DGFX(gt_to_xe(gt))) - xe_mmio_write32(gt, XE_REG(_PAT_PTA), xe2_pat_pta.value); + xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value); } static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; u32 pat; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); for (i = 0; i < xe->pat.n_entries; i++) { if (xe_gt_is_media_type(gt)) - pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i))); + pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i))); else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i))); @@ -355,7 +357,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) * PPGTT entries. */ if (xe_gt_is_media_type(gt)) - pat = xe_mmio_read32(gt, XE_REG(_PAT_PTA)); + pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA)); else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA)); @@ -369,9 +371,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XE2_COH_MODE, pat), pat); - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xe2_pat_ops = { @@ -382,7 +382,7 @@ static const struct xe_pat_ops xe2_pat_ops = { void xe_pat_init_early(struct xe_device *xe) { - if (GRAPHICS_VER(xe) == 20) { + if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) { xe->pat.ops = &xe2_pat_ops; xe->pat.table = xe2_pat_table; diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 5e962e72c97e..e6640283893f 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -13,7 +13,7 @@ #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> -#include <drm/intel/xe_pciids.h> +#include <drm/intel/pciids.h> #include "display/xe_display.h" #include "regs/xe_gt_regs.h" @@ -103,7 +103,6 @@ static const struct xe_graphics_desc graphics_xelpp = { #define XE_HP_FEATURES \ .has_range_tlb_invalidation = true, \ - .has_flat_ccs = true, \ .dma_mask_size = 46, \ .va_bits = 48, \ .vm_max_level = 3 @@ -120,6 +119,8 @@ static const struct xe_graphics_desc graphics_xehpg = { XE_HP_FEATURES, .vram_flags = XE_VRAM_FLAGS_NEED64K, + + .has_flat_ccs = 1, }; static const struct xe_graphics_desc graphics_xehpc = { @@ -145,7 +146,6 @@ static const struct xe_graphics_desc graphics_xehpc = { .has_asid = 1, .has_atomic_enable_pte_bit = 1, - .has_flat_ccs = 0, .has_usm = 1, }; @@ -156,7 +156,6 @@ static const struct xe_graphics_desc graphics_xelpg = { BIT(XE_HW_ENGINE_CCS0), XE_HP_FEATURES, - .has_flat_ccs = 0, }; #define XE2_GFX_FEATURES \ @@ -209,7 +208,7 @@ static const struct xe_media_desc media_xelpmp = { }; static const struct xe_media_desc media_xe2 = { - .name = "Xe2_LPM / Xe2_HPM", + .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM", .hw_engine_mask = GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) | @@ -234,7 +233,7 @@ static const struct xe_device_desc rkl_desc = { 
.require_force_probe = true, }; -static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 }; +static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 }; static const struct xe_device_desc adl_s_desc = { .graphics = &graphics_xelp, @@ -249,7 +248,7 @@ static const struct xe_device_desc adl_s_desc = { }, }; -static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 }; +static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 }; static const struct xe_device_desc adl_p_desc = { .graphics = &graphics_xelp, @@ -286,9 +285,9 @@ static const struct xe_device_desc dg1_desc = { .require_force_probe = true, }; -static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 }; -static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 }; -static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 }; +static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 }; +static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 }; +static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 }; #define DG2_FEATURES \ DGFX_FEATURES, \ @@ -347,6 +346,12 @@ static const struct xe_device_desc bmg_desc = { .has_heci_cscfi = 1, }; +static const struct xe_device_desc ptl_desc = { + PLATFORM(PANTHERLAKE), + .has_display = true, + .require_force_probe = true, +}; + #undef PLATFORM __diag_pop(); @@ -357,6 +362,8 @@ static const struct gmdid_map graphics_ip_map[] = { { 1274, &graphics_xelpg }, /* Xe_LPG+ */ { 2001, &graphics_xe2 }, { 2004, &graphics_xe2 }, + { 3000, &graphics_xe2 }, + { 3001, &graphics_xe2 }, }; /* Map of GMD_ID values to media IP */ @@ -364,13 +371,9 @@ static const struct gmdid_map media_ip_map[] = { { 1300, &media_xelpmp }, { 1301, &media_xe2 }, { 2000, &media_xe2 }, + { 3000, &media_xe2 }, }; -#define INTEL_VGA_DEVICE(id, info) { \ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \ - PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16, \ - (unsigned long) info } - /* * Make sure any device matches here are from most specific to most * general. For example, since the Quanta match is based on the subsystem @@ -378,25 +381,26 @@ static const struct gmdid_map media_ip_map[] = { * PCI ID matches, otherwise we'll use the wrong info struct above. 
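As a sketch of what each table entry below expands to: an INTEL_*_IDS(INTEL_VGA_DEVICE, &desc) line applies INTEL_VGA_DEVICE to every PCI ID in that family, so each ID yields an initializer roughly like the local macro definition removed above (0xABCD is a placeholder; the real IDs come from drm/intel/pciids.h):

	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xABCD),
	  PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16,
	  (unsigned long) &tgl_desc },
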
*/ static const struct pci_device_id pciidlist[] = { - XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc), - XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc), - XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), - XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), - XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc), - XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), - XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), - XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc), - XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc), - XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc), - XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc), - XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc), - XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc), + INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc), + INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc), + INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), + INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), + INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc), + INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc), + INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), + INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), + INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc), + INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc), + INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc), + INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc), + INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc), + INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc), + INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc), + INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc), { } }; MODULE_DEVICE_TABLE(pci, pciidlist); -#undef INTEL_VGA_DEVICE - /* is device_id present in comma separated list of ids */ static bool device_id_in_list(u16 device_id, const char *devices, bool negative) { @@ -467,13 +471,15 @@ enum xe_gmdid_type { static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) { - struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct xe_reg gmdid_reg = GMD_ID; u32 val; KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); if (IS_SRIOV_VF(xe)) { + struct xe_gt *gt = xe_root_mmio_gt(xe); + /* * To get the value of the GMDID register, VFs must obtain it * from the GuC using MMIO communication. @@ -509,14 +515,17 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, gt->info.type = XE_GT_TYPE_UNINITIALIZED; } else { /* - * We need to apply the GSI offset explicitly here as at this - * point the xe_gt is not fully uninitialized and only basic - * access to MMIO registers is possible. + * GMD_ID is a GT register, but at this point in the driver + * init we haven't fully initialized the GT yet so we need to + * read the register with the tile's MMIO accessor. That means + * we need to apply the GSI offset manually since it won't get + * automatically added as it would if we were using a GT mmio + * accessor. 
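For reference, the adjustment a GT accessor would have applied implicitly is the xe_mmio_adjusted_addr() logic from xe_mmio.h earlier in this series:

	if (addr < mmio->adj_limit)
		addr += mmio->adj_offset;

which is why the media-IP case below adds MEDIA_GT_GSI_OFFSET to the register address by hand before the tile-based read.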
*/ if (type == GMDID_MEDIA) gmdid_reg.addr += MEDIA_GT_GSI_OFFSET; - val = xe_mmio_read32(gt, gmdid_reg); + val = xe_mmio_read32(mmio, gmdid_reg); } *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val); @@ -678,7 +687,10 @@ static int xe_info_init(struct xe_device *xe, xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit; if (xe->info.platform != XE_PVC) xe->info.has_device_atomics_on_smem = 1; + + /* Runtime detection may change this later */ xe->info.has_flat_ccs = graphics_desc->has_flat_ccs; + xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation; xe->info.has_usm = graphics_desc->has_usm; @@ -707,6 +719,7 @@ static int xe_info_init(struct xe_device *xe, gt->info.type = XE_GT_TYPE_MAIN; gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state; gt->info.engine_mask = graphics_desc->hw_engine_mask; + if (MEDIA_VER(xe) < 13 && media_desc) gt->info.engine_mask |= media_desc->hw_engine_mask; @@ -725,8 +738,6 @@ static int xe_info_init(struct xe_device *xe, gt->info.type = XE_GT_TYPE_MEDIA; gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state; gt->info.engine_mask = media_desc->hw_engine_mask; - gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET; - gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH; /* * FIXME: At the moment multi-tile and standalone media are @@ -757,6 +768,25 @@ static void xe_pci_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +/* + * Probe the PCI device, initialize various parts of the driver. + * + * Fault injection is used to test the error paths of some initialization + * functions called either directly from xe_pci_probe() or indirectly for + * example through xe_device_probe(). Those functions use the kernel fault + * injection capabilities infrastructure, see + * Documentation/fault-injection/fault-injection.rst for details. The macro + * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution + * at runtime and use a provided return value. The first requirement for + * error injectable functions is proper handling of the error code by the + * caller for recovery, which is always the case here. The second + * requirement is that no state is changed before the first error return. + * It is not strictly fulfilled for all initialization functions using the + * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those + * error cases at probe time, the error code is simply propagated up by the + * caller. Therefore there is no consequence on those specific callers when + * function error injection skips the whole function. 
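As a sketch of the required shape (the setup step named here is hypothetical), an error-injectable init function looks like:

	int xe_pm_init_early(struct xe_device *xe)
	{
		int err;

		err = setup_step(xe);	/* hypothetical step; injection skips the whole function */
		if (err)
			return err;	/* caller just propagates, no state changed yet */

		return 0;
	}
	ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

matching the xe_pm.c hunk later in this series.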
+ */ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct xe_device_desc *desc = (const void *)ent->driver_data; diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index 7397d556996a..d95d9835de42 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ b/drivers/gpu/drm/xe/xe_pcode.c @@ -44,7 +44,7 @@ static int pcode_mailbox_status(struct xe_tile *tile) [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"}, }; - err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK; + err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK; if (err) { drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err, err_decode[err].str ?: "Unknown"); @@ -58,7 +58,7 @@ static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *d unsigned int timeout_ms, bool return_data, bool atomic) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; int err; if (tile_to_xe(tile)->info.skip_pcode) diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h index 79b7042c4534..d08574c4cdb8 100644 --- a/drivers/gpu/drm/xe/xe_platform_types.h +++ b/drivers/gpu/drm/xe/xe_platform_types.h @@ -23,6 +23,7 @@ enum xe_platform { XE_METEORLAKE, XE_LUNARLAKE, XE_BATTLEMAGE, + XE_PANTHERLAKE, }; enum xe_subplatform { diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 33eb039053e4..40f7c844ed44 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -5,6 +5,7 @@ #include "xe_pm.h" +#include <linux/fault-inject.h> #include <linux/pm_runtime.h> #include <drm/drm_managed.h> @@ -263,6 +264,7 @@ int xe_pm_init_early(struct xe_device *xe) return 0; } +ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ /** * xe_pm_init - Initialize Xe Power Management diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 848da8e68c7a..170ae72d1a7b 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -9,6 +9,7 @@ #include <linux/sched/clock.h> #include <drm/ttm/ttm_placement.h> +#include <generated/xe_wa_oob.h> #include <uapi/drm/xe_drm.h> #include "regs/xe_engine_regs.h" @@ -23,6 +24,7 @@ #include "xe_macros.h" #include "xe_mmio.h" #include "xe_ttm_vram_mgr.h" +#include "xe_wa.h" static const u16 xe_to_user_engine_class[] = { [XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER, @@ -83,24 +85,22 @@ static __ktime_func_t __clock_id_to_func(clockid_t clk_id) } static void -__read_timestamps(struct xe_gt *gt, - struct xe_reg lower_reg, - struct xe_reg upper_reg, - u64 *engine_ts, - u64 *cpu_ts, - u64 *cpu_delta, - __ktime_func_t cpu_clock) +hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts, + u64 *cpu_delta, __ktime_func_t cpu_clock) { + struct xe_mmio *mmio = &hwe->gt->mmio; u32 upper, lower, old_upper, loop = 0; + struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base), + lower_reg = RING_TIMESTAMP(hwe->mmio_base); - upper = xe_mmio_read32(gt, upper_reg); + upper = xe_mmio_read32(mmio, upper_reg); do { *cpu_delta = local_clock(); *cpu_ts = cpu_clock(); - lower = xe_mmio_read32(gt, lower_reg); + lower = xe_mmio_read32(mmio, lower_reg); *cpu_delta = local_clock() - *cpu_delta; old_upper = upper; - upper = xe_mmio_read32(gt, upper_reg); + upper = xe_mmio_read32(mmio, upper_reg); } while (upper != old_upper && loop++ < 2); *engine_ts = (u64)upper << 32 | lower; @@ -117,6 +117,7 @@ query_engine_cycles(struct xe_device *xe, __ktime_func_t cpu_clock; struct 
xe_hw_engine *hwe; struct xe_gt *gt; + unsigned int fw_ref; if (query->size == 0) { query->size = size; @@ -149,18 +150,16 @@ query_engine_cycles(struct xe_device *xe, if (!hwe) return -EINVAL; - if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); return -EIO; + } - __read_timestamps(gt, - RING_TIMESTAMP(hwe->mmio_base), - RING_TIMESTAMP_UDW(hwe->mmio_base), - &resp.engine_cycles, - &resp.cpu_timestamp, - &resp.cpu_delta, - cpu_clock); + hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp, + &resp.cpu_delta, cpu_clock); - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); if (GRAPHICS_VER(xe) >= 20) resp.width = 64; @@ -168,16 +167,10 @@ query_engine_cycles(struct xe_device *xe, resp.width = 36; /* Only write to the output fields of user query */ - if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp)) - return -EFAULT; - - if (put_user(resp.cpu_delta, &query_ptr->cpu_delta)) - return -EFAULT; - - if (put_user(resp.engine_cycles, &query_ptr->engine_cycles)) - return -EFAULT; - - if (put_user(resp.width, &query_ptr->width)) + if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) || + put_user(resp.cpu_delta, &query_ptr->cpu_delta) || + put_user(resp.engine_cycles, &query_ptr->engine_cycles) || + put_user(resp.width, &query_ptr->width)) return -EFAULT; return 0; @@ -458,12 +451,23 @@ static int query_hwconfig(struct xe_device *xe, static size_t calc_topo_query_size(struct xe_device *xe) { - return xe->info.gt_count * - (4 * sizeof(struct drm_xe_query_topology_mask) + - sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) + - sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) + - sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) + - sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss)); + struct xe_gt *gt; + size_t query_size = 0; + int id; + + for_each_gt(gt, xe, id) { + query_size += 3 * sizeof(struct drm_xe_query_topology_mask) + + sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) + + sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) + + sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss); + + /* L3bank mask may not be available for some GTs */ + if (!XE_WA(gt, no_media_l3)) + query_size += sizeof(struct drm_xe_query_topology_mask) + + sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask); + } + + return query_size; } static int copy_mask(void __user **ptr, @@ -516,11 +520,18 @@ static int query_gt_topology(struct xe_device *xe, if (err) return err; - topo.type = DRM_XE_TOPO_L3_BANK; - err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask, - sizeof(gt->fuse_topo.l3_bank_mask)); - if (err) - return err; + /* + * If the kernel doesn't have a way to obtain a correct L3bank + * mask, then it's better to omit L3 from the query rather than + * reporting bogus or zeroed information to userspace. + */ + if (!XE_WA(gt, no_media_l3)) { + topo.type = DRM_XE_TOPO_L3_BANK; + err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask, + sizeof(gt->fuse_topo.l3_bank_mask)); + if (err) + return err; + } topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ? 
DRM_XE_TOPO_SIMD16_EU_PER_DSS : @@ -659,7 +670,7 @@ static int query_oa_units(struct xe_device *xe, du->oa_unit_id = u->oa_unit_id; du->oa_unit_type = u->type; du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt); - du->capabilities = DRM_XE_OA_CAPS_BASE; + du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS; j = 0; for_each_hw_engine(hwe, gt, hwe_id) { diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index 440ac572f6e5..e1a0e27cda14 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -15,6 +15,7 @@ #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" +#include "xe_device.h" #include "xe_device_types.h" #include "xe_force_wake.h" #include "xe_gt.h" @@ -164,7 +165,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry) else if (entry->clr_bits + 1) val = (reg.mcr ? xe_gt_mcr_unicast_read_any(gt, reg_mcr) : - xe_mmio_read32(gt, reg)) & (~entry->clr_bits); + xe_mmio_read32(>->mmio, reg)) & (~entry->clr_bits); else val = 0; @@ -180,34 +181,34 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry) if (entry->reg.mcr) xe_gt_mcr_multicast_write(gt, reg_mcr, val); else - xe_mmio_write32(gt, reg, val); + xe_mmio_write32(>->mmio, reg, val); } void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt) { struct xe_reg_sr_entry *entry; unsigned long reg; - int err; + unsigned int fw_ref; if (xa_empty(&sr->xa)) return; xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name); - err = xe_force_wake_get(>->mmio.fw, XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_force_wake; xa_for_each(&sr->xa, reg, entry) apply_one_mmio(gt, entry); - err = xe_force_wake_put(>->mmio.fw, XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return; err_force_wake: - xe_gt_err(gt, "Failed to apply, err=%d\n", err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n"); } void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) @@ -220,15 +221,15 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) u32 mmio_base = hwe->mmio_base; unsigned long reg; unsigned int slot = 0; - int err; + unsigned int fw_ref; if (xa_empty(&sr->xa)) return; drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name); - err = xe_force_wake_get(>->mmio.fw, XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_force_wake; p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL); @@ -241,7 +242,7 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) } xe_reg_whitelist_print_entry(&p, 0, reg, entry); - xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), + xe_mmio_write32(>->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), reg | entry->set_bits); slot++; } @@ -250,16 +251,16 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) { u32 addr = RING_NOPID(mmio_base).addr; - xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr); + xe_mmio_write32(>->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr); } - err = xe_force_wake_put(>->mmio.fw, XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return; err_force_wake: - drm_err(&xe->drm, "Failed to apply, err=%d\n", err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + drm_err(&xe->drm, 
"Failed to apply, err=-ETIMEDOUT\n"); } /** diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 86c705d18c0d..b13d4d62f0b1 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -196,7 +196,7 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx, *gt = (*hwe)->gt; *xe = gt_to_xe(*gt); break; - }; + } } /** diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index fe2cb2a96f78..e055bed7ae55 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -53,7 +53,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 if (IS_ERR(bo)) { drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n", PTR_ERR(bo)); - return (struct xe_sa_manager *)bo; + return ERR_CAST(bo); } sa_manager->bo = bo; sa_manager->is_iomem = bo->vmap.is_iomem; diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index eeccc1c318ae..1905ca590965 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -280,7 +280,7 @@ void xe_sched_job_arm(struct xe_sched_job *job) fence = &chain->base; } - job->fence = fence; + job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */ drm_sched_job_arm(&job->drm); } diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h index 0d3f76fb05ce..f13f333f00be 100644 --- a/drivers/gpu/drm/xe/xe_sched_job_types.h +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h @@ -40,7 +40,6 @@ struct xe_sched_job { * @fence: dma fence to indicate completion. 1 way relationship - job * can safely reference fence, fence cannot safely reference job. */ -#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS struct dma_fence *fence; /** @user_fence: write back value when BB is complete */ struct { @@ -63,7 +62,7 @@ struct xe_sched_job { struct xe_sched_job_snapshot { u16 batch_addr_len; - u64 batch_addr[]; + u64 batch_addr[] __counted_by(batch_addr_len); }; #endif diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c index 5a1d65e4f19f..ef10782af656 100644 --- a/drivers/gpu/drm/xe/xe_sriov.c +++ b/drivers/gpu/drm/xe/xe_sriov.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> #include "regs/xe_regs.h" @@ -35,7 +37,7 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode) static bool test_is_vf(struct xe_device *xe) { - u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); + u32 value = xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG); return value & VF_CAP; } @@ -119,6 +121,7 @@ int xe_sriov_init(struct xe_device *xe) return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe); } +ALLOW_ERROR_INJECTION(xe_sriov_init, ERRNO); /* See xe_pci_probe() */ /** * xe_sriov_print_info - Print basic SR-IOV information. 
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 2e72c06fd40d..a90480c6aecf 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -83,6 +83,8 @@ static void user_fence_worker(struct work_struct *w) XE_WARN_ON("Copy to user failed"); kthread_unuse_mm(ufence->mm); mmput(ufence->mm); + } else { + drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n"); } wake_up_all(&ufence->xe->ufence_wq); diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index dda5268507d8..07cf7cfe4abd 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> #include "xe_device.h" @@ -129,6 +131,7 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id) return 0; } +ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */ static int tile_ttm_mgr_init(struct xe_tile *tile) { diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 8573d7a87d84..91130ad8999c 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -21,6 +21,7 @@ #include "xe_vm.h" #define __dev_name_xe(xe) dev_name((xe)->drm.dev) +#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile))) #define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt))) #define __dev_name_eq(q) __dev_name_gt((q)->gt) @@ -342,12 +343,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal, ); TRACE_EVENT(xe_reg_rw, - TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len), + TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len), - TP_ARGS(gt, write, reg, val, len), + TP_ARGS(mmio, write, reg, val, len), TP_STRUCT__entry( - __string(dev, __dev_name_gt(gt)) + __string(dev, __dev_name_tile(mmio->tile)) __field(u64, val) __field(u32, reg) __field(u16, write) diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index 9b1a1d4304ae..30a3cfbaaa09 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(xe_vm, ), TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev), - __entry->vm, __entry->asid) + __entry->vm, __entry->asid) ); DEFINE_EVENT(xe_vm, xe_vm_kill, diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index f7113cf6109d..423856cc18d4 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -60,7 +60,7 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe) static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) { struct xe_tile *tile = xe_device_get_root_tile(xe); - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct pci_dev *pdev = to_pci_dev(xe->drm.dev); u64 stolen_size; u64 tile_offset; @@ -94,7 +94,7 @@ static u32 get_wopcm_size(struct xe_device *xe) u32 wopcm_size; u64 val; - val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED); + val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED); val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val); switch (val) { @@ -119,7 +119,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr u32 stolen_size, wopcm_size; u32 ggc, gms; - ggc = xe_mmio_read32(xe_root_mmio_gt(xe), GGC); + ggc = xe_mmio_read32(xe_root_tile_mmio(xe), GGC); /* * Check GGMS: it should be fixed 0x3 (8MB), which corresponds 
to the @@ -159,7 +159,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr stolen_size -= wopcm_size; if (media_gt && XE_WA(media_gt, 14019821291)) { - u64 gscpsmi_base = xe_mmio_read64_2x32(media_gt, GSCPSMI_BASE) + u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE) & ~GENMASK_ULL(5, 0); /* diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index 0d5e04158917..d449de0fb6ec 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -33,7 +33,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, { XE_RTP_NAME("Tuning: L3 cache - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, @@ -43,7 +43,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { SET(CCCHKNREG1, L3CMPCTRL)) }, { XE_RTP_NAME("Tuning: Compression Overfetch - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX), SET(XE2LPM_CCCHKNREG1, L3CMPCTRL)) }, @@ -52,7 +52,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN)) }, { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN)) }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"), @@ -61,7 +61,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { COMPMEMRD256BOVRFETCHEN)) }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2, COMPMEMRD256BOVRFETCHEN)) }, @@ -71,7 +71,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) }, { XE_RTP_NAME("Tuning: Stateless compression control - media"), - XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT, REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) }, diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index d431d0031185..fb0eda3d5682 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -4,6 +4,7 @@ */ #include <linux/bitfield.h> +#include <linux/fault-inject.h> #include <linux/firmware.h> #include <drm/drm_managed.h> @@ -796,6 +797,7 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw) return err; } +ALLOW_ERROR_INJECTION(xe_uc_fw_init, ERRNO); /* See xe_pci_probe() */ static u32 uc_fw_ggtt_offset(struct xe_uc_fw *uc_fw) { @@ -806,6 +808,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) { struct xe_device *xe = uc_fw_to_xe(uc_fw); struct xe_gt *gt = uc_fw_to_gt(uc_fw); + struct xe_mmio *mmio = >->mmio; u64 src_offset; u32 dma_ctrl; int ret; @@ -814,34 +817,34 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) /* Set the source address for the uCode */ src_offset = uc_fw_ggtt_offset(uc_fw) + uc_fw->css_offset; - xe_mmio_write32(gt, DMA_ADDR_0_LOW, lower_32_bits(src_offset)); 
- xe_mmio_write32(gt, DMA_ADDR_0_HIGH, + xe_mmio_write32(mmio, DMA_ADDR_0_LOW, lower_32_bits(src_offset)); + xe_mmio_write32(mmio, DMA_ADDR_0_HIGH, upper_32_bits(src_offset) | DMA_ADDRESS_SPACE_GGTT); /* Set the DMA destination */ - xe_mmio_write32(gt, DMA_ADDR_1_LOW, offset); - xe_mmio_write32(gt, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + xe_mmio_write32(mmio, DMA_ADDR_1_LOW, offset); + xe_mmio_write32(mmio, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); /* * Set the transfer size. The header plus uCode will be copied to WOPCM * via DMA, excluding any other components */ - xe_mmio_write32(gt, DMA_COPY_SIZE, + xe_mmio_write32(mmio, DMA_COPY_SIZE, sizeof(struct uc_css_header) + uc_fw->ucode_size); /* Start the DMA */ - xe_mmio_write32(gt, DMA_CTRL, + xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_ENABLE(dma_flags | START_DMA)); /* Wait for DMA to finish */ - ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl, + ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl, false); if (ret) drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n", xe_uc_fw_type_repr(uc_fw->type), dma_ctrl); /* Disable the bits once DMA is over */ - xe_mmio_write32(gt, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); + xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); return ret; } diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index 80ba2fc78837..b1f81dca610d 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -169,7 +169,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) u64 offset_hi, offset_lo; u32 nodes, num_enabled; - reg = xe_mmio_read32(gt, MIRROR_FUSE3); + reg = xe_mmio_read32(>->mmio, MIRROR_FUSE3); nodes = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, reg); num_enabled = hweight32(nodes); /* Number of enabled l3 nodes */ @@ -185,7 +185,8 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */ /* We don't expect any holes */ - xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size), + xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(>_to_tile(gt)->mmio, GSMBASE) - + ccs_size), "Hole between CCS and GSM.\n"); } else { reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR); @@ -219,8 +220,8 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, { struct xe_device *xe = tile_to_xe(tile); struct xe_gt *gt = tile->primary_gt; + unsigned int fw_ref; u64 offset; - int err; u32 reg; if (IS_SRIOV_VF(xe)) { @@ -239,9 +240,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, return 0; } - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; /* actual size */ if (unlikely(xe->info.platform == XE_DG1)) { @@ -257,13 +258,15 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, if (xe->info.has_flat_ccs) { offset = get_flat_ccs_offset(gt, *tile_size); } else { - offset = xe_mmio_read64_2x32(gt, GSMBASE); + offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE); } /* remove the tile offset so we have just the available size */ *vram_size = offset - *tile_offset; - return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + + return 0; } static void vram_fini(void *arg) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 353936a0f877..02cf647f86d8 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ 
b/drivers/gpu/drm/xe/xe_wa.c @@ -8,6 +8,7 @@ #include <drm/drm_managed.h> #include <kunit/visibility.h> #include <linux/compiler_types.h> +#include <linux/fault-inject.h> #include <generated/xe_wa_oob.h> @@ -251,6 +252,34 @@ static const struct xe_rtp_entry_sr gt_was[] = { XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), }, + /* Xe3_LPG */ + + { XE_RTP_NAME("14021871409"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)), + XE_RTP_ACTIONS(SET(UNSLCGCTL9454, LSCFE_CLKGATE_DIS)) + }, + + /* Xe3_LPM */ + + { XE_RTP_NAME("16021867713"), + XE_RTP_RULES(MEDIA_VERSION(3000), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + { XE_RTP_NAME("16021865536"), + XE_RTP_RULES(MEDIA_VERSION(3000), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + { XE_RTP_NAME("14021486841"), + XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + {} }; @@ -567,6 +596,18 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, + /* Xe3_LPG */ + + { XE_RTP_NAME("14021402888"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) + }, + { XE_RTP_NAME("18034896535"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0), + FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH)) + }, + {} }; @@ -742,6 +783,18 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) }, + /* Xe3_LPG */ + { XE_RTP_NAME("14021490052"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0), + ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(FF_MODE, + DIS_MESH_PARTIAL_AUTOSTRIP | + DIS_MESH_AUTOSTRIP), + SET(VFLSKPD, + DIS_PARTIAL_AUTOSTRIP | + DIS_AUTOSTRIP)) + }, + {} }; @@ -854,6 +907,7 @@ int xe_wa_init(struct xe_gt *gt) return 0; } +ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) { @@ -891,11 +945,11 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) */ void xe_wa_apply_tile_workarounds(struct xe_tile *tile) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; if (IS_SRIOV_VF(tile->xe)) return; - if (XE_WA(mmio, 22010954014)) + if (XE_WA(tile->primary_gt, 22010954014)) xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS); } diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 920ca5060146..bcd04464b85e 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -33,7 +33,11 @@ GRAPHICS_VERSION(2004) 22019338487 MEDIA_VERSION(2000) GRAPHICS_VERSION(2001) + MEDIA_VERSION(3000), MEDIA_STEP(A0, B0) 22019338487_display PLATFORM(LUNARLAKE) 16023588340 GRAPHICS_VERSION(2001) 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) +no_media_l3 MEDIA_VERSION(3000) +14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0) + MEDIA_VERSION(3000), MEDIA_STEP(A0, B0) diff --git a/drivers/gpu/drm/xe/xe_wopcm.c b/drivers/gpu/drm/xe/xe_wopcm.c index d3a99157e523..ada0d0aa6b74 100644 --- a/drivers/gpu/drm/xe/xe_wopcm.c +++ b/drivers/gpu/drm/xe/xe_wopcm.c @@ -5,6 +5,8 @@ #include 
"xe_wopcm.h" +#include <linux/fault-inject.h> + #include "regs/xe_guc_regs.h" #include "xe_device.h" #include "xe_force_wake.h" @@ -123,8 +125,8 @@ static bool __check_layout(struct xe_device *xe, u32 wopcm_size, static bool __wopcm_regs_locked(struct xe_gt *gt, u32 *guc_wopcm_base, u32 *guc_wopcm_size) { - u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET); - u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE); + u32 reg_base = xe_mmio_read32(>->mmio, DMA_GUC_WOPCM_OFFSET); + u32 reg_size = xe_mmio_read32(>->mmio, GUC_WOPCM_SIZE); if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) || !(reg_base & GUC_WOPCM_OFFSET_VALID)) @@ -150,13 +152,13 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt, XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK); mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; - err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask, + err = xe_mmio_write32_and_verify(>->mmio, GUC_WOPCM_SIZE, size, mask, size | GUC_WOPCM_SIZE_LOCKED); if (err) goto err_out; mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; - err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET, + err = xe_mmio_write32_and_verify(>->mmio, DMA_GUC_WOPCM_OFFSET, base | huc_agent, mask, base | huc_agent | GUC_WOPCM_OFFSET_VALID); @@ -169,10 +171,10 @@ err_out: drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n"); drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET", DMA_GUC_WOPCM_OFFSET.addr, - xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET)); + xe_mmio_read32(>->mmio, DMA_GUC_WOPCM_OFFSET)); drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE", GUC_WOPCM_SIZE.addr, - xe_mmio_read32(gt, GUC_WOPCM_SIZE)); + xe_mmio_read32(>->mmio, GUC_WOPCM_SIZE)); return err; } @@ -268,3 +270,4 @@ check: return ret; } +ALLOW_ERROR_INJECTION(xe_wopcm_init, ERRNO); /* See xe_pci_probe() */ diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig index 626e5ac4c33d..4197f44e202f 100644 --- a/drivers/gpu/drm/xlnx/Kconfig +++ b/drivers/gpu/drm/xlnx/Kconfig @@ -6,6 +6,7 @@ config DRM_ZYNQMP_DPSUB depends on PHY_XILINX_ZYNQMP depends on XILINX_ZYNQMP_DPDMA select DMA_ENGINE + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 9368acf56eaf..e4e0e299e8a7 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1200,6 +1200,9 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp, { unsigned int i; + if (!layer->info) + return; + for (i = 0; i < layer->info->num_channels; i++) { struct zynqmp_disp_layer_dma *dma = &layer->dmas[i]; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 129beac4c073..25c5dc61ee88 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -18,7 +18,9 @@ #include <drm/drm_modes.h> #include <drm/drm_of.h> +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/io.h> @@ -51,6 +53,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_LANE_COUNT_SET 0x4 #define ZYNQMP_DP_ENHANCED_FRAME_EN 0x8 #define ZYNQMP_DP_TRAINING_PATTERN_SET 0xc +#define ZYNQMP_DP_LINK_QUAL_PATTERN_SET 0x10 #define ZYNQMP_DP_SCRAMBLING_DISABLE 0x14 #define ZYNQMP_DP_DOWNSPREAD_CTL 0x18 #define ZYNQMP_DP_SOFTWARE_RESET 0x1c @@ -64,6 +67,9 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power 
on delay in msec (default: 4)"); ZYNQMP_DP_SOFTWARE_RESET_STREAM3 | \ ZYNQMP_DP_SOFTWARE_RESET_STREAM4 | \ ZYNQMP_DP_SOFTWARE_RESET_AUX) +#define ZYNQMP_DP_COMP_PATTERN_80BIT_1 0x20 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_2 0x24 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_3 0x28 /* Core enable registers */ #define ZYNQMP_DP_TRANSMITTER_ENABLE 0x80 @@ -207,6 +213,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf +#define ZYNQMP_DP_TRANSMIT_PRBS7 0x230 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_0 0x23c #define ZYNQMP_DP_PHY_PRECURSOR_LANE_1 0x240 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_2 0x244 @@ -275,30 +282,108 @@ struct zynqmp_dp_config { }; /** + * enum test_pattern - Test patterns for test testing + * @TEST_VIDEO: Use regular video input + * @TEST_SYMBOL_ERROR: Symbol error measurement pattern + * @TEST_PRBS7: Output of the PRBS7 (x^7 + x^6 + 1) polynomial + * @TEST_80BIT_CUSTOM: A custom 80-bit pattern + * @TEST_CP2520: HBR2 compliance eye pattern + * @TEST_TPS1: Link training symbol pattern TPS1 (/D10.2/) + * @TEST_TPS2: Link training symbol pattern TPS2 + * @TEST_TPS3: Link training symbol pattern TPS3 (for HBR2) + */ +enum test_pattern { + TEST_VIDEO, + TEST_TPS1, + TEST_TPS2, + TEST_TPS3, + TEST_SYMBOL_ERROR, + TEST_PRBS7, + TEST_80BIT_CUSTOM, + TEST_CP2520, +}; + +static const char *const test_pattern_str[] = { + [TEST_VIDEO] = "video", + [TEST_TPS1] = "tps1", + [TEST_TPS2] = "tps2", + [TEST_TPS3] = "tps3", + [TEST_SYMBOL_ERROR] = "symbol-error", + [TEST_PRBS7] = "prbs7", + [TEST_80BIT_CUSTOM] = "80bit-custom", + [TEST_CP2520] = "cp2520", +}; + +/** + * struct zynqmp_dp_test - Configuration for test mode + * @pattern: The test pattern + * @enhanced: Use enhanced framing + * @downspread: Use SSC + * @active: Whether test mode is active + * @custom: Custom pattern for %TEST_80BIT_CUSTOM + * @train_set: Voltage/preemphasis settings + * @bw_code: Bandwidth code for the link + * @link_cnt: Number of lanes + */ +struct zynqmp_dp_test { + enum test_pattern pattern; + bool enhanced, downspread, active; + u8 custom[10]; + u8 train_set[ZYNQMP_DP_MAX_LANES]; + u8 bw_code; + u8 link_cnt; +}; + +/** + * struct zynqmp_dp_train_set_priv - Private data for train_set debugfs files + * @dp: DisplayPort IP core structure + * @lane: The lane for this file + */ +struct zynqmp_dp_train_set_priv { + struct zynqmp_dp *dp; + int lane; +}; + +/** * struct zynqmp_dp - Xilinx DisplayPort core * @dev: device structure * @dpsub: Display subsystem * @iomem: device I/O memory for register access * @reset: reset controller + * @lock: Mutex protecting this struct and register access (but not AUX) * @irq: irq * @bridge: DRM bridge for the DP encoder * @next_bridge: The downstream bridge + * @test: Configuration for test mode * @config: IP core configuration from DTS * @aux: aux channel + * @aux_done: Completed when we get an AUX reply or timeout + * @ignore_aux_errors: If set, AUX errors are suppressed * @phy: PHY handles for DP lanes * @num_lanes: number of enabled phy lanes * @hpd_work: hot plug detection worker + * @hpd_irq_work: hot plug detection IRQ worker + * @ignore_hpd: If set, HPD events and IRQs are ignored * @status: connection status * @enabled: flag to indicate if the device is enabled * @dpcd: DP configuration data from currently connected sink device * @link_config: common link configuration between IP core and sink device * @mode: current 
mode between IP core and sink device * @train_set: set of training data + * @debugfs_train_set: Debugfs private data for @train_set + * + * @lock covers the link configuration in this struct and the device's + * registers. It does not cover @aux or @ignore_aux_errors. It is not strictly + * required for any of the members which are only modified at probe/remove time + * (e.g. @dev). */ struct zynqmp_dp { struct drm_dp_aux aux; struct drm_bridge bridge; struct work_struct hpd_work; + struct work_struct hpd_irq_work; + struct completion aux_done; + struct mutex lock; struct drm_bridge *next_bridge; struct device *dev; @@ -310,9 +395,13 @@ struct zynqmp_dp { enum drm_connector_status status; int irq; bool enabled; + bool ignore_aux_errors; + bool ignore_hpd; + struct zynqmp_dp_train_set_priv debugfs_train_set[ZYNQMP_DP_MAX_LANES]; struct zynqmp_dp_mode mode; struct zynqmp_dp_link_config link_config; + struct zynqmp_dp_test test; struct zynqmp_dp_config config; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 train_set[ZYNQMP_DP_MAX_LANES]; @@ -626,6 +715,7 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, /** * zynqmp_dp_update_vs_emph - Update the training values * @dp: DisplayPort IP core structure + * @train_set: A set of training values * * Update the training values based on the request from sink. The mapped values * are predefined, and values(vs, pe, pc) are from the device manual. @@ -633,12 +723,12 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, * Return: 0 if vs and emph are updated successfully, or the error code returned * by drm_dp_dpcd_write(). */ -static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) +static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp, u8 *train_set) { unsigned int i; int ret; - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->train_set, + ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->mode.lane_cnt); if (ret < 0) return ret; @@ -646,7 +736,7 @@ static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) for (i = 0; i < dp->mode.lane_cnt; i++) { u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4; union phy_configure_opts opts = { 0 }; - u8 train = dp->train_set[i]; + u8 train = train_set[i]; opts.dp.voltage[0] = (train & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; @@ -690,7 +780,7 @@ static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp) * So, This loop should exit before 512 iterations */ for (max_tries = 0; max_tries < 512; max_tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -755,7 +845,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) return ret; for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -778,28 +868,29 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) } /** - * zynqmp_dp_train - Train the link + * zynqmp_dp_setup() - Set up major link parameters * @dp: DisplayPort IP core structure + * @bw_code: The link bandwidth as a multiple of 270 MHz + * @lane_cnt: The number of lanes to use + * @enhanced: Use enhanced framing + * @downspread: Enable spread-spectrum clocking * - * Return: 0 if all trains are done successfully, or corresponding error code. 
+ * Return: 0 on success, or -errno on failure */ -static int zynqmp_dp_train(struct zynqmp_dp *dp) +static int zynqmp_dp_setup(struct zynqmp_dp *dp, u8 bw_code, u8 lane_cnt, + bool enhanced, bool downspread) { u32 reg; - u8 bw_code = dp->mode.bw_code; - u8 lane_cnt = dp->mode.lane_cnt; u8 aux_lane_cnt = lane_cnt; - bool enhanced; int ret; zynqmp_dp_write(dp, ZYNQMP_DP_LANE_COUNT_SET, lane_cnt); - enhanced = drm_dp_enhanced_frame_cap(dp->dpcd); if (enhanced) { zynqmp_dp_write(dp, ZYNQMP_DP_ENHANCED_FRAME_EN, 1); aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN; } - if (dp->dpcd[3] & 0x1) { + if (downspread) { zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 1); drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); @@ -842,8 +933,24 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp) } zynqmp_dp_write(dp, ZYNQMP_DP_PHY_CLOCK_SELECT, reg); - ret = zynqmp_dp_phy_ready(dp); - if (ret < 0) + return zynqmp_dp_phy_ready(dp); +} + +/** + * zynqmp_dp_train - Train the link + * @dp: DisplayPort IP core structure + * + * Return: 0 if all trains are done successfully, or corresponding error code. + */ +static int zynqmp_dp_train(struct zynqmp_dp *dp) +{ + int ret; + + ret = zynqmp_dp_setup(dp, dp->mode.bw_code, dp->mode.lane_cnt, + drm_dp_enhanced_frame_cap(dp->dpcd), + dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + if (ret) return ret; zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1); @@ -934,12 +1041,15 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, u8 *buf, u8 bytes, u8 *reply) { bool is_read = (cmd & AUX_READ_BIT) ? true : false; + unsigned long time_left; u32 reg, i; reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST) return -EBUSY; + reinit_completion(&dp->aux_done); + zynqmp_dp_write(dp, ZYNQMP_DP_AUX_ADDRESS, addr); if (!is_read) for (i = 0; i < bytes; i++) @@ -954,17 +1064,14 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg); /* Wait for reply to be delivered upto 2ms */ - for (i = 0; ; i++) { - reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) - break; - - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT || - i == 2) - return -ETIMEDOUT; + time_left = wait_for_completion_timeout(&dp->aux_done, + msecs_to_jiffies(2)); + if (!time_left) + return -ETIMEDOUT; - usleep_range(1000, 1100); - } + reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); + if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + return -ETIMEDOUT; reg = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_CODE); if (reply) @@ -1006,6 +1113,8 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (dp->status == connector_status_disconnected) { dev_dbg(dp->dev, "no connected aux device\n"); + if (dp->ignore_aux_errors) + goto fake_response; return -ENODEV; } @@ -1014,7 +1123,13 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret); - return ret; + if (!dp->ignore_aux_errors) + return ret; + +fake_response: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + memset(msg->buffer, 0, msg->size); + return msg->size; } /** @@ -1048,6 +1163,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) (w << ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT) | (rate / (1000 * 1000))); + zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); + 
dp->aux.name = "ZynqMP DP AUX"; dp->aux.dev = dp->dev; dp->aux.drm_dev = dp->bridge.dev; @@ -1065,6 +1183,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) static void zynqmp_dp_aux_cleanup(struct zynqmp_dp *dp) { drm_dp_aux_unregister(&dp->aux); + + zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); } /* ----------------------------------------------------------------------------- @@ -1386,8 +1507,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge, } /* Check with link rate and lane count */ + mutex_lock(&dp->lock); rate = zynqmp_dp_max_rate(dp->link_config.max_rate, dp->link_config.max_lanes, dp->config.bpp); + mutex_unlock(&dp->lock); if (mode->clock > rate) { dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n", mode->name); @@ -1414,6 +1537,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, pm_runtime_get_sync(dp->dev); + mutex_lock(&dp->lock); zynqmp_dp_disp_enable(dp, old_bridge_state); /* @@ -1474,6 +1598,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET, ZYNQMP_DP_SOFTWARE_RESET_ALL); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1); + mutex_unlock(&dp->lock); } static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, @@ -1481,6 +1606,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct zynqmp_dp *dp = bridge_to_dp(bridge); + mutex_lock(&dp->lock); dp->enabled = false; cancel_work(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 0); @@ -1491,6 +1617,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0); zynqmp_dp_disp_disable(dp, old_bridge_state); + mutex_unlock(&dp->lock); pm_runtime_put_sync(dp->dev); } @@ -1526,13 +1653,14 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp) { - struct zynqmp_dp *dp = bridge_to_dp(bridge); struct zynqmp_dp_link_config *link_config = &dp->link_config; u32 state, i; int ret; + lockdep_assert_held(&dp->lock); + /* * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to * get the HPD signal with some monitors. 
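The AUX rework above replaces the old 1 ms register polling loop with an interrupt-driven wait: zynqmp_dp_aux_init() unmasks the reply and reply-timeout interrupts, the IRQ handler completes dp->aux_done, and the submit path sleeps on the completion. Condensed from the hunks above (setup and error handling abbreviated), the submit side is:

	/* Condensed sketch of the new AUX wait. The IRQ handler calls
	 * complete(&dp->aux_done) on both REPLY and REPLY_TIMEOUT, so the
	 * 2 ms wait below sleeps instead of busy-polling the status register.
	 */
	reinit_completion(&dp->aux_done);
	/* ... program ZYNQMP_DP_AUX_ADDRESS and the write FIFO ... */
	zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg);

	if (!wait_for_completion_timeout(&dp->aux_done, msecs_to_jiffies(2)))
		return -ETIMEDOUT;

	reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
	if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT)
		return -ETIMEDOUT;
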
@@ -1568,6 +1696,18 @@ disconnected: return connector_status_disconnected; } +static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + enum drm_connector_status ret; + + mutex_lock(&dp->lock); + ret = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + + return ret; +} + static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { @@ -1605,6 +1745,582 @@ zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge, return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts); } +/* ----------------------------------------------------------------------------- + * debugfs + */ + +/** + * zynqmp_dp_set_test_pattern() - Configure the link for a test pattern + * @dp: DisplayPort IP core structure + * @pattern: The test pattern to configure + * @custom: The custom pattern to use if @pattern is %TEST_80BIT_CUSTOM + * + * Return: 0 on success, or negative errno on (DPCD) failure + */ +static int zynqmp_dp_set_test_pattern(struct zynqmp_dp *dp, + enum test_pattern pattern, + u8 *const custom) +{ + bool scramble = false; + u32 train_pattern = 0; + u32 link_pattern = 0; + u8 dpcd_train = 0; + u8 dpcd_link = 0; + int ret; + + switch (pattern) { + case TEST_TPS1: + train_pattern = 1; + break; + case TEST_TPS2: + train_pattern = 2; + break; + case TEST_TPS3: + train_pattern = 3; + break; + case TEST_SYMBOL_ERROR: + scramble = true; + link_pattern = DP_PHY_TEST_PATTERN_ERROR_COUNT; + break; + case TEST_PRBS7: + /* We use a dedicated register to enable PRBS7 */ + dpcd_link = DP_LINK_QUAL_PATTERN_ERROR_RATE; + break; + case TEST_80BIT_CUSTOM: { + const u8 *p = custom; + + link_pattern = DP_LINK_QUAL_PATTERN_80BIT_CUSTOM; + + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_1, + (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_2, + (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_3, + (p[9] << 8) | p[8]); + break; + } + case TEST_CP2520: + link_pattern = DP_LINK_QUAL_PATTERN_CP2520_PAT_1; + break; + default: + WARN_ON_ONCE(1); + fallthrough; + case TEST_VIDEO: + scramble = true; + } + + zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, !scramble); + zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET, train_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_LINK_QUAL_PATTERN_SET, link_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMIT_PRBS7, pattern == TEST_PRBS7); + + dpcd_link = dpcd_link ?: link_pattern; + dpcd_train = train_pattern; + if (!scramble) + dpcd_train |= DP_LINK_SCRAMBLING_DISABLE; + + if (dp->dpcd[DP_DPCD_REV] < 0x12) { + if (pattern == TEST_CP2520) + dev_warn(dp->dev, + "can't set sink link quality pattern to CP2520 for DPCD < r1.2; error counters will be invalid\n"); + else + dpcd_train |= FIELD_PREP(DP_LINK_QUAL_PATTERN_11_MASK, + dpcd_link); + } else { + u8 dpcd_link_lane[ZYNQMP_DP_MAX_LANES]; + + memset(dpcd_link_lane, dpcd_link, ZYNQMP_DP_MAX_LANES); + ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_QUAL_LANE0_SET, + dpcd_link_lane, ZYNQMP_DP_MAX_LANES); + if (ret < 0) + return ret; + } + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, dpcd_train); + return ret < 0 ? 
ret : 0; +} + +static int zynqmp_dp_test_setup(struct zynqmp_dp *dp) +{ + return zynqmp_dp_setup(dp, dp->test.bw_code, dp->test.link_cnt, + dp->test.enhanced, dp->test.downspread); +} + +static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = snprintf(buf, sizeof(buf), "%s\n", + test_pattern_str[dp->test.pattern]); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +static ssize_t zynqmp_dp_pattern_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + int pattern; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, + count); + if (ret < 0) + goto out; + buf[ret] = '\0'; + + pattern = sysfs_match_string(test_pattern_str, buf); + if (pattern < 0) { + ret = -EINVAL; + goto out; + } + + mutex_lock(&dp->lock); + dp->test.pattern = pattern; + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_pattern = { + .read = zynqmp_dp_pattern_read, + .write = zynqmp_dp_pattern_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_enhanced_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.enhanced; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_enhanced_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.enhanced = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get, + zynqmp_dp_enhanced_set, "%llu\n"); + +static int zynqmp_dp_downspread_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.downspread; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_downspread_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.downspread = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get, + zynqmp_dp_downspread_set, "%llu\n"); + +static int zynqmp_dp_active_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.active; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_active_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + if (val) { + if (val < 2) { + ret = zynqmp_dp_test_setup(dp); + if (ret) + goto out; + } + + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom); + if (ret) + goto out; + + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + if (ret) + goto out; + + dp->test.active = true; + } else { + int err; + + dp->test.active = 
false; + err = zynqmp_dp_set_test_pattern(dp, TEST_VIDEO, NULL); + if (err) + dev_warn(dp->dev, "could not clear test pattern: %d\n", + err); + zynqmp_dp_train_loop(dp); + } +out: + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get, + zynqmp_dp_active_set, "%llu\n"); + +static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom, + sizeof(dp->test.custom)); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return ret; +} + +static ssize_t zynqmp_dp_custom_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + char buf[sizeof(dp->test.custom)]; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (ret < 0) + goto out; + + mutex_lock(&dp->lock); + memcpy(dp->test.custom, buf, ret); + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_custom = { + .read = zynqmp_dp_custom_read, + .write = zynqmp_dp_custom_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_swing_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_swing_set(void *data, u64 val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 3) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_SWING_REACHED | + DP_TRAIN_VOLTAGE_SWING_MASK); + *train_set |= val; + if (val == 3) + *train_set |= DP_TRAIN_MAX_SWING_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get, + zynqmp_dp_swing_set, "%llu\n"); + +static int zynqmp_dp_preemphasis_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, + dp->test.train_set[priv->lane]); + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_preemphasis_set(void *data, u64 val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 2) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | + DP_TRAIN_PRE_EMPHASIS_MASK); + *train_set |= FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, val); + if (val == 2) + *train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + 
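The swing and pre-emphasis setters above build the lane byte in DPCD DP_TRAINING_LANEx_SET format: voltage swing occupies bits 1:0 (levels 0-3, with DP_TRAIN_MAX_SWING_REACHED required at level 3) and pre-emphasis occupies bits 4:3 (levels 0-2, with the corresponding max-reached flag at level 2), which is why the pre-emphasis value goes through FIELD_PREP() while the swing value does not. A worked example of the byte produced for swing level 3 and pre-emphasis level 1:

	/* Worked example using the drm_dp.h field definitions:
	 * DP_TRAIN_VOLTAGE_SWING_MASK = 0x03, DP_TRAIN_MAX_SWING_REACHED = BIT(2),
	 * DP_TRAIN_PRE_EMPHASIS_MASK = 0x18 (bits 4:3).
	 */
	u8 train_set = 0;

	train_set |= 3;						/* swing level 3 */
	train_set |= DP_TRAIN_MAX_SWING_REACHED;		/* required at level 3 */
	train_set |= FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, 1);	/* pre-emphasis 1 */
	/* train_set == 0x07 | 0x08 == 0x0f */
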
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get, + zynqmp_dp_preemphasis_set, "%llu\n"); + +static int zynqmp_dp_lanes_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.link_cnt; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_lanes_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + if (val > ZYNQMP_DP_MAX_LANES) + return -EINVAL; + + mutex_lock(&dp->lock); + if (val > dp->num_lanes) { + ret = -EINVAL; + } else { + dp->test.link_cnt = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + } + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get, + zynqmp_dp_lanes_set, "%llu\n"); + +static int zynqmp_dp_rate_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_rate_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int link_rate; + int ret = 0; + u8 bw_code; + + if (do_div(val, 10000)) + return -EINVAL; + + bw_code = drm_dp_link_rate_to_bw_code(val); + link_rate = drm_dp_bw_code_to_link_rate(bw_code); + if (val != link_rate) + return -EINVAL; + + if (bw_code != DP_LINK_BW_1_62 && bw_code != DP_LINK_BW_2_7 && + bw_code != DP_LINK_BW_5_4) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->test.bw_code = bw_code; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get, + zynqmp_dp_rate_set, "%llu\n"); + +static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->aux.hw_mutex); + *val = dp->ignore_aux_errors; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->aux.hw_mutex); + dp->ignore_aux_errors = val; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_aux_errors, + zynqmp_dp_ignore_aux_errors_get, + zynqmp_dp_ignore_aux_errors_set, "%llu\n"); + +static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->ignore_hpd; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_ignore_hpd_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->ignore_hpd = val; + mutex_unlock(&dp->lock); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_hpd, zynqmp_dp_ignore_hpd_get, + zynqmp_dp_ignore_hpd_set, "%llu\n"); + +static void zynqmp_dp_bridge_debugfs_init(struct drm_bridge *bridge, + struct dentry *root) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + struct dentry *test; + int i; + + dp->test.bw_code = DP_LINK_BW_5_4; + dp->test.link_cnt = dp->num_lanes; + + test = debugfs_create_dir("test", root); +#define CREATE_FILE(name) \ + debugfs_create_file(#name, 0600, test, dp, &fops_zynqmp_dp_##name) + CREATE_FILE(pattern); + CREATE_FILE(enhanced); + CREATE_FILE(downspread); + CREATE_FILE(active); + CREATE_FILE(custom); + CREATE_FILE(rate); + CREATE_FILE(lanes); + CREATE_FILE(ignore_aux_errors); + CREATE_FILE(ignore_hpd); + + for (i = 0; i < dp->num_lanes; i++) { + static const char fmt[] = 
"lane%d_preemphasis"; + char name[sizeof(fmt)]; + + dp->debugfs_train_set[i].dp = dp; + dp->debugfs_train_set[i].lane = i; + + snprintf(name, sizeof(name), fmt, i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_preemphasis); + + snprintf(name, sizeof(name), "lane%d_swing", i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_swing); + } +} + static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .attach = zynqmp_dp_bridge_attach, .detach = zynqmp_dp_bridge_detach, @@ -1618,6 +2334,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .detect = zynqmp_dp_bridge_detect, .edid_read = zynqmp_dp_bridge_edid_read, .atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts, + .debugfs_init = zynqmp_dp_bridge_debugfs_init, }; /* ----------------------------------------------------------------------------- @@ -1651,10 +2368,46 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work) struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work); enum drm_connector_status status; - status = zynqmp_dp_bridge_detect(&dp->bridge); + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + status = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + drm_bridge_hpd_notify(&dp->bridge, status); } +static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work) +{ + struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, + hpd_irq_work); + u8 status[DP_LINK_STATUS_SIZE + 2]; + int err; + + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, + DP_LINK_STATUS_SIZE + 2); + if (err < 0) { + dev_dbg_ratelimited(dp->dev, + "could not read sink status: %d\n", err); + } else { + if (status[4] & DP_LINK_STATUS_UPDATED || + !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || + !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { + zynqmp_dp_train_loop(dp); + } + } + mutex_unlock(&dp->lock); +} + static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) { struct zynqmp_dp *dp = (struct zynqmp_dp *)data; @@ -1686,23 +2439,15 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) if (status & ZYNQMP_DP_INT_HPD_EVENT) schedule_work(&dp->hpd_work); - if (status & ZYNQMP_DP_INT_HPD_IRQ) { - int ret; - u8 status[DP_LINK_STATUS_SIZE + 2]; + if (status & ZYNQMP_DP_INT_HPD_IRQ) + schedule_work(&dp->hpd_irq_work); - ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, - DP_LINK_STATUS_SIZE + 2); - if (ret < 0) - goto handled; + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) + complete(&dp->aux_done); - if (status[4] & DP_LINK_STATUS_UPDATED || - !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || - !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { - zynqmp_dp_train_loop(dp); - } - } + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + complete(&dp->aux_done); -handled: return IRQ_HANDLED; } @@ -1725,8 +2470,11 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) dp->dev = &pdev->dev; dp->dpsub = dpsub; dp->status = connector_status_disconnected; + mutex_init(&dp->lock); + init_completion(&dp->aux_done); INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func); + INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func); /* Acquire all resources (IOMEM, IRQ and PHYs). 
*/ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp"); @@ -1802,9 +2550,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) * Now that the hardware is initialized and won't generate spurious * interrupts, request the IRQ. */ - ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL, - zynqmp_dp_irq_handler, IRQF_ONESHOT, - dev_name(dp->dev), dp); + ret = devm_request_irq(dp->dev, dp->irq, zynqmp_dp_irq_handler, + IRQF_SHARED, dev_name(dp->dev), dp); if (ret < 0) goto err_phy_exit; @@ -1829,8 +2576,9 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) struct zynqmp_dp *dp = dpsub->dp; zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL); - disable_irq(dp->irq); + devm_free_irq(dp->dev, dp->irq, dp); + cancel_work_sync(&dp->hpd_irq_work); cancel_work_sync(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0); @@ -1838,4 +2586,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) zynqmp_dp_phy_exit(dp); zynqmp_dp_reset(dp, true); + mutex_destroy(&dp->lock); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index bd1368df7870..fc81983d9e5e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -14,6 +14,7 @@ #include <drm/drm_blend.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> @@ -402,6 +403,7 @@ static const struct drm_driver zynqmp_dpsub_drm_driver = { DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &zynqmp_dpsub_drm_fops, @@ -509,12 +511,12 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) if (ret) return ret; - drm_kms_helper_poll_init(drm); - ret = zynqmp_dpsub_kms_init(dpsub); if (ret < 0) goto err_poll_fini; + drm_kms_helper_poll_init(drm); + /* Reset all components and register the DRM device. */ drm_mode_config_reset(drm); @@ -523,7 +525,7 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) goto err_poll_fini; /* Initialize fbdev generic emulation. 
*/ - drm_fbdev_dma_setup(drm, 24); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888); return 0; @@ -536,7 +538,7 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub) { struct drm_device *drm = &dpsub->drm->dev; - drm_dev_unregister(drm); + drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); drm_encoder_cleanup(&dpsub->drm->encoder); drm_kms_helper_poll_fini(drm); diff --git a/drivers/gpu/host1x/context_bus.c b/drivers/gpu/host1x/context_bus.c index d9421179d7b4..7cd0e1a5edd1 100644 --- a/drivers/gpu/host1x/context_bus.c +++ b/drivers/gpu/host1x/context_bus.c @@ -6,7 +6,7 @@ #include <linux/device.h> #include <linux/of.h> -struct bus_type host1x_context_device_bus_type = { +const struct bus_type host1x_context_device_bus_type = { .name = "host1x-context", }; EXPORT_SYMBOL_GPL(host1x_context_device_bus_type); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index e98528777faa..be2ad7203d7b 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -142,18 +142,29 @@ static const struct host1x_info host1x05_info = { }; static const struct host1x_sid_entry tegra186_sid_table[] = { - { - /* VIC */ - .base = 0x1af0, - .offset = 0x30, - .limit = 0x34 - }, - { - /* NVDEC */ - .base = 0x1b00, - .offset = 0x30, - .limit = 0x34 - }, + { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 }, + { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 }, + { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 }, + { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 }, + { /* ISP */ .base = 0x1ae8, .offset = 0x50, .limit = 0x50 }, + { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 }, + { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 }, + { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 }, + { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 }, + { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 }, + { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 }, + { /* VI 0 */ .base = 0x1b80, .offset = 0x10000, .limit = 0x10000 }, + { /* VI 1 */ .base = 0x1b88, .offset = 0x20000, .limit = 0x20000 }, + { /* VI 2 */ .base = 0x1b90, .offset = 0x30000, .limit = 0x30000 }, + { /* VI 3 */ .base = 0x1b98, .offset = 0x40000, .limit = 0x40000 }, + { /* VI 4 */ .base = 0x1ba0, .offset = 0x50000, .limit = 0x50000 }, + { /* VI 5 */ .base = 0x1ba8, .offset = 0x60000, .limit = 0x60000 }, + { /* VI 6 */ .base = 0x1bb0, .offset = 0x70000, .limit = 0x70000 }, + { /* VI 7 */ .base = 0x1bb8, .offset = 0x80000, .limit = 0x80000 }, + { /* VI 8 */ .base = 0x1bc0, .offset = 0x90000, .limit = 0x90000 }, + { /* VI 9 */ .base = 0x1bc8, .offset = 0xa0000, .limit = 0xa0000 }, + { /* VI 10 */ .base = 0x1bd0, .offset = 0xb0000, .limit = 0xb0000 }, + { /* VI 11 */ .base = 0x1bd8, .offset = 0xc0000, .limit = 0xc0000 }, }; static const struct host1x_info host1x06_info = { @@ -173,24 +184,26 @@ static const struct host1x_info host1x06_info = { }; static const struct host1x_sid_entry tegra194_sid_table[] = { - { - /* VIC */ - .base = 0x1af0, - .offset = 0x30, - .limit = 0x34 - }, - { - /* NVDEC */ - .base = 0x1b00, - .offset = 0x30, - .limit = 0x34 - }, - { - /* NVDEC1 */ - .base = 0x1bc0, - .offset = 0x30, - .limit = 0x34 - }, + { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 }, + { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 }, + { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 }, + { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 }, + { /* ISP */ .base = 0x1ae8, .offset = 0x800, .limit = 
0x800 }, + { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 }, + { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 }, + { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 }, + { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 }, + { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 }, + { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 }, + { /* VI */ .base = 0x1b80, .offset = 0x800, .limit = 0x800 }, + { /* VI_THI */ .base = 0x1b88, .offset = 0x30, .limit = 0x34 }, + { /* ISP_THI */ .base = 0x1b90, .offset = 0x30, .limit = 0x34 }, + { /* PVA0_CLUSTER */ .base = 0x1b98, .offset = 0x0, .limit = 0x0 }, + { /* PVA0_CLUSTER */ .base = 0x1ba0, .offset = 0x0, .limit = 0x0 }, + { /* NVDLA0 */ .base = 0x1ba8, .offset = 0x30, .limit = 0x34 }, + { /* NVDLA1 */ .base = 0x1bb0, .offset = 0x30, .limit = 0x34 }, + { /* NVENC1 */ .base = 0x1bb8, .offset = 0x30, .limit = 0x34 }, + { /* NVDEC1 */ .base = 0x1bc0, .offset = 0x30, .limit = 0x34 }, }; static const struct host1x_info host1x07_info = { @@ -215,54 +228,35 @@ static const struct host1x_info host1x07_info = { * and firmware stream ID in the MMIO path table. */ static const struct host1x_sid_entry tegra234_sid_table[] = { - { - /* SE2 MMIO */ - .base = 0x1658, - .offset = 0x90, - .limit = 0x90 - }, - { - /* SE4 MMIO */ - .base = 0x1660, - .offset = 0x90, - .limit = 0x90 - }, - { - /* SE2 channel */ - .base = 0x1738, - .offset = 0x90, - .limit = 0x90 - }, - { - /* SE4 channel */ - .base = 0x1740, - .offset = 0x90, - .limit = 0x90 - }, - { - /* VIC channel */ - .base = 0x17b8, - .offset = 0x30, - .limit = 0x30 - }, - { - /* VIC MMIO */ - .base = 0x1688, - .offset = 0x34, - .limit = 0x34 - }, - { - /* NVDEC channel */ - .base = 0x17c8, - .offset = 0x30, - .limit = 0x30, - }, - { - /* NVDEC MMIO */ - .base = 0x1698, - .offset = 0x34, - .limit = 0x34, - }, + { /* SE1 MMIO */ .base = 0x1650, .offset = 0x90, .limit = 0x90 }, + { /* SE1 ch */ .base = 0x1730, .offset = 0x90, .limit = 0x90 }, + { /* SE2 MMIO */ .base = 0x1658, .offset = 0x90, .limit = 0x90 }, + { /* SE2 ch */ .base = 0x1738, .offset = 0x90, .limit = 0x90 }, + { /* SE4 MMIO */ .base = 0x1660, .offset = 0x90, .limit = 0x90 }, + { /* SE4 ch */ .base = 0x1740, .offset = 0x90, .limit = 0x90 }, + { /* ISP MMIO */ .base = 0x1680, .offset = 0x800, .limit = 0x800 }, + { /* VIC MMIO */ .base = 0x1688, .offset = 0x34, .limit = 0x34 }, + { /* VIC ch */ .base = 0x17b8, .offset = 0x30, .limit = 0x30 }, + { /* NVENC MMIO */ .base = 0x1690, .offset = 0x34, .limit = 0x34 }, + { /* NVENC ch */ .base = 0x17c0, .offset = 0x30, .limit = 0x30 }, + { /* NVDEC MMIO */ .base = 0x1698, .offset = 0x34, .limit = 0x34 }, + { /* NVDEC ch */ .base = 0x17c8, .offset = 0x30, .limit = 0x30 }, + { /* NVJPG MMIO */ .base = 0x16a0, .offset = 0x34, .limit = 0x34 }, + { /* NVJPG ch */ .base = 0x17d0, .offset = 0x30, .limit = 0x30 }, + { /* TSEC MMIO */ .base = 0x16a8, .offset = 0x30, .limit = 0x34 }, + { /* NVJPG1 MMIO */ .base = 0x16b0, .offset = 0x34, .limit = 0x34 }, + { /* NVJPG1 ch */ .base = 0x17a8, .offset = 0x30, .limit = 0x30 }, + { /* VI MMIO */ .base = 0x16b8, .offset = 0x800, .limit = 0x800 }, + { /* VI_THI MMIO */ .base = 0x16c0, .offset = 0x30, .limit = 0x34 }, + { /* ISP_THI MMIO */ .base = 0x16c8, .offset = 0x30, .limit = 0x34 }, + { /* NVDLA MMIO */ .base = 0x16d8, .offset = 0x30, .limit = 0x34 }, + { /* NVDLA ch */ .base = 0x17e0, .offset = 0x30, .limit = 0x34 }, + { /* NVDLA1 MMIO */ .base = 0x16e0, .offset = 0x30, .limit = 0x34 }, + { /* NVDLA1 ch */ .base 
= 0x17e8, .offset = 0x30, .limit = 0x34 }, + { /* OFA MMIO */ .base = 0x16e8, .offset = 0x34, .limit = 0x34 }, + { /* OFA ch */ .base = 0x1768, .offset = 0x30, .limit = 0x30 }, + { /* VI2 MMIO */ .base = 0x16f0, .offset = 0x800, .limit = 0x800 }, + { /* VI2_THI MMIO */ .base = 0x16f8, .offset = 0x30, .limit = 0x34 }, }; static const struct host1x_info host1x08_info = { diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index 92031b240a17..d3855a1c6b47 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -175,11 +175,11 @@ struct host1x { }; void host1x_common_writel(struct host1x *host1x, u32 v, u32 r); -void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v); +void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r); u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r); -void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v); +void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r); u32 host1x_sync_readl(struct host1x *host1x, u32 r); -void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v); +void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r); u32 host1x_ch_readl(struct host1x_channel *ch, u32 r); static inline void host1x_hw_syncpt_restore(struct host1x *host, diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c index 1b65a10b9dfc..3f3f0018eee0 100644 --- a/drivers/gpu/host1x/hw/cdma_hw.c +++ b/drivers/gpu/host1x/hw/cdma_hw.c @@ -254,12 +254,24 @@ static void timeout_release_mlock(struct host1x_cdma *cdma) u32 offset; switch (ch->client->class) { + case HOST1X_CLASS_NVJPG1: + offset = HOST1X_COMMON_NVJPG1_MLOCK; + break; + case HOST1X_CLASS_NVENC: + offset = HOST1X_COMMON_NVENC_MLOCK; + break; case HOST1X_CLASS_VIC: offset = HOST1X_COMMON_VIC_MLOCK; break; + case HOST1X_CLASS_NVJPG: + offset = HOST1X_COMMON_NVJPG_MLOCK; + break; case HOST1X_CLASS_NVDEC: offset = HOST1X_COMMON_NVDEC_MLOCK; break; + case HOST1X_CLASS_OFA: + offset = HOST1X_COMMON_OFA_MLOCK; + break; default: WARN(1, "%s was not updated for class %u", __func__, ch->client->class); return; diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c index 54e31d81517b..4c32aa1b95e8 100644 --- a/drivers/gpu/host1x/hw/debug_hw.c +++ b/drivers/gpu/host1x/hw/debug_hw.c @@ -177,7 +177,16 @@ static void show_gather(struct output *o, dma_addr_t phys_addr, for (i = 0; i < words; i++) { dma_addr_t addr = phys_addr + i * 4; - u32 val = *(map_addr + offset / 4 + i); + u32 voffset = offset + i * 4; + u32 val; + + /* If we reach the RESTART opcode, continue at the beginning of pushbuffer */ + if (cdma && voffset >= cdma->push_buffer.size) { + addr -= cdma->push_buffer.size; + voffset -= cdma->push_buffer.size; + } + + val = *(map_addr + voffset / 4); if (!data_count) { host1x_debug_output(o, " %pad: %08x: ", &addr, val); @@ -203,7 +212,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) job->num_slots, job->num_unpins); show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma, - pb->dma + job->first_get, pb->mapped + job->first_get); + pb->dma, pb->mapped); for (i = 0; i < job->num_cmds; i++) { struct host1x_job_gather *g; @@ -227,7 +236,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n", &g->base, g->offset, g->words); - show_gather(o, g->base + g->offset, g->words, cdma, + show_gather(o, g->base + g->offset, g->words, NULL, g->base, mapped); if (!job->gather_copy_mapped) diff 
--git a/drivers/hid/bpf/hid_bpf_struct_ops.c b/drivers/hid/bpf/hid_bpf_struct_ops.c index 0e611a9d79d7..702c22fae136 100644 --- a/drivers/hid/bpf/hid_bpf_struct_ops.c +++ b/drivers/hid/bpf/hid_bpf_struct_ops.c @@ -79,7 +79,6 @@ static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log, WRITE_RANGE(hid_device, name, true), WRITE_RANGE(hid_device, uniq, true), WRITE_RANGE(hid_device, phys, true), - WRITE_RANGE(hid_device, quirks, false), }; #undef WRITE_RANGE const struct btf_type *state = NULL; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 81d6c734c8bc..98bef39642a9 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2692,12 +2692,6 @@ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv) int ret; if (!hdev->bpf_rsize) { - unsigned int quirks; - - /* reset the quirks that has been previously set */ - quirks = hid_lookup_quirk(hdev); - hdev->quirks = quirks; - /* in case a bpf program gets detached, we need to free the old one */ hid_free_bpf_rdesc(hdev); @@ -2707,9 +2701,6 @@ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv) /* call_hid_bpf_rdesc_fixup will always return a valid pointer */ hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc, &hdev->bpf_rsize); - if (quirks ^ hdev->quirks) - hid_info(hdev, "HID-BPF toggled quirks on the device: %04x", - quirks ^ hdev->quirks); } if (!hid_check_device_match(hdev, hdrv, &id)) @@ -2719,6 +2710,8 @@ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv) if (!hdev->devres_group_id) return -ENOMEM; + /* reset the quirks that have been previously set */ + hdev->quirks = hid_lookup_quirk(hdev); hdev->driver = hdrv; if (hdrv->probe) { diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c index f8b16a82faef..8c28e982e09d 100644 --- a/drivers/hid/hid-picolcd_fb.c +++ b/drivers/hid/hid-picolcd_fb.c @@ -497,6 +497,10 @@ int picolcd_init_framebuffer(struct picolcd_data *data) #endif #endif +#ifdef CONFIG_HID_PICOLCD_LCD + info->lcd_dev = data->lcd; +#endif + fbdata = info->par; spin_lock_init(&fbdata->lock); fbdata->picolcd = data; diff --git a/drivers/hid/hid-picolcd_lcd.c b/drivers/hid/hid-picolcd_lcd.c index 061a33ba7b1d..318f19eac0e7 100644 --- a/drivers/hid/hid-picolcd_lcd.c +++ b/drivers/hid/hid-picolcd_lcd.c @@ -41,15 +41,9 @@ static int picolcd_set_contrast(struct lcd_device *ldev, int contrast) return 0; } -static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb) -{ - return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev)); -} - static const struct lcd_ops picolcd_lcdops = { .get_contrast = picolcd_get_contrast, .set_contrast = picolcd_set_contrast, - .check_fb = picolcd_check_lcd_fb, }; int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report) diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c index 34cac27e4dde..0dcb8a3a691d 100644 --- a/drivers/hwmon/aquacomputer_d5next.c +++ b/drivers/hwmon/aquacomputer_d5next.c @@ -597,7 +597,7 @@ struct aqc_data { /* Sensor values */ s32 temp_input[20]; /* Max 4 physical and 16 virtual or 8 physical and 12 virtual */ - s32 speed_input[8]; + s32 speed_input[9]; u32 speed_input_min[1]; u32 speed_input_target[1]; u32 speed_input_max[1]; diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c index 1f36af2cd2d9..fbe673009126 100644 --- a/drivers/hwmon/tmp108.c +++ b/drivers/hwmon/tmp108.c @@ -452,12 +452,7 @@ static int p3t1085_i3c_probe(struct i3c_device
*i3cdev) struct device *dev = i3cdev_to_dev(i3cdev); struct regmap *regmap; -#ifdef CONFIG_REGMAP_I3C regmap = devm_regmap_init_i3c(i3cdev, &tmp108_regmap_config); -#else - regmap = ERR_PTR(-ENODEV); -#endif - if (IS_ERR(regmap)) return dev_err_probe(dev, PTR_ERR(regmap), "Failed to register i3c regmap\n"); diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c index dfcfb09d9f3c..80fb03f30c30 100644 --- a/drivers/hwmon/tps23861.c +++ b/drivers/hwmon/tps23861.c @@ -132,7 +132,7 @@ static int tps23861_read_temp(struct tps23861_data *data, long *val) if (err < 0) return err; - *val = (regval * TEMPERATURE_LSB) - 20000; + *val = ((long)regval * TEMPERATURE_LSB) - 20000; return 0; } diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0d7b9839e5b6..e9d8d28e055f 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -23,7 +23,6 @@ enum { TH_PCI_RTIT_BAR = 4, }; -#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR)) #define PCI_REG_NPKDSC 0x80 #define NPKDSC_TSACT BIT(5) @@ -83,10 +82,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev, if (err) return err; - err = pcim_iomap_regions_request_all(pdev, BAR_MASK, DRIVER_NAME); + err = pcim_request_all_regions(pdev, DRIVER_NAME); if (err) return err; + if (!pcim_iomap(pdev, TH_PCI_CONFIG_BAR, 0)) + return -ENOMEM; + + if (!pcim_iomap(pdev, TH_PCI_STH_SW_BAR, 0)) + return -ENOMEM; + if (pdev->resource[TH_PCI_RTIT_BAR].start) { resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR]; r++; diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index 3f71ce4711e3..af95d6e7f004 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile @@ -5,10 +5,10 @@ obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o obj-$(CONFIG_I2C) += i2c-core.o -i2c-core-objs := i2c-core-base.o i2c-core-smbus.o +i2c-core-y := i2c-core-base.o i2c-core-smbus.o i2c-core-$(CONFIG_ACPI) += i2c-core-acpi.o -i2c-core-$(CONFIG_I2C_SLAVE) += i2c-core-slave.o -i2c-core-$(CONFIG_OF) += i2c-core-of.o +i2c-core-$(CONFIG_I2C_SLAVE) += i2c-core-slave.o +i2c-core-$(CONFIG_OF) += i2c-core-of.o obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 4977abcd7c46..ceb3ecdf884b 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -62,19 +62,6 @@ config I2C_AMD756 This driver can also be built as a module. If so, the module will be called i2c-amd756. -config I2C_AMD756_S4882 - tristate "SMBus multiplexing on the Tyan S4882" - depends on I2C_AMD756 && X86 - help - Enabling this option will add specific SMBus support for the Tyan - S4882 motherboard. On this 4-CPU board, the SMBus is multiplexed - over 8 different channels, where the various memory module EEPROMs - and temperature sensors live. Saying yes here will give you access - to these in addition to the trunk. - - This driver can also be built as a module. If so, the module - will be called i2c-amd756-s4882. - config I2C_AMD8111 tristate "AMD 8111" depends on PCI && HAS_IOPORT @@ -95,6 +82,23 @@ config I2C_AMD_MP2 This driver can also be built as modules. If so, the modules will be called i2c-amd-mp2-pci and i2c-amd-mp2-plat. +config I2C_AMD_ASF + tristate "AMD ASF I2C Controller Support" + depends on I2C_PIIX4 + select I2C_SLAVE + help + This option enables support for the AMD ASF (Alert Standard Format) + I2C controller. 
The AMD ASF controller is an SMBus controller with + built-in ASF functionality, allowing it to issue generic SMBus + packets and communicate with the DASH controller using MCTP over + ASF. + + If you have an AMD system with ASF support and want to enable this + functionality, say Y or M here. If unsure, say N. + + To compile this driver as a module, choose M here: the module will + be called i2c_amd_asf_plat. + config I2C_HIX5HD2 tristate "Hix5hd2 high-speed I2C driver" depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST @@ -160,6 +164,7 @@ config I2C_I801 Meteor Lake (SOC and PCH) Birch Stream (SOC) Arrow Lake (SOC) + Panther Lake (SOC) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -250,19 +255,6 @@ config I2C_NFORCE2 This driver can also be built as a module. If so, the module will be called i2c-nforce2. -config I2C_NFORCE2_S4985 - tristate "SMBus multiplexing on the Tyan S4985" - depends on I2C_NFORCE2 && X86 - help - Enabling this option will add specific SMBus support for the Tyan - S4985 motherboard. On this 4-CPU board, the SMBus is multiplexed - over 4 different channels, where the various memory module EEPROMs - live. Saying yes here will give you access to these in addition - to the trunk. - - This driver can also be built as a module. If so, the module - will be called i2c-nforce2-s4985. - config I2C_NVIDIA_GPU tristate "NVIDIA GPU I2C controller" depends on PCI @@ -439,7 +431,7 @@ config I2C_AT91 are facing this situation, use the i2c-gpio driver. config I2C_AT91_SLAVE_EXPERIMENTAL - tristate "Microchip AT91 I2C experimental slave mode" + bool "Microchip AT91 I2C experimental slave mode" depends on I2C_AT91 select I2C_SLAVE help @@ -448,7 +440,7 @@ config I2C_AT91_SLAVE_EXPERIMENTAL been tested in a heavy way, help wanted. There are known bugs: - It can hang, on a SAMA5D4, after several transfers. - - There are some mismtaches with a SAMA5D4 as slave and a SAMA5D2 as + - There are some mismatches with a SAMA5D4 as slave and a SAMA5D2 as master. config I2C_AU1550 @@ -751,13 +743,14 @@ config I2C_IMG config I2C_IMX tristate "IMX I2C interface" - depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST + depends on ARCH_MXC || ARCH_LAYERSCAPE || ARCH_S32 || COLDFIRE \ + || COMPILE_TEST select I2C_SLAVE help Say Y here if you want to use the IIC bus controller on - the Freescale i.MX/MXC, Layerscape or ColdFire processors. + the Freescale i.MX/MXC/S32G, Layerscape or ColdFire processors. - This driver can also be built as a module. If so, the module + This driver can also be built as a module. If so, the module will be called i2c-imx. config I2C_IMX_LPI2C @@ -1070,6 +1063,16 @@ config I2C_RK3X This driver can also be built as a module. If so, the module will be called i2c-rk3x. +config I2C_RTL9300 + tristate "Realtek RTL9300 I2C controller" + depends on MACH_REALTEK_RTL || COMPILE_TEST + help + Say Y here to include support for the I2C controller in Realtek + RTL9300 SoCs. + + This driver can also be built as a module. If so, the module will + be called i2c-rtl9300. 
+ config I2C_RZV2M tristate "Renesas RZ/V2M adapter" depends on ARCH_RENESAS || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index a6bcbf2febcf..1c2a4510abe4 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -14,14 +14,12 @@ obj-$(CONFIG_I2C_ALI1535) += i2c-ali1535.o obj-$(CONFIG_I2C_ALI1563) += i2c-ali1563.o obj-$(CONFIG_I2C_ALI15X3) += i2c-ali15x3.o obj-$(CONFIG_I2C_AMD756) += i2c-amd756.o -obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o obj-$(CONFIG_I2C_CHT_WC) += i2c-cht-wc.o obj-$(CONFIG_I2C_I801) += i2c-i801.o obj-$(CONFIG_I2C_ISCH) += i2c-isch.o obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o -obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o @@ -38,12 +36,11 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o # Embedded system I2C/SMBus host controller drivers obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o obj-$(CONFIG_I2C_AMD_MP2) += i2c-amd-mp2-pci.o i2c-amd-mp2-plat.o +obj-$(CONFIG_I2C_AMD_ASF) += i2c-amd-asf-plat.o obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o -i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o -ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y) - i2c-at91-objs += i2c-at91-slave.o -endif +i2c-at91-y := i2c-at91-core.o i2c-at91-master.o +i2c-at91-$(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL) += i2c-at91-slave.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o @@ -104,6 +101,7 @@ obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o obj-$(CONFIG_I2C_QUP) += i2c-qup.o obj-$(CONFIG_I2C_RIIC) += i2c-riic.o obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o +obj-$(CONFIG_I2C_RTL9300) += i2c-rtl9300.o obj-$(CONFIG_I2C_RZV2M) += i2c-rzv2m.o obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o @@ -112,8 +110,8 @@ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o -i2c-stm32f7-drv-objs := i2c-stm32f7.o i2c-stm32.o obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7-drv.o +i2c-stm32f7-drv-y := i2c-stm32f7.o i2c-stm32.o obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o obj-$(CONFIG_I2C_SYNQUACER) += i2c-synquacer.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o @@ -122,10 +120,10 @@ obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o obj-$(CONFIG_I2C_WMT) += i2c-viai2c-wmt.o i2c-viai2c-common.o -i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o -i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o +i2c-octeon-y := i2c-octeon-core.o i2c-octeon-platdrv.o obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o +i2c-thunderx-y := i2c-octeon-core.o i2c-thunderx-pcidrv.o obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index f4dde08a3b92..2da73173ce24 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -482,7 +482,7 @@ MODULE_DEVICE_TABLE(of, altr_i2c_of_match); static struct platform_driver altr_i2c_driver = { .probe = altr_i2c_probe, - .remove_new = altr_i2c_remove, + .remove = altr_i2c_remove, .driver = { .name = 
"altera-i2c", .of_match_table = altr_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c new file mode 100644 index 000000000000..ba47df5370c7 --- /dev/null +++ b/drivers/i2c/busses/i2c-amd-asf-plat.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Alert Standard Format Platform Driver + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + * Sanket Goswami <Sanket.Goswami@amd.com> + */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/devm-helpers.h> +#include <linux/errno.h> +#include <linux/gfp_types.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/sprintf.h> + +#include "i2c-piix4.h" + +/* ASF register bits */ +#define ASF_SLV_LISTN 0 +#define ASF_SLV_INTR 1 +#define ASF_SLV_RST 4 +#define ASF_PEC_SP 5 +#define ASF_DATA_EN 7 +#define ASF_MSTR_EN 16 +#define ASF_CLK_EN 17 + +/* ASF address offsets */ +#define ASFINDEX (0x07 + piix4_smba) +#define ASFLISADDR (0x09 + piix4_smba) +#define ASFSTA (0x0A + piix4_smba) +#define ASFSLVSTA (0x0D + piix4_smba) +#define ASFDATARWPTR (0x11 + piix4_smba) +#define ASFSETDATARDPTR (0x12 + piix4_smba) +#define ASFDATABNKSEL (0x13 + piix4_smba) +#define ASFSLVEN (0x15 + piix4_smba) + +#define ASF_BLOCK_MAX_BYTES 72 +#define ASF_ERROR_STATUS GENMASK(3, 1) + +struct amd_asf_dev { + struct i2c_adapter adap; + void __iomem *eoi_base; + struct i2c_client *target; + struct delayed_work work_buf; + struct sb800_mmio_cfg mmio_cfg; + struct resource *port_addr; +}; + +static void amd_asf_process_target(struct work_struct *work) +{ + struct amd_asf_dev *dev = container_of(work, struct amd_asf_dev, work_buf.work); + unsigned short piix4_smba = dev->port_addr->start; + u8 data[ASF_BLOCK_MAX_BYTES]; + u8 bank, reg, cmd; + u8 len = 0, idx, val; + + /* Read target status register */ + reg = inb_p(ASFSLVSTA); + + /* Check if no error bits are set in target status register */ + if (reg & ASF_ERROR_STATUS) { + /* Set bank as full */ + cmd = 0; + reg |= GENMASK(3, 2); + outb_p(reg, ASFDATABNKSEL); + } else { + /* Read data bank */ + reg = inb_p(ASFDATABNKSEL); + bank = (reg & BIT(3)) ? 1 : 0; + + /* Set read data bank */ + if (bank) { + reg |= BIT(4); + reg &= ~BIT(3); + } else { + reg &= ~BIT(4); + reg &= ~BIT(2); + } + + /* Read command register */ + outb_p(reg, ASFDATABNKSEL); + cmd = inb_p(ASFINDEX); + len = inb_p(ASFDATARWPTR); + for (idx = 0; idx < len; idx++) + data[idx] = inb_p(ASFINDEX); + + /* Clear data bank status */ + if (bank) { + reg |= BIT(3); + outb_p(reg, ASFDATABNKSEL); + } else { + reg |= BIT(2); + outb_p(reg, ASFDATABNKSEL); + } + } + + outb_p(0, ASFSETDATARDPTR); + if (cmd & BIT(0)) + return; + + /* + * Although i2c_slave_event() returns an appropriate error code, we + * don't check it here because we're operating in the workqueue context. 
+ */ + i2c_slave_event(dev->target, I2C_SLAVE_WRITE_REQUESTED, &val); + for (idx = 0; idx < len; idx++) { + val = data[idx]; + i2c_slave_event(dev->target, I2C_SLAVE_WRITE_RECEIVED, &val); + } + i2c_slave_event(dev->target, I2C_SLAVE_STOP, &val); +} + +static void amd_asf_update_ioport_target(unsigned short piix4_smba, u8 bit, + unsigned long offset, bool set) +{ + unsigned long reg; + + reg = inb_p(offset); + __assign_bit(bit, &reg, set); + outb_p(reg, offset); +} + +static void amd_asf_update_mmio_target(struct amd_asf_dev *dev, u8 bit, bool set) +{ + unsigned long reg; + + reg = ioread32(dev->mmio_cfg.addr); + __assign_bit(bit, &reg, set); + iowrite32(reg, dev->mmio_cfg.addr); +} + +static void amd_asf_setup_target(struct amd_asf_dev *dev) +{ + unsigned short piix4_smba = dev->port_addr->start; + + /* Reset both host and target before setting up */ + outb_p(0, SMBHSTSTS); + outb_p(0, ASFSLVSTA); + outb_p(0, ASFSTA); + + /* Update target address */ + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, true); + /* Enable target and set the clock */ + amd_asf_update_mmio_target(dev, ASF_MSTR_EN, false); + amd_asf_update_mmio_target(dev, ASF_CLK_EN, true); + /* Enable target interrupt */ + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, true); + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, false); + /* Enable PEC and PEC append */ + amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true); + amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true); +} + +static int amd_asf_access(struct i2c_adapter *adap, u16 addr, u8 command, u8 *data) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(adap); + unsigned short piix4_smba = dev->port_addr->start; + u8 i, len; + + outb_p((addr << 1), SMBHSTADD); + outb_p(command, SMBHSTCMD); + len = data[0]; + if (len == 0 || len > ASF_BLOCK_MAX_BYTES) + return -EINVAL; + + outb_p(len, SMBHSTDAT0); + /* Reset SMBBLKDAT */ + inb_p(SMBHSTCNT); + for (i = 1; i <= len; i++) + outb_p(data[i], SMBBLKDAT); + + outb_p(PIIX4_BLOCK_DATA, SMBHSTCNT); + /* Enable PEC and PEC append */ + amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true); + amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true); + + return piix4_transaction(adap, piix4_smba); +} + +static int amd_asf_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(adap); + unsigned short piix4_smba = dev->port_addr->start; + u8 asf_data[ASF_BLOCK_MAX_BYTES]; + struct i2c_msg *dev_msgs = msgs; + u8 prev_port; + int ret; + + if (msgs->flags & I2C_M_RD) { + dev_err(&adap->dev, "ASF: Read not supported\n"); + return -EOPNOTSUPP; + } + + /* Exclude the receive header and PEC */ + if (msgs->len > ASF_BLOCK_MAX_BYTES - 3) { + dev_warn(&adap->dev, "ASF: max message length exceeded\n"); + return -EOPNOTSUPP; + } + + asf_data[0] = dev_msgs->len; + memcpy(asf_data + 1, dev_msgs[0].buf, dev_msgs->len); + + ret = piix4_sb800_region_request(&adap->dev, &dev->mmio_cfg); + if (ret) + return ret; + + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true); + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, false); + /* Clear ASF target status */ + outb_p(0, ASFSLVSTA); + + /* Enable ASF SMBus controller function */ + amd_asf_update_mmio_target(dev, ASF_MSTR_EN, true); + prev_port = piix4_sb800_port_sel(0, &dev->mmio_cfg); + ret = amd_asf_access(adap, msgs->addr, msgs[0].buf[0], asf_data); + piix4_sb800_port_sel(prev_port, &dev->mmio_cfg); +
amd_asf_setup_target(dev); + piix4_sb800_region_release(&adap->dev, &dev->mmio_cfg); + return ret; +} + +static int amd_asf_reg_target(struct i2c_client *target) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter); + unsigned short piix4_smba = dev->port_addr->start; + int ret; + u8 reg; + + if (dev->target) + return -EBUSY; + + ret = piix4_sb800_region_request(&target->dev, &dev->mmio_cfg); + if (ret) + return ret; + + reg = (target->addr << 1) | I2C_M_RD; + outb_p(reg, ASFLISADDR); + + amd_asf_setup_target(dev); + dev->target = target; + amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, ASFDATABNKSEL, false); + piix4_sb800_region_release(&target->dev, &dev->mmio_cfg); + + return 0; +} + +static int amd_asf_unreg_target(struct i2c_client *target) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter); + unsigned short piix4_smba = dev->port_addr->start; + + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, false); + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true); + dev->target = NULL; + + return 0; +} + +static u32 amd_asf_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_PEC | I2C_FUNC_SLAVE; +} + +static const struct i2c_algorithm amd_asf_smbus_algorithm = { + .master_xfer = amd_asf_xfer, + .reg_slave = amd_asf_reg_target, + .unreg_slave = amd_asf_unreg_target, + .functionality = amd_asf_func, +}; + +static irqreturn_t amd_asf_irq_handler(int irq, void *ptr) +{ + struct amd_asf_dev *dev = ptr; + unsigned short piix4_smba = dev->port_addr->start; + u8 target_int = inb_p(ASFSTA); + + if (target_int & BIT(6)) { + /* Target Interrupt */ + outb_p(target_int | BIT(6), ASFSTA); + schedule_delayed_work(&dev->work_buf, HZ); + } else { + /* Controller Interrupt */ + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true); + } + + return IRQ_HANDLED; +} + +static int amd_asf_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct amd_asf_dev *asf_dev; + struct resource *eoi_addr; + int ret, irq; + + asf_dev = devm_kzalloc(dev, sizeof(*asf_dev), GFP_KERNEL); + if (!asf_dev) + return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n"); + + asf_dev->mmio_cfg.use_mmio = true; + asf_dev->port_addr = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!asf_dev->port_addr) + return dev_err_probe(dev, -EINVAL, "missing IO resources\n"); + + /* + * The resource obtained via ACPI might not belong to the ASF device address space. Instead, + * it could be within other IP blocks of the ASIC, which are crucial for generating + * subsequent interrupts. Therefore, we avoid using devm_platform_ioremap_resource() and + * use platform_get_resource() and devm_ioremap() separately to prevent any address space + * conflicts. 
+ */ + eoi_addr = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!eoi_addr) + return dev_err_probe(dev, -EINVAL, "missing MEM resources\n"); + + asf_dev->eoi_base = devm_ioremap(dev, eoi_addr->start, resource_size(eoi_addr)); + if (!asf_dev->eoi_base) + return dev_err_probe(dev, -EBUSY, "failed mapping IO region\n"); + + ret = devm_delayed_work_autocancel(dev, &asf_dev->work_buf, amd_asf_process_target); + if (ret) + return dev_err_probe(dev, ret, "failed to create work queue\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "missing IRQ resources\n"); + + ret = devm_request_irq(dev, irq, amd_asf_irq_handler, IRQF_SHARED, "amd_asf", asf_dev); + if (ret) + return dev_err_probe(dev, ret, "Unable to request irq: %d for use\n", irq); + + asf_dev->adap.owner = THIS_MODULE; + asf_dev->adap.algo = &amd_asf_smbus_algorithm; + asf_dev->adap.dev.parent = dev; + + i2c_set_adapdata(&asf_dev->adap, asf_dev); + snprintf(asf_dev->adap.name, sizeof(asf_dev->adap.name), "AMD ASF adapter"); + + return devm_i2c_add_adapter(dev, &asf_dev->adap); +} + +static const struct acpi_device_id amd_asf_acpi_ids[] = { + { "AMDI001A" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_asf_acpi_ids); + +static struct platform_driver amd_asf_driver = { + .driver = { + .name = "i2c-amd-asf", + .acpi_match_table = amd_asf_acpi_ids, + }, + .probe = amd_asf_probe, +}; +module_platform_driver(amd_asf_driver); + +MODULE_IMPORT_NS(PIIX4_SMBUS); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD Alert Standard Format Driver"); diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c index 6f0ef587e76d..d9dd0e475d1a 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-plat.c +++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c @@ -346,7 +346,7 @@ MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match); static struct platform_driver i2c_amd_plat_driver = { .probe = i2c_amd_probe, - .remove_new = i2c_amd_remove, + .remove = i2c_amd_remove, .driver = { .name = "i2c_amd_mp2", .acpi_match_table = ACPI_PTR(i2c_amd_acpi_match), diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c deleted file mode 100644 index 063274388a75..000000000000 --- a/drivers/i2c/busses/i2c-amd756-s4882.c +++ /dev/null @@ -1,245 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard - * - * Copyright (C) 2004, 2008 Jean Delvare <jdelvare@suse.de> - */ - -/* - * We select the channels by sending commands to the Philips - * PCA9556 chip at I2C address 0x18. The main adapter is used for - * the non-multiplexed part of the bus, and 4 virtual adapters - * are defined for the multiplexed addresses: 0x50-0x53 (memory - * module EEPROM) located on channels 1-4, and 0x4c (LM63) - * located on multiplexed channels 0 and 5-7. 
We define one - * virtual adapter per CPU, which corresponds to two multiplexed - * channels: - * CPU0: virtual adapter 1, channels 1 and 0 - * CPU1: virtual adapter 2, channels 2 and 5 - * CPU2: virtual adapter 3, channels 3 and 6 - * CPU3: virtual adapter 4, channels 4 and 7 - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/i2c.h> -#include <linux/mutex.h> - -extern struct i2c_adapter amd756_smbus; - -static struct i2c_adapter *s4882_adapter; -static struct i2c_algorithm *s4882_algo; - -/* Wrapper access functions for multiplexed SMBus */ -static DEFINE_MUTEX(amd756_lock); - -static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - int error; - - /* We exclude the multiplexed addresses */ - if (addr == 0x4c || (addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 - || addr == 0x18) - return -ENXIO; - - mutex_lock(&amd756_lock); - - error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - - mutex_unlock(&amd756_lock); - - return error; -} - -/* We remember the last used channels combination so as to only switch - channels when it is really needed. This greatly reduces the SMBus - overhead, but also assumes that nobody will be writing to the PCA9556 - in our back. */ -static u8 last_channels; - -static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data, - u8 channels) -{ - int error; - - /* We exclude the non-multiplexed addresses */ - if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) - return -ENXIO; - - mutex_lock(&amd756_lock); - - if (last_channels != channels) { - union i2c_smbus_data mplxdata; - mplxdata.byte = channels; - - error = amd756_smbus.algo->smbus_xfer(adap, 0x18, 0, - I2C_SMBUS_WRITE, 0x01, - I2C_SMBUS_BYTE_DATA, - &mplxdata); - if (error) - goto UNLOCK; - last_channels = channels; - } - error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - -UNLOCK: - mutex_unlock(&amd756_lock); - return error; -} - -static s32 amd756_access_virt1(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU0: channels 1 and 0 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x03); -} - -static s32 amd756_access_virt2(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU1: channels 2 and 5 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x24); -} - -static s32 amd756_access_virt3(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU2: channels 3 and 6 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x48); -} - -static s32 amd756_access_virt4(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU3: channels 4 and 7 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x90); -} - -static int __init amd756_s4882_init(void) -{ - int i, error; - union i2c_smbus_data ioconfig; - 
- if (!amd756_smbus.dev.parent) - return -ENODEV; - - /* Configure the PCA9556 multiplexer */ - ioconfig.byte = 0x00; /* All I/O to output mode */ - error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, - I2C_SMBUS_BYTE_DATA, &ioconfig); - if (error) { - dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); - error = -EIO; - goto ERROR0; - } - - /* Unregister physical bus */ - i2c_del_adapter(&amd756_smbus); - - printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n"); - /* Define the 5 virtual adapters and algorithms structures */ - if (!(s4882_adapter = kcalloc(5, sizeof(struct i2c_adapter), - GFP_KERNEL))) { - error = -ENOMEM; - goto ERROR1; - } - if (!(s4882_algo = kcalloc(5, sizeof(struct i2c_algorithm), - GFP_KERNEL))) { - error = -ENOMEM; - goto ERROR2; - } - - /* Fill in the new structures */ - s4882_algo[0] = *(amd756_smbus.algo); - s4882_algo[0].smbus_xfer = amd756_access_virt0; - s4882_adapter[0] = amd756_smbus; - s4882_adapter[0].algo = s4882_algo; - s4882_adapter[0].dev.parent = amd756_smbus.dev.parent; - for (i = 1; i < 5; i++) { - s4882_algo[i] = *(amd756_smbus.algo); - s4882_adapter[i] = amd756_smbus; - snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name), - "SMBus 8111 adapter (CPU%d)", i-1); - s4882_adapter[i].algo = s4882_algo+i; - s4882_adapter[i].dev.parent = amd756_smbus.dev.parent; - } - s4882_algo[1].smbus_xfer = amd756_access_virt1; - s4882_algo[2].smbus_xfer = amd756_access_virt2; - s4882_algo[3].smbus_xfer = amd756_access_virt3; - s4882_algo[4].smbus_xfer = amd756_access_virt4; - - /* Register virtual adapters */ - for (i = 0; i < 5; i++) { - error = i2c_add_adapter(s4882_adapter+i); - if (error) { - printk(KERN_ERR "i2c-amd756-s4882: " - "Virtual adapter %d registration " - "failed, module not inserted\n", i); - for (i--; i >= 0; i--) - i2c_del_adapter(s4882_adapter+i); - goto ERROR3; - } - } - - return 0; - -ERROR3: - kfree(s4882_algo); - s4882_algo = NULL; -ERROR2: - kfree(s4882_adapter); - s4882_adapter = NULL; -ERROR1: - /* Restore physical bus */ - i2c_add_adapter(&amd756_smbus); -ERROR0: - return error; -} - -static void __exit amd756_s4882_exit(void) -{ - if (s4882_adapter) { - int i; - - for (i = 0; i < 5; i++) - i2c_del_adapter(s4882_adapter+i); - kfree(s4882_adapter); - s4882_adapter = NULL; - } - kfree(s4882_algo); - s4882_algo = NULL; - - /* Restore physical bus */ - if (i2c_add_adapter(&amd756_smbus)) - printk(KERN_ERR "i2c-amd756-s4882: " - "Physical bus restoration failed\n"); -} - -MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); -MODULE_DESCRIPTION("S4882 SMBus multiplexing"); -MODULE_LICENSE("GPL"); - -module_init(amd756_s4882_init); -module_exit(amd756_s4882_exit); diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index 208310db906d..fa0d5a2c3732 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c @@ -283,7 +283,7 @@ static const struct i2c_algorithm smbus_algorithm = { .functionality = amd756_func, }; -struct i2c_adapter amd756_smbus = { +static struct i2c_adapter amd756_smbus = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON, .algo = &smbus_algorithm, @@ -398,5 +398,3 @@ module_pci_driver(amd756_driver); MODULE_AUTHOR("Merlin Hughes <merlin@merlin.org>"); MODULE_DESCRIPTION("AMD756/766/768/8111 and nVidia nForce SMBus driver"); MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(amd756_smbus); diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index cc5a26637fd5..1550d3d552ae 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ 
b/drivers/i2c/busses/i2c-aspeed.c @@ -1102,7 +1102,7 @@ static void aspeed_i2c_remove_bus(struct platform_device *pdev) static struct platform_driver aspeed_i2c_bus_driver = { .probe = aspeed_i2c_probe_bus, - .remove_new = aspeed_i2c_remove_bus, + .remove = aspeed_i2c_remove_bus, .driver = { .name = "aspeed-i2c-bus", .of_match_table = aspeed_i2c_bus_of_table, diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index dc52b3530725..edc047e3e535 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -330,7 +330,7 @@ static const struct dev_pm_ops __maybe_unused at91_twi_pm = { static struct platform_driver at91_twi_driver = { .probe = at91_twi_probe, - .remove_new = at91_twi_remove, + .remove = at91_twi_remove, .id_table = at91_twi_devtypes, .driver = { .name = "at91_i2c", diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c index 902e420e761e..b78b38ddac46 100644 --- a/drivers/i2c/busses/i2c-au1550.c +++ b/drivers/i2c/busses/i2c-au1550.c @@ -368,7 +368,7 @@ static struct platform_driver au1xpsc_smbus_driver = { .pm = pm_sleep_ptr(&i2c_au1550_pmops), }, .probe = i2c_au1550_probe, - .remove_new = i2c_au1550_remove, + .remove = i2c_au1550_remove, }; module_platform_driver(au1xpsc_smbus_driver); diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index a66f7f67b3b8..48916cf45ff7 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -824,7 +824,7 @@ MODULE_DEVICE_TABLE(of, axxia_i2c_of_match); static struct platform_driver axxia_i2c_driver = { .probe = axxia_i2c_probe, - .remove_new = axxia_i2c_remove, + .remove = axxia_i2c_remove, .driver = { .name = "axxia-i2c", .of_match_table = axxia_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 133d02899c6b..15b632a146e1 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -1264,7 +1264,7 @@ static struct platform_driver bcm_iproc_i2c_driver = { .pm = pm_sleep_ptr(&bcm_iproc_i2c_pm_ops), }, .probe = bcm_iproc_i2c_probe, - .remove_new = bcm_iproc_i2c_remove, + .remove = bcm_iproc_i2c_remove, }; module_platform_driver(bcm_iproc_i2c_driver); diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index eb5c46a8f824..340fe1305dd9 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -877,7 +877,7 @@ static struct platform_driver bcm_kona_i2c_driver = { .of_match_table = bcm_kona_i2c_of_match, }, .probe = bcm_kona_i2c_probe, - .remove_new = bcm_kona_i2c_remove, + .remove = bcm_kona_i2c_remove, }; module_platform_driver(bcm_kona_i2c_driver); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ae42e37052a8..8554e790f8e3 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match); static struct platform_driver bcm2835_i2c_driver = { .probe = bcm2835_i2c_probe, - .remove_new = bcm2835_i2c_remove, + .remove = bcm2835_i2c_remove, .driver = { .name = "i2c-bcm2835", .of_match_table = bcm2835_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 83b85011e377..00f1a046e985 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -744,7 +744,7 @@ static struct platform_driver brcmstb_i2c_driver = { .pm = pm_sleep_ptr(&brcmstb_i2c_pm), }, .probe = brcmstb_i2c_probe, - 
.remove_new = brcmstb_i2c_remove, + .remove = brcmstb_i2c_remove, }; module_platform_driver(brcmstb_i2c_driver); diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 87b9ba95b2e1..b64026fbca66 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -129,6 +129,7 @@ #define CDNS_I2C_BROKEN_HOLD_BIT BIT(0) #define CDNS_I2C_POLL_US 100000 +#define CDNS_I2C_POLL_US_ATOMIC 10 #define CDNS_I2C_TIMEOUT_US 500000 #define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) @@ -189,6 +190,8 @@ enum cdns_i2c_slave_state { * @slave_state: I2C Slave state(idle/read/write). * @fifo_depth: The depth of the transfer FIFO * @transfer_size: The maximum number of bytes in one transfer + * @atomic: Mode of transfer + * @err_status_atomic: Error status in atomic mode */ struct cdns_i2c { struct device *dev; @@ -219,6 +222,8 @@ struct cdns_i2c { #endif u32 fifo_depth; unsigned int transfer_size; + bool atomic; + int err_status_atomic; }; struct cdns_platform_data { @@ -229,6 +234,66 @@ struct cdns_platform_data { clk_rate_change_nb) /** + * cdns_i2c_init - Controller initialisation + * @id: Device private data structure + * + * Initialise the i2c controller. + * + */ +static void cdns_i2c_init(struct cdns_i2c *id) +{ + cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET); + /* + * Cadence I2C controller has a bug wherein it generates + * invalid read transaction after HW timeout in master receiver mode. + * HW timeout is not used by this driver and the interrupt is disabled. + * But the feature itself cannot be disabled. Hence maximum value + * is written to this register to reduce the chances of error. + */ + cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); +} + +/** + * cdns_i2c_runtime_suspend - Runtime suspend method for the driver + * @dev: Address of the platform_device structure + * + * Put the driver into low power mode. + * + * Return: 0 always + */ +static int cdns_i2c_runtime_suspend(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + + clk_disable(xi2c->clk); + + return 0; +} + +/** + * cdns_i2c_runtime_resume - Runtime resume + * @dev: Address of the platform_device structure + * + * Runtime resume callback. + * + * Return: 0 on success and error value on error + */ +static int cdns_i2c_runtime_resume(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(xi2c->clk); + if (ret) { + dev_err(dev, "Cannot enable clock.\n"); + return ret; + } + cdns_i2c_init(xi2c); + + return 0; +} + +/** * cdns_i2c_clear_bus_hold - Clear bus hold bit * @id: Pointer to driver data struct * @@ -561,6 +626,89 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) return cdns_i2c_master_isr(ptr); } +static bool cdns_i2c_error_check(struct cdns_i2c *id) +{ + unsigned int isr_status; + + id->err_status = 0; + + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status & CDNS_I2C_IXR_ERR_INTR_MASK, CDNS_I2C_ISR_OFFSET); + + id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK; + + return !!id->err_status; +} + +static void cdns_i2c_mrecv_atomic(struct cdns_i2c *id) +{ + while (id->recv_count > 0) { + bool updatetx; + + /* + * Check if transfer size register needs to be updated again for a + * large data receive operation. 
+ */ + updatetx = id->recv_count > id->curr_recv_count; + + while (id->curr_recv_count > 0) { + if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_RXDV) { + *id->p_recv_buf = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + id->p_recv_buf++; + id->recv_count--; + id->curr_recv_count--; + + /* + * Clear the hold bit that was set for FIFO control, + * if the remaining RX data is less than or equal to + * the FIFO depth, unless a repeated start is selected. + */ + if (id->recv_count <= id->fifo_depth && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + } + if (cdns_i2c_error_check(id)) + return; + if (cdns_is_holdquirk(id, updatetx)) + break; + } + + /* + * The controller sends NACK to the slave/target when transfer size + * register reaches zero without considering the HOLD bit. + * This workaround is implemented for large data transfers to + * maintain transfer size non-zero while performing a large + * receive operation. + */ + if (cdns_is_holdquirk(id, updatetx)) { + /* wait while fifo is full */ + while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != + (id->curr_recv_count - id->fifo_depth)) + ; + + /* + * Check number of bytes to be received against maximum + * transfer size and update register accordingly. + */ + if ((id->recv_count - id->fifo_depth) > + id->transfer_size) { + cdns_i2c_writereg(id->transfer_size, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->transfer_size + + id->fifo_depth; + } else { + cdns_i2c_writereg(id->recv_count - + id->fifo_depth, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->recv_count; + } + } + } + + /* Clear hold (if not repeated start) */ + if (!id->recv_count && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); +} + /** * cdns_i2c_mrecv - Prepare and start a master receive operation * @id: pointer to the i2c device structure @@ -655,7 +803,34 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET); } - cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + if (!id->atomic) + cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + else + cdns_i2c_mrecv_atomic(id); +} + +static void cdns_i2c_msend_rem_atomic(struct cdns_i2c *id) +{ + while (id->send_count) { + unsigned int avail_bytes; + unsigned int bytes_to_send; + + avail_bytes = id->fifo_depth - cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + if (id->send_count > avail_bytes) + bytes_to_send = avail_bytes; + else + bytes_to_send = id->send_count; + + while (bytes_to_send--) { + cdns_i2c_writereg((*id->p_send_buf++), CDNS_I2C_DATA_OFFSET); + id->send_count--; + } + if (cdns_i2c_error_check(id)) + return; + } + + if (!id->send_count && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); } /** @@ -718,7 +893,10 @@ static void cdns_i2c_msend(struct cdns_i2c *id) cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, CDNS_I2C_ADDR_OFFSET); - cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + if (!id->atomic) + cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + else if (id->send_count > 0) + cdns_i2c_msend_rem_atomic(id); } /** @@ -758,7 +936,8 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, id->p_msg = msg; id->err_status = 0; - reinit_completion(&id->xfer_done); + if (!id->atomic) + reinit_completion(&id->xfer_done); /* Check for the TEN Bit mode on each msg */ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); @@ -780,14 +959,31 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, /* Minimal time to execute this message */ 
msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk); - /* Plus some wiggle room */ - msg_timeout += msecs_to_jiffies(500); + + /* + * Plus some wiggle room. + * For non-atomic contexts, 500 ms is added to the timeout. + * For atomic contexts, 2000 ms is added because transfers happen in polled + * mode, requiring more time to account for the polling overhead. + */ + if (!id->atomic) + msg_timeout += msecs_to_jiffies(500); + else + msg_timeout += msecs_to_jiffies(2000); if (msg_timeout < adap->timeout) msg_timeout = adap->timeout; - /* Wait for the signal of completion */ - time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); + if (!id->atomic) { + /* Wait for the signal of completion */ + time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); + } else { + /* 0 is success, -ETIMEDOUT is error */ + time_left = !readl_poll_timeout_atomic(id->membase + CDNS_I2C_ISR_OFFSET, + reg, (reg & CDNS_I2C_IXR_COMP), + CDNS_I2C_POLL_US_ATOMIC, msg_timeout); + } + if (time_left == 0) { cdns_i2c_master_reset(adap); return -ETIMEDOUT; @@ -806,58 +1002,31 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, return 0; } -/** - * cdns_i2c_master_xfer - The main i2c transfer function - * @adap: pointer to the i2c adapter driver instance - * @msgs: pointer to the i2c message structure - * @num: the number of messages to transfer - * - * Initiates the send/recv activity based on the transfer message received. - * - * Return: number of msgs processed on success, negative error otherwise - */ -static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num) +static int cdns_i2c_master_common_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, + int num) { int ret, count; u32 reg; struct cdns_i2c *id = adap->algo_data; bool hold_quirk; -#if IS_ENABLED(CONFIG_I2C_SLAVE) - bool change_role = false; -#endif - - ret = pm_runtime_resume_and_get(id->dev); - if (ret < 0) - return ret; - -#if IS_ENABLED(CONFIG_I2C_SLAVE) - /* Check i2c operating mode and switch if possible */ - if (id->dev_mode == CDNS_I2C_MODE_SLAVE) { - if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) { - ret = -EAGAIN; - goto out; - } - - /* Set mode to master */ - cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); - - /* Mark flag to change role once xfer is completed */ - change_role = true; - } -#endif /* Check if the bus is free */ - - ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET, - reg, - !(reg & CDNS_I2C_SR_BA), - CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US); + if (!id->atomic) + ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET, + reg, + !(reg & CDNS_I2C_SR_BA), + CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US); + else + ret = readl_poll_timeout_atomic(id->membase + CDNS_I2C_SR_OFFSET, + reg, + !(reg & CDNS_I2C_SR_BA), + CDNS_I2C_POLL_US_ATOMIC, CDNS_I2C_TIMEOUT_US); if (ret) { ret = -EAGAIN; if (id->adap.bus_recovery_info) i2c_recover_bus(adap); - goto out; + return ret; } hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT); @@ -877,8 +1046,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, if (msgs[count].flags & I2C_M_RD) { dev_warn(adap->dev.parent, "Can't do repeated start after a receive message\n"); - ret = -EOPNOTSUPP; - goto out; + return -EOPNOTSUPP; } } id->bus_hold_flag = 1; @@ -896,26 +1064,65 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, ret = cdns_i2c_process_msg(id, msgs, adap); if (ret) - goto out; + return ret; /* Report the 
other error interrupts to application */ - if (id->err_status) { + if (id->err_status || id->err_status_atomic) { cdns_i2c_master_reset(adap); - if (id->err_status & CDNS_I2C_IXR_NACK) { - ret = -ENXIO; - goto out; - } - ret = -EIO; - goto out; + if (id->err_status & CDNS_I2C_IXR_NACK) + return -ENXIO; + + return -EIO; } } + return 0; +} + +/** + * cdns_i2c_master_xfer - The main i2c transfer function + * @adap: pointer to the i2c adapter driver instance + * @msgs: pointer to the i2c message structure + * @num: the number of messages to transfer + * + * Initiates the send/recv activity based on the transfer message received. + * + * Return: number of msgs processed on success, negative error otherwise + */ +static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + int ret; + struct cdns_i2c *id = adap->algo_data; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + bool change_role = false; +#endif - ret = num; + ret = pm_runtime_resume_and_get(id->dev); + if (ret < 0) + return ret; -out: +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Check i2c operating mode and switch if possible */ + if (id->dev_mode == CDNS_I2C_MODE_SLAVE) { + if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) { + ret = -EAGAIN; + goto out; + } + + /* Set mode to master */ + cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); + + /* Mark flag to change role once xfer is completed */ + change_role = true; + } +#endif + ret = cdns_i2c_master_common_xfer(adap, msgs, num); + if (!ret) + ret = num; #if IS_ENABLED(CONFIG_I2C_SLAVE) +out: /* Switch i2c mode to slave */ if (change_role) cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); @@ -927,6 +1134,41 @@ out: } /** + * cdns_i2c_master_xfer_atomic - The i2c transfer function in atomic mode + * @adap: pointer to the i2c adapter driver instance + * @msgs: pointer to the i2c message structure + * @num: the number of messages to transfer + * + * Return: number of msgs processed on success, negative error otherwise + */ +static int cdns_i2c_master_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + int ret; + struct cdns_i2c *id = adap->algo_data; + + ret = cdns_i2c_runtime_resume(id->dev); + if (ret) + return ret; + + if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) { + dev_warn(id->adap.dev.parent, + "Atomic xfer not supported for version 1.0\n"); + return 0; + } + + id->atomic = true; + ret = cdns_i2c_master_common_xfer(adap, msgs, num); + if (!ret) + ret = num; + + id->atomic = false; + cdns_i2c_runtime_suspend(id->dev); + + return ret; +} + +/** * cdns_i2c_func - Returns the supported features of the I2C driver * @adap: pointer to the i2c adapter structure * @@ -990,6 +1232,7 @@ static int cdns_unreg_slave(struct i2c_client *slave) static const struct i2c_algorithm cdns_i2c_algo = { .master_xfer = cdns_i2c_master_xfer, + .master_xfer_atomic = cdns_i2c_master_xfer_atomic, .functionality = cdns_i2c_func, #if IS_ENABLED(CONFIG_I2C_SLAVE) .reg_slave = cdns_reg_slave, @@ -1158,23 +1401,6 @@ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long } } -/** - * cdns_i2c_runtime_suspend - Runtime suspend method for the driver - * @dev: Address of the platform_device structure - * - * Put the driver into low power mode. 
- * - * Return: 0 always - */ -static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) -{ - struct cdns_i2c *xi2c = dev_get_drvdata(dev); - - clk_disable(xi2c->clk); - - return 0; -} - static int __maybe_unused cdns_i2c_suspend(struct device *dev) { struct cdns_i2c *xi2c = dev_get_drvdata(dev); @@ -1187,49 +1413,6 @@ static int __maybe_unused cdns_i2c_suspend(struct device *dev) return 0; } -/** - * cdns_i2c_init - Controller initialisation - * @id: Device private data structure - * - * Initialise the i2c controller. - * - */ -static void cdns_i2c_init(struct cdns_i2c *id) -{ - cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET); - /* - * Cadence I2C controller has a bug wherein it generates - * invalid read transaction after HW timeout in master receiver mode. - * HW timeout is not used by this driver and the interrupt is disabled. - * But the feature itself cannot be disabled. Hence maximum value - * is written to this register to reduce the chances of error. - */ - cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); -} - -/** - * cdns_i2c_runtime_resume - Runtime resume - * @dev: Address of the platform_device structure - * - * Runtime resume callback. - * - * Return: 0 on success and error value on error - */ -static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev) -{ - struct cdns_i2c *xi2c = dev_get_drvdata(dev); - int ret; - - ret = clk_enable(xi2c->clk); - if (ret) { - dev_err(dev, "Cannot enable clock.\n"); - return ret; - } - cdns_i2c_init(xi2c); - - return 0; -} - static int __maybe_unused cdns_i2c_resume(struct device *dev) { struct cdns_i2c *xi2c = dev_get_drvdata(dev); @@ -1469,7 +1652,7 @@ static struct platform_driver cdns_i2c_drv = { .pm = &cdns_i2c_dev_pm_ops, }, .probe = cdns_i2c_probe, - .remove_new = cdns_i2c_remove, + .remove = cdns_i2c_remove, }; module_platform_driver(cdns_i2c_drv); diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c index fdc1758a3275..8065c7e4462e 100644 --- a/drivers/i2c/busses/i2c-cbus-gpio.c +++ b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids); static struct platform_driver cbus_i2c_driver = { .probe = cbus_i2c_probe, - .remove_new = cbus_i2c_remove, + .remove = cbus_i2c_remove, .driver = { .name = "i2c-cbus-gpio", .of_match_table = of_match_ptr(i2c_cbus_dt_ids), diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 52e3000626c5..26a36a65521e 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -546,7 +546,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_i2c_adap_id_table); static struct platform_driver cht_wc_i2c_adap_driver = { .probe = cht_wc_i2c_adap_i2c_probe, - .remove_new = cht_wc_i2c_adap_i2c_remove, + .remove = cht_wc_i2c_adap_i2c_remove, .driver = { .name = "cht_wcove_ext_chgr", }, diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 4794ec066eb0..260e1643c2cc 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -701,7 +701,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match); static struct platform_driver cpm_i2c_driver = { .probe = cpm_i2c_probe, - .remove_new = cpm_i2c_remove, + .remove = cpm_i2c_remove, .driver = { .name = "fsl-i2c-cpm", .of_match_table = cpm_i2c_match, diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index ab2688bd4d33..43bf90d90eeb 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -304,7 
+304,7 @@ MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id); static struct platform_driver ec_i2c_tunnel_driver = { .probe = ec_i2c_probe, - .remove_new = ec_i2c_remove, + .remove = ec_i2c_remove, .driver = { .name = "cros-ec-i2c-tunnel", .acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id), diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index c4fb5e9ab506..71dc0a6688b7 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -935,7 +935,7 @@ MODULE_DEVICE_TABLE(platform, davinci_i2c_driver_ids); static struct platform_driver davinci_i2c_driver = { .probe = davinci_i2c_probe, - .remove_new = davinci_i2c_remove, + .remove = davinci_i2c_remove, .id_table = davinci_i2c_driver_ids, .driver = { .name = "i2c_davinci", diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c index 63454b06e5da..8fbd2a10c31a 100644 --- a/drivers/i2c/busses/i2c-designware-amdpsp.c +++ b/drivers/i2c/busses/i2c-designware-amdpsp.c @@ -155,7 +155,7 @@ static void psp_release_i2c_bus_deferred(struct work_struct *work) /* * If there is any pending transaction, cannot release the bus here. - * psp_release_i2c_bus will take care of this later. + * psp_release_i2c_bus() will take care of this later. */ if (psp_i2c_access_count) goto cleanup; @@ -210,12 +210,12 @@ static void psp_release_i2c_bus(void) { mutex_lock(&psp_i2c_access_mutex); - /* Return early if mailbox was malfunctional */ + /* Return early if mailbox was malfunctioning */ if (psp_i2c_mbox_fail) goto cleanup; /* - * If we are last owner of PSP semaphore, need to release aribtration + * If we are last owner of PSP semaphore, need to release arbitration * via mailbox. */ psp_i2c_access_count--; @@ -235,9 +235,9 @@ cleanup: /* * Locking methods are based on the default implementation from - * drivers/i2c/i2c-core-base.c, but with psp acquire and release operations + * drivers/i2c/i2c-core-base.c, but with PSP acquire and release operations * added. With this in place we can ensure that i2c clients on the bus shared - * with psp are able to lock HW access to the bus for arbitrary number of + * with PSP are able to lock HW access to the bus for an arbitrary number of * operations - that is e.g. write-wait-read. */ static void i2c_adapter_dw_psp_lock_bus(struct i2c_adapter *adapter, diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 9d88b4fa03e4..857783d458fb 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -33,7 +33,7 @@ #include "i2c-designware-core.h" -static char *abort_sources[] = { +static const char *const abort_sources[] = { [ABRT_7B_ADDR_NOACK] = "slave address not acknowledged (7bit mode)", [ABRT_10ADDR1_NOACK] = @@ -127,6 +127,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val) * Autodetects needed register access mode and creates the regmap with * corresponding read/write callbacks. This must be called before doing any * other register access. + * + * Return: 0 on success, or negative errno otherwise. */ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) { @@ -174,7 +176,7 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) /* * Note we'll check the return value of the regmap IO accessors only * at the probe stage.
The rest of the code won't do this because - basically we have MMIO-based regmap so non of the read/write methods + basically we have MMIO-based regmap, so none of the read/write methods * can fail. */ dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg); @@ -336,7 +338,7 @@ static u32 i2c_dw_acpi_round_bus_speed(struct device *device) acpi_speed = i2c_acpi_find_bus_speed(device); /* - * Some DSTDs use a non standard speed, round down to the lowest + * Some DSDTs use a non standard speed, round down to the lowest * standard speed. */ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { @@ -380,6 +382,11 @@ int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev) i2c_parse_fw_timings(device, t, false); + if (device_property_read_u32(device, "snps,bus-capacitance-pf", &dev->bus_capacitance_pF)) + dev->bus_capacitance_pF = 100; + + dev->clk_freq_optimized = device_property_read_bool(device, "snps,clk-freq-optimized"); + i2c_dw_adjust_bus_speed(dev); if (is_of_node(fwnode)) @@ -407,47 +414,26 @@ static u32 i2c_dw_read_scl_reg(struct dw_i2c_dev *dev, u32 reg) } u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, - u32 tSYMBOL, u32 tf, int cond, int offset) + u32 tSYMBOL, u32 tf, int offset) { if (!ic_clk) return i2c_dw_read_scl_reg(dev, reg); /* - * DesignWare I2C core doesn't seem to have solid strategy to meet - * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec - * will result in violation of the tHD;STA spec. + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) + * + * This is just an experimental rule; the tHD;STA period turned + * out to be proportional to (_HCNT + 3). With this setting, + * we could meet both tHIGH and tHD;STA timing specs. + * + * If unsure, you'd better take this alternative. + * + * The reason why we need to take into account "tf" here, + * is the same as described in i2c_dw_scl_lcnt(). */ - if (cond) - /* - * Conditional expression: - * - * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH - * - * This is based on the DW manuals, and represents an ideal - * configuration. The resulting I2C bus speed will be - * faster than any of the others. - * - * If your hardware is free from tHD;STA issue, try this one. - */ - return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) - - 8 + offset; - else - /* - * Conditional expression: - * - * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) - * - * This is just experimental rule; the tHD;STA period turned - * out to be proportinal to (_HCNT + 3). With this setting, - * we could meet both tHIGH and tHD;STA timing specs. - * - * If unsure, you'd better to take this alternative. - * - * The reason why we need to take into account "tf" here, - * is the same as described in i2c_dw_scl_lcnt(). - */ - return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - - 3 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset; } u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, @@ -467,8 +453,7 @@ u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety.
 */ - return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - - 1 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - 1 + offset; } int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) @@ -571,7 +556,7 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev) /* * Wait 10 times the signaling period of the highest I2C - * transfer supported by the driver (for 400KHz this is + * transfer supported by the driver (for 400kHz this is * 25us) as described in the DesignWare I2C databook. */ usleep_range(25, 250); @@ -678,10 +663,10 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) if (abort_source & DW_IC_TX_ARB_LOST) return -EAGAIN; - else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) + if (abort_source & DW_IC_TX_ABRT_GCALL_READ) return -EINVAL; /* wrong msgs[] data */ - else - return -EIO; + + return -EIO; } int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 2d32896d0673..347843b4f5dd 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -143,10 +143,10 @@ #define DW_IC_SLAVE 1 /* - * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register + * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register. * - * Only expected abort codes are listed here - * refer to the datasheet for the full list + * Only expected abort codes are listed here; + * refer to the datasheet for the full list. */ #define ABRT_7B_ADDR_NOACK 0 #define ABRT_10ADDR1_NOACK 1 @@ -201,7 +201,7 @@ struct reset_control; * @rst: optional reset for the controller * @slave: represent an I2C slave device * @get_clk_rate_khz: callback to retrieve IP specific bus speed - * @cmd_err: run time hadware error code + * @cmd_err: run time hardware error code * @msgs: points to an array of messages currently being transferred * @msgs_num: the number of elements in msgs * @msg_write_idx: the element index of the current tx message in the msgs array @@ -237,11 +237,15 @@ struct reset_control; * @release_lock: function to release a hardware lock on the bus * @semaphore_idx: Index of table with semaphore type attached to the bus. It's * -1 if there is no semaphore. - * @shared_with_punit: true if this bus is shared with the SoCs PUNIT + * @shared_with_punit: true if this bus is shared with the SoC's PUNIT * @init: function to initialize the I2C hardware * @set_sda_hold_time: callback to retrieve IP specific SDA hold timing * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE * @rinfo: I²C GPIO recovery information + * @bus_capacitance_pF: bus capacitance in picofarads + * @clk_freq_optimized: if this value is true, it means the hardware reduces + * its internal clock frequency by reducing the internal latency required + * to generate the high period and low period of the SCL line. * * HCNT and LCNT parameters can be used if the platform knows more accurate * values than the one computed based only on the input clock frequency.
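The consolidated i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() formulas above are plain integer arithmetic on the controller clock (ic_clk, in kHz) and the timing parameters (in ns). A minimal standalone sketch of that arithmetic, with DIV_ROUND_CLOSEST_ULL open-coded and purely illustrative input values (a hypothetical 100 MHz ic_clk with standard-mode timings), not kernel code:

	#include <stdio.h>

	/* Round-to-nearest division for non-negative values (open-coded DIV_ROUND_CLOSEST_ULL) */
	static unsigned long long div_round_closest(unsigned long long n, unsigned long long d)
	{
		return (n + d / 2) / d;
	}

	/* IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf); ic_clk in kHz, times in ns */
	static unsigned int scl_hcnt(unsigned long long ic_clk, unsigned int tsymbol,
				     unsigned int tf, int offset)
	{
		return div_round_closest(ic_clk * (tsymbol + tf), 1000000) - 3 + offset;
	}

	/* IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf) */
	static unsigned int scl_lcnt(unsigned long long ic_clk, unsigned int tlow,
				     unsigned int tf, int offset)
	{
		return div_round_closest(ic_clk * (tlow + tf), 1000000) - 1 + offset;
	}

	int main(void)
	{
		/* Hypothetical 100 MHz controller clock, standard mode, 300 ns fall time */
		printf("SS_HCNT = %u\n", scl_hcnt(100000, 4000, 300, 0));	/* 427 */
		printf("SS_LCNT = %u\n", scl_lcnt(100000, 4700, 300, 0));	/* 499 */
		return 0;
	}

For instance, 100000 kHz * (4000 + 300) ns / 10^6 rounds to 430, so SS_HCNT becomes 427 after the -3 correction.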
@@ -299,6 +303,8 @@ struct dw_i2c_dev { int (*set_sda_hold_time)(struct dw_i2c_dev *dev); int mode; struct i2c_bus_recovery_info rinfo; + u32 bus_capacitance_pF; + bool clk_freq_optimized; }; #define ACCESS_INTR_MASK BIT(0) @@ -329,7 +335,7 @@ struct i2c_dw_semaphore_callbacks { int i2c_dw_init_regmap(struct dw_i2c_dev *dev); u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, - u32 tSYMBOL, u32 tf, int cond, int offset); + u32 tSYMBOL, u32 tf, int offset); u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, u32 tLOW, u32 tf, int offset); int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e8ac9a7bf0b3..eca8998d640f 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -71,7 +71,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 4000, /* tHD;STA = tHIGH = 4.0 us */ sda_falling_time, - 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->ss_lcnt = i2c_dw_scl_lcnt(dev, @@ -105,7 +104,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 260, /* tHIGH = 260 ns */ sda_falling_time, - 0, /* DW default */ 0); /* No offset */ dev->fs_lcnt = i2c_dw_scl_lcnt(dev, @@ -129,7 +127,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 600, /* tHD;STA = tHIGH = 0.6 us */ sda_falling_time, - 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->fs_lcnt = i2c_dw_scl_lcnt(dev, @@ -154,20 +151,38 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) dev->hs_hcnt = 0; dev->hs_lcnt = 0; } else if (!dev->hs_hcnt || !dev->hs_lcnt) { + u32 t_high, t_low; + + /* + * The legal values stated in the databook for bus + * capacitance are only 100pF and 400pF. + * If dev->bus_capacitance_pF is greater than or equal + * to 400, t_high and t_low are assumed to be + * appropriate values for 400pF, otherwise 100pF. + */ + if (dev->bus_capacitance_pF >= 400) { + /* assume bus capacitance is 400pF */ + t_high = dev->clk_freq_optimized ? 160 : 120; + t_low = 320; + } else { + /* assume bus capacitance is 100pF */ + t_high = 60; + t_low = dev->clk_freq_optimized ? 120 : 160; + } + ic_clk = i2c_dw_clk_rate(dev); dev->hs_hcnt = i2c_dw_scl_hcnt(dev, DW_IC_HS_SCL_HCNT, ic_clk, - 160, /* tHIGH = 160 ns */ + t_high, sda_falling_time, - 0, /* DW default */ 0); /* No offset */ dev->hs_lcnt = i2c_dw_scl_lcnt(dev, DW_IC_HS_SCL_LCNT, ic_clk, - 320, /* tLOW = 320 ns */ + t_low, scl_falling_time, 0); /* No offset */ } @@ -184,12 +199,14 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) } /** - * i2c_dw_init_master() - Initialize the designware I2C master hardware + * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware * @dev: device private data * * This functions configures and enables the I2C master. * This function is called during I2C init function, and in case of timeout at * run time. + * + * Return: 0 on success, or negative errno otherwise. */ static int i2c_dw_init_master(struct dw_i2c_dev *dev) { @@ -357,7 +374,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, /* * Initiate the i2c read/write transaction of buffer length, * and poll for bus busy status. For the last message transfer, - * update the command with stopbit enable. + * update the command with stop bit enable.
*/ for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) { if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1) @@ -402,7 +419,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, /* * Initiate (and continue) low level master read/write transaction. - * This function is only called from i2c_dw_isr, and pumping i2c_msg + * This function is only called from i2c_dw_isr(), and pumping i2c_msg * messages into the tx buffer. Even if the size of i2c_msg data is * longer than the size of the tx buffer, it handles everything. */ @@ -440,7 +457,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) buf = msgs[dev->msg_write_idx].buf; buf_len = msgs[dev->msg_write_idx].len; - /* If both IC_EMPTYFIFO_HOLD_MASTER_EN and + /* + * If both IC_EMPTYFIFO_HOLD_MASTER_EN and * IC_RESTART_EN are set, we must manually * set restart bit between messages. */ @@ -971,7 +989,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) rinfo->unprepare_recovery = i2c_dw_unprepare_recovery; adap->bus_recovery_info = rinfo; - dev_info(dev->dev, "running with gpio recovery mode! scl%s", + dev_info(dev->dev, "running with GPIO recovery mode! scl%s", rinfo->sda_gpiod ? ",sda" : ""); return 0; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 7b2c5d71a7fc..38265c3dc454 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -51,7 +51,7 @@ struct dw_scl_sda_cfg { u16 fs_hcnt; u16 ss_lcnt; u16 fs_lcnt; - u32 sda_hold; + u32 sda_hold_time; }; struct dw_pci_controller { @@ -76,7 +76,7 @@ static struct dw_scl_sda_cfg byt_config = { .fs_hcnt = 0x55, .ss_lcnt = 0x200, .fs_lcnt = 0x99, - .sda_hold = 0x6, + .sda_hold_time = 0x6, }; /* Haswell HCNT/LCNT/SDA hold time */ @@ -85,14 +85,14 @@ static struct dw_scl_sda_cfg hsw_config = { .fs_hcnt = 0x48, .ss_lcnt = 0x01fb, .fs_lcnt = 0xa0, - .sda_hold = 0x9, + .sda_hold_time = 0x9, }; /* NAVI-AMD HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg navi_amd_config = { .ss_hcnt = 0x1ae, .ss_lcnt = 0x23a, - .sda_hold = 0x9, + .sda_hold_time = 0x9, }; static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev) @@ -207,6 +207,7 @@ static const struct software_node dgpu_node = { static int i2c_dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct device *device = &pdev->dev; struct dw_i2c_dev *dev; struct i2c_adapter *adap; int r; @@ -214,25 +215,22 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, struct dw_scl_sda_cfg *cfg; if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) - return dev_err_probe(&pdev->dev, -EINVAL, - "Invalid driver data %ld\n", + return dev_err_probe(device, -EINVAL, "Invalid driver data %ld\n", id->driver_data); controller = &dw_pci_controllers[id->driver_data]; r = pcim_enable_device(pdev); if (r) - return dev_err_probe(&pdev->dev, r, - "Failed to enable I2C PCI device\n"); + return dev_err_probe(device, r, "Failed to enable I2C PCI device\n"); pci_set_master(pdev); r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); if (r) - return dev_err_probe(&pdev->dev, r, - "I/O memory remapping failed\n"); + return dev_err_probe(device, r, "I/O memory remapping failed\n"); - dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -242,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->get_clk_rate_khz = controller->get_clk_rate_khz; dev->base = pcim_iomap_table(pdev)[0]; - dev->dev = &pdev->dev; + 
dev->dev = device; dev->irq = pci_irq_vector(pdev, 0); dev->flags |= controller->flags; @@ -266,7 +264,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->fs_hcnt = cfg->fs_hcnt; dev->ss_lcnt = cfg->ss_lcnt; dev->fs_lcnt = cfg->fs_lcnt; - dev->sda_hold_time = cfg->sda_hold; + dev->sda_hold_time = cfg->sda_hold_time; } adap = &dev->adapter; @@ -281,14 +279,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); if (IS_ERR(dev->slave)) - return dev_err_probe(dev->dev, PTR_ERR(dev->slave), + return dev_err_probe(device, PTR_ERR(dev->slave), "register UCSI failed\n"); } - pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); - pm_runtime_allow(&pdev->dev); + pm_runtime_set_autosuspend_delay(device, 1000); + pm_runtime_use_autosuspend(device); + pm_runtime_put_autosuspend(device); + pm_runtime_allow(device); return 0; } @@ -296,11 +294,12 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, static void i2c_dw_pci_remove(struct pci_dev *pdev) { struct dw_i2c_dev *dev = pci_get_drvdata(pdev); + struct device *device = &pdev->dev; i2c_dw_disable(dev); - pm_runtime_forbid(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); + pm_runtime_forbid(device); + pm_runtime_get_noresume(device); i2c_del_adapter(&dev->adapter); } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2d0c7348e491..3e12aab6bf2d 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -72,7 +72,7 @@ static int bt1_i2c_write(void *context, unsigned int reg, unsigned int val) return ret; return regmap_write(dev->sysmap, BT1_I2C_CTL, - BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK)); + BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK)); } static const struct regmap_config bt1_i2c_cfg = { @@ -205,6 +205,7 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) static int dw_i2c_plat_probe(struct platform_device *pdev) { + struct device *device = &pdev->dev; struct i2c_adapter *adap; struct dw_i2c_dev *dev; int irq, ret; @@ -213,15 +214,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (irq < 0) return irq; - dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL); + dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; - dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); - if (device_property_present(&pdev->dev, "wx,i2c-snps-model")) + dev->flags = (uintptr_t)device_get_match_data(device); + if (device_property_present(device, "wx,i2c-snps-model")) dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING; - dev->dev = &pdev->dev; + dev->dev = device; dev->irq = irq; platform_set_drvdata(pdev, dev); @@ -229,7 +230,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (ret) return ret; - dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + dev->rst = devm_reset_control_get_optional_exclusive(device, NULL); if (IS_ERR(dev->rst)) return PTR_ERR(dev->rst); @@ -246,13 +247,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) i2c_dw_configure(dev); /* Optional interface clock */ - dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); + dev->pclk = devm_clk_get_optional(device, "pclk"); if (IS_ERR(dev->pclk)) { ret = PTR_ERR(dev->pclk); goto exit_reset; } - dev->clk = 
devm_clk_get_optional(&pdev->dev, NULL); + dev->clk = devm_clk_get_optional(device, NULL); if (IS_ERR(dev->clk)) { ret = PTR_ERR(dev->clk); goto exit_reset; @@ -277,31 +278,27 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ? - I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; + I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; adap->nr = -1; - if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE); - } else { - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE | - DPM_FLAG_SMART_SUSPEND); - } + if (dev->flags & ACCESS_NO_IRQ_SUSPEND) + dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE); + else + dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND); - device_enable_async_suspend(&pdev->dev); + device_enable_async_suspend(device); /* The code below assumes runtime PM to be disabled. */ - WARN_ON(pm_runtime_enabled(&pdev->dev)); + WARN_ON(pm_runtime_enabled(device)); - pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(device, 1000); + pm_runtime_use_autosuspend(device); + pm_runtime_set_active(device); if (dev->shared_with_punit) - pm_runtime_get_noresume(&pdev->dev); + pm_runtime_get_noresume(device); - pm_runtime_enable(&pdev->dev); + pm_runtime_enable(device); ret = i2c_dw_probe(dev); if (ret) @@ -319,15 +316,16 @@ exit_reset: static void dw_i2c_plat_remove(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); + struct device *device = &pdev->dev; - pm_runtime_get_sync(&pdev->dev); + pm_runtime_get_sync(device); i2c_del_adapter(&dev->adapter); i2c_dw_disable(dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(device); + pm_runtime_put_sync(device); dw_i2c_plat_pm_cleanup(dev); i2c_dw_remove_lock_support(dev); @@ -351,9 +349,11 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE }, { "AMDI0510", 0 }, { "APMC0D0F", 0 }, + { "FUJI200B", 0 }, { "HISI02A1", 0 }, { "HISI02A2", 0 }, { "HISI02A3", 0 }, + { "HJMC3001", 0 }, { "HYGO0010", ACCESS_INTR_MASK }, { "INT33C2", 0 }, { "INT33C3", 0 }, @@ -372,7 +372,7 @@ MODULE_DEVICE_TABLE(platform, dw_i2c_platform_ids); static struct platform_driver dw_i2c_driver = { .probe = dw_i2c_plat_probe, - .remove_new = dw_i2c_plat_remove, + .remove = dw_i2c_plat_remove, .driver = { .name = "i2c_designware", .of_match_table = dw_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 7035296aa24c..fad568e3523b 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -32,12 +32,14 @@ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev) } /** - * i2c_dw_init_slave() - Initialize the designware i2c slave hardware + * i2c_dw_init_slave() - Initialize the DesignWare i2c slave hardware * @dev: device private data * * This function configures and enables the I2C in slave mode. * This function is called during I2C init function, and in case of timeout at * run time. + * + * Return: 0 on success, or negative errno otherwise. 
*/ static int i2c_dw_init_slave(struct dw_i2c_dev *dev) { @@ -264,7 +266,7 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave, IRQF_SHARED, dev_name(dev->dev), dev); if (ret) { - dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev_err(dev->dev, "failure requesting IRQ %i: %d\n", dev->irq, ret); return ret; } diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 3dc5a46698fc..38d7f31aee79 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -363,7 +363,7 @@ MODULE_DEVICE_TABLE(of, dc_i2c_match); static struct platform_driver dc_i2c_driver = { .probe = dc_i2c_probe, - .remove_new = dc_i2c_remove, + .remove = dc_i2c_remove, .driver = { .name = "digicolor-i2c", .of_match_table = dc_i2c_match, diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c index 11ed055143d3..bde2ef098862 100644 --- a/drivers/i2c/busses/i2c-dln2.c +++ b/drivers/i2c/busses/i2c-dln2.c @@ -245,7 +245,7 @@ static void dln2_i2c_remove(struct platform_device *pdev) static struct platform_driver dln2_i2c_driver = { .driver.name = "dln2-i2c", .probe = dln2_i2c_probe, - .remove_new = dln2_i2c_remove, + .remove = dln2_i2c_remove, }; module_platform_driver(dln2_i2c_driver); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index d08be3f3cede..2512cef8e2a2 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -425,7 +425,7 @@ static const struct of_device_id em_i2c_ids[] = { static struct platform_driver em_i2c_driver = { .probe = em_i2c_probe, - .remove_new = em_i2c_remove, + .remove = em_i2c_remove, .driver = { .name = "em-i2c", .of_match_table = em_i2c_ids, diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index d8baca9b610c..e330015087ab 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -1009,7 +1009,7 @@ static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = { static struct platform_driver exynos5_i2c_driver = { .probe = exynos5_i2c_probe, - .remove_new = exynos5_i2c_remove, + .remove = exynos5_i2c_remove, .driver = { .name = "exynos5-hsi2c", .pm = pm_sleep_ptr(&exynos5_i2c_dev_pm_ops), diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index e0bd218e2f14..f4355b17bfbf 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -481,7 +481,7 @@ static struct platform_driver i2c_gpio_driver = { .acpi_match_table = i2c_gpio_acpi_match, }, .probe = i2c_gpio_probe, - .remove_new = i2c_gpio_remove, + .remove = i2c_gpio_remove, }; static int __init i2c_gpio_init(void) diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c index efafc0528c44..0fc39caa6c87 100644 --- a/drivers/i2c/busses/i2c-gxp.c +++ b/drivers/i2c/busses/i2c-gxp.c @@ -595,7 +595,7 @@ MODULE_DEVICE_TABLE(of, gxp_i2c_of_match); static struct platform_driver gxp_i2c_driver = { .probe = gxp_i2c_probe, - .remove_new = gxp_i2c_remove, + .remove = gxp_i2c_remove, .driver = { .name = "gxp-i2c", .of_match_table = gxp_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index ec1ebacb9aa8..78c5845e0877 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ b/drivers/i2c/busses/i2c-highlander.c @@ -454,7 +454,7 @@ static struct platform_driver highlander_i2c_driver = { }, .probe = highlander_i2c_probe, - .remove_new = highlander_i2c_remove, + .remove = highlander_i2c_remove, }; 
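The .remove_new to .remove conversions repeated throughout the drivers in this section are mechanical: .remove_new was the transitional name for the void-returning remove callback, and struct platform_driver's .remove now carries that prototype itself. A minimal sketch of the post-conversion shape (hypothetical "foo" driver, not taken from this series; assumes a kernel where .remove already returns void):

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* Acquire resources here; a negative errno fails the bind */
		return 0;
	}

	/* The remove callback returns void: unbinding can no longer fail */
	static void foo_remove(struct platform_device *pdev)
	{
		/* Release anything not managed by devm */
	}

	static struct platform_driver foo_driver = {
		.probe  = foo_probe,
		.remove = foo_remove,	/* was .remove_new during the transition */
		.driver = {
			.name = "foo",
		},
	};
	module_platform_driver(foo_driver);

	MODULE_DESCRIPTION("Sketch of the void-returning remove callback");
	MODULE_LICENSE("GPL");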
module_platform_driver(highlander_i2c_driver); diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 64cade6ba923..370f32974763 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -508,7 +508,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_i2c_match); static struct platform_driver hix5hd2_i2c_driver = { .probe = hix5hd2_i2c_probe, - .remove_new = hix5hd2_i2c_remove, + .remove = hix5hd2_i2c_remove, .driver = { .name = "hix5hd2-i2c", .pm = pm_ptr(&hix5hd2_i2c_pm_ops), diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 299fe9d3afab..75dab01d43a7 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -81,6 +81,8 @@ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes * Birch Stream (SOC) 0x5796 32 hard yes yes yes * Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes + * Panther Lake-H (SOC) 0xe322 32 hard yes yes yes + * Panther Lake-P (SOC) 0xe422 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -261,6 +263,8 @@ #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 #define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3 #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22 +#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS 0xe322 +#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS 0xe422 struct i801_mux_config { char *gpio_chip; @@ -1055,6 +1059,8 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 82dedb1bb5be..c76c4116ddc7 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -790,7 +790,7 @@ static struct platform_driver ibm_iic_driver = { .of_match_table = ibm_iic_match, }, .probe = iic_probe, - .remove_new = iic_remove, + .remove = iic_remove, }; module_platform_driver(ibm_iic_driver); diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index e0e87185f6bb..02f75cf310aa 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1497,7 +1497,7 @@ static struct platform_driver img_scb_i2c_driver = { .pm = pm_ptr(&img_i2c_pm), }, .probe = img_i2c_probe, - .remove_new = img_i2c_remove, + .remove = img_i2c_remove, }; module_platform_driver(img_scb_i2c_driver); diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 976d43f73f38..8adf2963d764 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -703,7 +703,7 @@ static const struct dev_pm_ops lpi2c_pm_ops = { static struct platform_driver lpi2c_imx_driver = { .probe = lpi2c_imx_probe, - .remove_new = lpi2c_imx_remove, + .remove = lpi2c_imx_remove, .driver = { .name = DRIVER_NAME, .of_match_table = lpi2c_imx_of_match, diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 98539313cbc9..f751d231ded8 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -17,7 +17,7 @@ * Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt> * * Copyright 2013 Freescale Semiconductor, Inc. 
- * Copyright 2020 NXP + * Copyright 2020, 2024 NXP * */ @@ -84,6 +84,7 @@ #define IMX_I2C_REGSHIFT 2 #define VF610_I2C_REGSHIFT 0 +#define S32G_I2C_REGSHIFT 0 /* Bits of IMX I2C registers */ #define I2SR_RXAK 0x01 @@ -165,9 +166,34 @@ static struct imx_i2c_clk_pair vf610_i2c_clk_div[] = { { 3840, 0x3F }, { 4096, 0x7B }, { 5120, 0x7D }, { 6144, 0x7E }, }; +/* S32G2/S32G3 clock divider, register value pairs */ +static struct imx_i2c_clk_pair s32g2_i2c_clk_div[] = { + { 34, 0x00 }, { 36, 0x01 }, { 38, 0x02 }, { 40, 0x03 }, + { 42, 0x04 }, { 44, 0x05 }, { 46, 0x06 }, { 48, 0x09 }, + { 52, 0x0A }, { 54, 0x07 }, { 56, 0x0B }, { 60, 0x0C }, + { 64, 0x0D }, { 68, 0x40 }, { 72, 0x0E }, { 76, 0x42 }, + { 80, 0x12 }, { 84, 0x0F }, { 88, 0x13 }, { 96, 0x14 }, + { 104, 0x15 }, { 108, 0x47 }, { 112, 0x19 }, { 120, 0x16 }, + { 128, 0x1A }, { 136, 0x80 }, { 144, 0x17 }, { 152, 0x82 }, + { 160, 0x1C }, { 168, 0x84 }, { 176, 0x1D }, { 192, 0x21 }, + { 208, 0x1E }, { 216, 0x87 }, { 224, 0x22 }, { 240, 0x56 }, + { 256, 0x1F }, { 288, 0x24 }, { 320, 0x25 }, { 336, 0x8F }, + { 352, 0x93 }, { 356, 0x5D }, { 358, 0x98 }, { 384, 0x26 }, + { 416, 0x56 }, { 448, 0x2A }, { 480, 0x27 }, { 512, 0x2B }, + { 576, 0x2C }, { 640, 0x2D }, { 704, 0x9D }, { 768, 0x2E }, + { 832, 0x9D }, { 896, 0x32 }, { 960, 0x2F }, { 1024, 0x33 }, + { 1152, 0x34 }, { 1280, 0x35 }, { 1536, 0x36 }, { 1792, 0x3A }, + { 1920, 0x37 }, { 2048, 0x3B }, { 2304, 0x74 }, { 2560, 0x3D }, + { 3072, 0x3E }, { 3584, 0x7A }, { 3840, 0x3F }, { 4096, 0x7B }, + { 4608, 0x7C }, { 5120, 0x7D }, { 6144, 0x7E }, { 7168, 0xBA }, + { 7680, 0x7F }, { 8192, 0xBB }, { 9216, 0xBC }, { 10240, 0xBD }, + { 12288, 0xBE }, { 15360, 0xBF }, +}; + enum imx_i2c_type { IMX1_I2C, IMX21_I2C, + S32G_I2C, VF610_I2C, }; @@ -197,6 +223,17 @@ struct imx_i2c_dma { enum dma_data_direction dma_data_dir; }; +enum imx_i2c_state { + IMX_I2C_STATE_DONE, + IMX_I2C_STATE_FAILED, + IMX_I2C_STATE_WRITE, + IMX_I2C_STATE_DMA, + IMX_I2C_STATE_READ, + IMX_I2C_STATE_READ_CONTINUE, + IMX_I2C_STATE_READ_BLOCK_DATA, + IMX_I2C_STATE_READ_BLOCK_DATA_LEN, +}; + struct imx_i2c_struct { struct i2c_adapter adapter; struct clk *clk; @@ -216,6 +253,14 @@ struct imx_i2c_struct { struct i2c_client *slave; enum i2c_slave_event last_slave_event; + struct i2c_msg *msg; + unsigned int msg_buf_idx; + int isr_result; + bool is_lastmsg; + enum imx_i2c_state state; + + bool multi_master; + /* For checking slave events. 
 */ spinlock_t slave_lock; struct hrtimer slave_timer; @@ -258,7 +303,15 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = { .ndivs = ARRAY_SIZE(vf610_i2c_clk_div), .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C, .i2cr_ien_opcode = I2CR_IEN_OPCODE_0, +}; +static const struct imx_i2c_hwdata s32g2_i2c_hwdata = { + .devtype = S32G_I2C, + .regshift = S32G_I2C_REGSHIFT, + .clk_div = s32g2_i2c_clk_div, + .ndivs = ARRAY_SIZE(s32g2_i2c_clk_div), + .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C, + .i2cr_ien_opcode = I2CR_IEN_OPCODE_0, }; static const struct platform_device_id imx_i2c_devtype[] = { @@ -288,6 +341,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, }, + { .compatible = "nxp,s32g2-i2c", .data = &s32g2_i2c_hwdata, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids); @@ -481,6 +535,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a unsigned long orig_jiffies = jiffies; unsigned int temp; + if (!i2c_imx->multi_master) + return 0; + while (1) { temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); @@ -540,8 +597,8 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic) return -ETIMEDOUT; } - /* check for arbitration lost */ - if (i2c_imx->i2csr & I2SR_IAL) { + /* In multi-master mode, check for arbitration lost */ + if (i2c_imx->multi_master && (i2c_imx->i2csr & I2SR_IAL)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); i2c_imx_clear_irq(i2c_imx, I2SR_IAL); @@ -903,11 +960,156 @@ static int i2c_imx_unreg_slave(struct i2c_client *client) return ret; } +static inline int i2c_imx_isr_acked(struct imx_i2c_struct *i2c_imx) +{ + i2c_imx->isr_result = 0; + + if (imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR) & I2SR_RXAK) { + i2c_imx->state = IMX_I2C_STATE_FAILED; + i2c_imx->isr_result = -ENXIO; + wake_up(&i2c_imx->queue); + } + + return i2c_imx->isr_result; +} + +static inline int i2c_imx_isr_write(struct imx_i2c_struct *i2c_imx) +{ + int result; + + result = i2c_imx_isr_acked(i2c_imx); + if (result) + return result; + + if (i2c_imx->msg->len == i2c_imx->msg_buf_idx) + return 0; + + imx_i2c_write_reg(i2c_imx->msg->buf[i2c_imx->msg_buf_idx++], i2c_imx, IMX_I2C_I2DR); + + return 1; +} + +static inline int i2c_imx_isr_read(struct imx_i2c_struct *i2c_imx) +{ + int result; + unsigned int temp; + + result = i2c_imx_isr_acked(i2c_imx); + if (result) + return result; + + /* setup bus to read data */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp &= ~I2CR_MTX; + if (i2c_imx->msg->len - 1) + temp &= ~I2CR_TXAK; + + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ + + return 0; +} + +static inline void i2c_imx_isr_read_continue(struct imx_i2c_struct *i2c_imx) +{ + unsigned int temp; + + if ((i2c_imx->msg->len - 1) == i2c_imx->msg_buf_idx) { + if (i2c_imx->is_lastmsg) { + /* + * It must generate STOP before reading I2DR to prevent the + * controller from generating another clock cycle + */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; + temp &= ~(I2CR_MSTA | I2CR_MTX); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } else { + /* + * For i2c master receiver repeat restart operation like: + * read -> repeat MSTA -> read/write + * The controller must set MTX before reading the last byte in + * the first read operation, otherwise the first
read costs + * one extra clock cycle. + */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp |= I2CR_MTX; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } + } else if (i2c_imx->msg_buf_idx == (i2c_imx->msg->len - 2)) { + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp |= I2CR_TXAK; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } + + i2c_imx->msg->buf[i2c_imx->msg_buf_idx++] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); +} + +static inline void i2c_imx_isr_read_block_data_len(struct imx_i2c_struct *i2c_imx) +{ + u8 len = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + + if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { + i2c_imx->isr_result = -EPROTO; + i2c_imx->state = IMX_I2C_STATE_FAILED; + wake_up(&i2c_imx->queue); + } + i2c_imx->msg->len += len; +} + static irqreturn_t i2c_imx_master_isr(struct imx_i2c_struct *i2c_imx, unsigned int status) { - /* save status register */ - i2c_imx->i2csr = status; - wake_up(&i2c_imx->queue); + /* + * This state machine handles I2C reception and transmission in non-DMA + * mode. We must process all the data in the ISR to reduce the delay + * between two consecutive messages. If the data is not processed in + * the ISR, SMBus devices may time out, leading to a bus error. + */ + switch (i2c_imx->state) { + case IMX_I2C_STATE_DMA: + i2c_imx->i2csr = status; + wake_up(&i2c_imx->queue); + break; + + case IMX_I2C_STATE_READ: + if (i2c_imx_isr_read(i2c_imx)) + break; + i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE; + break; + + case IMX_I2C_STATE_READ_CONTINUE: + i2c_imx_isr_read_continue(i2c_imx); + if (i2c_imx->msg_buf_idx == i2c_imx->msg->len) { + i2c_imx->state = IMX_I2C_STATE_DONE; + wake_up(&i2c_imx->queue); + } + break; + + case IMX_I2C_STATE_READ_BLOCK_DATA: + if (i2c_imx_isr_read(i2c_imx)) + break; + i2c_imx->state = IMX_I2C_STATE_READ_BLOCK_DATA_LEN; + break; + + case IMX_I2C_STATE_READ_BLOCK_DATA_LEN: + i2c_imx_isr_read_block_data_len(i2c_imx); + i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE; + break; + + case IMX_I2C_STATE_WRITE: + if (i2c_imx_isr_write(i2c_imx)) + break; + i2c_imx->state = IMX_I2C_STATE_DONE; + wake_up(&i2c_imx->queue); + break; + + default: + i2c_imx->i2csr = status; + i2c_imx->state = IMX_I2C_STATE_FAILED; + i2c_imx->isr_result = -EINVAL; + wake_up(&i2c_imx->queue); + } return IRQ_HANDLED; } @@ -954,6 +1156,8 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; + i2c_imx->state = IMX_I2C_STATE_DMA; + dma->chan_using = dma->chan_tx; dma->dma_transfer_dir = DMA_MEM_TO_DEV; dma->dma_data_dir = DMA_TO_DEVICE; @@ -1006,6 +1210,42 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, return i2c_imx_acked(i2c_imx); } +static int i2c_imx_prepare_read(struct imx_i2c_struct *i2c_imx, + struct i2c_msg *msgs, bool use_dma) +{ + int result; + unsigned int temp = 0; + + /* write slave address */ + imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); + result = i2c_imx_trx_complete(i2c_imx, !use_dma); + if (result) + return result; + result = i2c_imx_acked(i2c_imx); + if (result) + return result; + + dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); + + /* setup bus to read data */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp &= ~I2CR_MTX; + + /* + * Reset the I2CR_TXAK flag initially for SMBus block read since the + * length is unknown + */ + if (msgs->len - 1) + temp &= ~I2CR_TXAK; + if (use_dma) + temp |= I2CR_DMAEN; + + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); +
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ + + return 0; +} + static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg) { @@ -1016,6 +1256,13 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; + i2c_imx->state = IMX_I2C_STATE_DMA; + + result = i2c_imx_prepare_read(i2c_imx, msgs, true); + if (result) + return result; + + dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); dma->chan_using = dma->chan_rx; dma->dma_transfer_dir = DMA_DEV_TO_MEM; @@ -1092,8 +1339,8 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, return 0; } -static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, - bool atomic) +static int i2c_imx_atomic_write(struct imx_i2c_struct *i2c_imx, + struct i2c_msg *msgs) { int i, result; @@ -1102,7 +1349,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, /* write slave address */ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_trx_complete(i2c_imx, true); if (result) return result; result = i2c_imx_acked(i2c_imx); @@ -1116,7 +1363,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, "<%s> write byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR); - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_trx_complete(i2c_imx, true); if (result) return result; result = i2c_imx_acked(i2c_imx); @@ -1126,55 +1373,54 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, return 0; } -static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, - bool is_lastmsg, bool atomic) +static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) +{ + dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", + __func__, i2c_8bit_addr_from_msg(msgs)); + + i2c_imx->state = IMX_I2C_STATE_WRITE; + i2c_imx->msg = msgs; + i2c_imx->msg_buf_idx = 0; + + /* + * By writing the device address we start the state machine in the ISR. + * The ISR will report when it is done or when it fails. 
+ */ + imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); + wait_event_timeout(i2c_imx->queue, + i2c_imx->state == IMX_I2C_STATE_DONE || + i2c_imx->state == IMX_I2C_STATE_FAILED, + (msgs->len + 1) * HZ / 10); + if (i2c_imx->state == IMX_I2C_STATE_FAILED) { + dev_dbg(&i2c_imx->adapter.dev, "<%s> write failed with %d\n", + __func__, i2c_imx->isr_result); + return i2c_imx->isr_result; + } + if (i2c_imx->state != IMX_I2C_STATE_DONE) { + dev_err(&i2c_imx->adapter.dev, "<%s> write timed out\n", __func__); + return -ETIMEDOUT; + } + return 0; +} + +static int i2c_imx_atomic_read(struct imx_i2c_struct *i2c_imx, + struct i2c_msg *msgs, bool is_lastmsg) { int i, result; unsigned int temp; int block_data = msgs->flags & I2C_M_RECV_LEN; - int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE && - msgs->len >= DMA_THRESHOLD && !block_data; - - dev_dbg(&i2c_imx->adapter.dev, - "<%s> write slave address: addr=0x%x\n", - __func__, i2c_8bit_addr_from_msg(msgs)); - /* write slave address */ - imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); - result = i2c_imx_trx_complete(i2c_imx, atomic); - if (result) - return result; - result = i2c_imx_acked(i2c_imx); + result = i2c_imx_prepare_read(i2c_imx, msgs, false); if (result) return result; - dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); - - /* setup bus to read data */ - temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); - temp &= ~I2CR_MTX; - - /* - * Reset the I2CR_TXAK flag initially for SMBus block read since the - * length is unknown - */ - if ((msgs->len - 1) || block_data) - temp &= ~I2CR_TXAK; - if (use_dma) - temp |= I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ - dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); - if (use_dma) - return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); - /* read data */ for (i = 0; i < msgs->len; i++) { u8 len = 0; - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_trx_complete(i2c_imx, true); if (result) return result; /* @@ -1205,7 +1451,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); if (!i2c_imx->stopped) - i2c_imx_bus_busy(i2c_imx, 0, atomic); + i2c_imx_bus_busy(i2c_imx, 0, true); } else { /* * For i2c master receiver repeat restart operation like: @@ -1236,6 +1482,48 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, return 0; } +static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, + bool is_lastmsg) +{ + int block_data = msgs->flags & I2C_M_RECV_LEN; + + dev_dbg(&i2c_imx->adapter.dev, + "<%s> write slave address: addr=0x%x\n", + __func__, i2c_8bit_addr_from_msg(msgs)); + + i2c_imx->is_lastmsg = is_lastmsg; + + if (block_data) + i2c_imx->state = IMX_I2C_STATE_READ_BLOCK_DATA; + else + i2c_imx->state = IMX_I2C_STATE_READ; + i2c_imx->msg = msgs; + i2c_imx->msg_buf_idx = 0; + + /* + * By writing the device address we start the state machine in the ISR. + * The ISR will report when it is done or when it fails.
+ */ + imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); + wait_event_timeout(i2c_imx->queue, + i2c_imx->state == IMX_I2C_STATE_DONE || + i2c_imx->state == IMX_I2C_STATE_FAILED, + (msgs->len + 1) * HZ / 10); + if (i2c_imx->state == IMX_I2C_STATE_FAILED) { + dev_dbg(&i2c_imx->adapter.dev, "<%s> read failed with %d\n", + __func__, i2c_imx->isr_result); + return i2c_imx->isr_result; + } + if (i2c_imx->state != IMX_I2C_STATE_DONE) { + dev_err(&i2c_imx->adapter.dev, "<%s> read timed out\n", __func__); + return -ETIMEDOUT; + } + if (!i2c_imx->stopped) + return i2c_imx_bus_busy(i2c_imx, 0, false); + + return 0; +} + static int i2c_imx_xfer_common(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, bool atomic) { @@ -1243,6 +1531,7 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter, int result; bool is_lastmsg = false; struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); + int use_dma = 0; /* Start I2C transfer */ result = i2c_imx_start(i2c_imx, atomic); @@ -1295,15 +1584,25 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter, (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0), (temp & I2SR_RXAK ? 1 : 0)); #endif + + use_dma = i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && + msgs[i].flags & I2C_M_DMA_SAFE; if (msgs[i].flags & I2C_M_RD) { - result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic); + int block_data = msgs->flags & I2C_M_RECV_LEN; + + if (atomic) + result = i2c_imx_atomic_read(i2c_imx, &msgs[i], is_lastmsg); + else if (use_dma && !block_data) + result = i2c_imx_dma_read(i2c_imx, &msgs[i], is_lastmsg); + else + result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg); } else { - if (!atomic && - i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && - msgs[i].flags & I2C_M_DMA_SAFE) + if (atomic) + result = i2c_imx_atomic_write(i2c_imx, &msgs[i]); + else if (use_dma) + result = i2c_imx_dma_write(i2c_imx, &msgs[i]); else - result = i2c_imx_write(i2c_imx, &msgs[i], atomic); + result = i2c_imx_write(i2c_imx, &msgs[i]); } if (result) goto fail0; @@ -1468,6 +1767,12 @@ static int i2c_imx_probe(struct platform_device *pdev) goto rpm_disable; } + /* + * We use the single-master property for backward compatibility. + * By default, multi-master mode is enabled.
+ */ + i2c_imx->multi_master = !of_property_read_bool(pdev->dev.of_node, "single-master"); + /* Set up clock divider */ i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ; ret = of_property_read_u32(pdev->dev.of_node, @@ -1576,7 +1881,7 @@ static const struct dev_pm_ops i2c_imx_pm_ops = { static struct platform_driver i2c_imx_driver = { .probe = i2c_imx_probe, - .remove_new = i2c_imx_remove, + .remove = i2c_imx_remove, .driver = { .name = DRIVER_NAME, .pm = pm_ptr(&i2c_imx_pm_ops), diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 859c14e340e7..ce5ca5b90b39 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -524,7 +524,7 @@ MODULE_DEVICE_TABLE(of, i2c_iop3xx_match); static struct platform_driver iop3xx_i2c_driver = { .probe = iop3xx_i2c_probe, - .remove_new = iop3xx_i2c_remove, + .remove = iop3xx_i2c_remove, .driver = { .name = "IOP3xx-I2C", .of_match_table = i2c_iop3xx_match, diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index f59158489ad9..2b3b65ef2900 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -1,41 +1,39 @@ // SPDX-License-Identifier: GPL-2.0-only /* - i2c-isch.c - Linux kernel driver for Intel SCH chipset SMBus - - Based on i2c-piix4.c - Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and - Philip Edelbrock <phil@netroedge.com> - - Intel SCH support - Copyright (c) 2007 - 2008 Jacob Jun Pan <jacob.jun.pan@intel.com> - -*/ + * Linux kernel driver for Intel SCH chipset SMBus + * - Based on i2c-piix4.c + * Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and + * Philip Edelbrock <phil@netroedge.com> + * - Intel SCH support + * Copyright (c) 2007 - 2008 Jacob Jun Pan <jacob.jun.pan@intel.com> + */ -/* - Supports: - Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L) - Note: we assume there can only be one device, with one SMBus interface. 
-*/ +/* Supports: Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L) */ +#include <linux/container_of.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/gfp_types.h> +#include <linux/i2c.h> +#include <linux/iopoll.h> +#include <linux/ioport.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/kernel.h> -#include <linux/delay.h> +#include <linux/sprintf.h> #include <linux/stddef.h> -#include <linux/ioport.h> -#include <linux/i2c.h> -#include <linux/io.h> +#include <linux/string_choices.h> +#include <linux/types.h> /* SCH SMBus address offsets */ -#define SMBHSTCNT (0 + sch_smba) -#define SMBHSTSTS (1 + sch_smba) -#define SMBHSTCLK (2 + sch_smba) -#define SMBHSTADD (4 + sch_smba) /* TSA */ -#define SMBHSTCMD (5 + sch_smba) -#define SMBHSTDAT0 (6 + sch_smba) -#define SMBHSTDAT1 (7 + sch_smba) -#define SMBBLKDAT (0x20 + sch_smba) - -/* Other settings */ -#define MAX_RETRIES 5000 +#define SMBHSTCNT 0x00 +#define SMBHSTSTS 0x01 +#define SMBHSTCLK 0x02 +#define SMBHSTADD 0x04 /* TSA */ +#define SMBHSTCMD 0x05 +#define SMBHSTDAT0 0x06 +#define SMBHSTDAT1 0x07 +#define SMBBLKDAT 0x20 /* I2C constants */ #define SCH_QUICK 0x00 @@ -44,109 +42,134 @@ #define SCH_WORD_DATA 0x03 #define SCH_BLOCK_DATA 0x05 -static unsigned short sch_smba; -static struct i2c_adapter sch_adapter; +struct sch_i2c { + struct i2c_adapter adapter; + void __iomem *smba; +}; + static int backbone_speed = 33000; /* backbone speed in kHz */ -module_param(backbone_speed, int, S_IRUSR | S_IWUSR); +module_param(backbone_speed, int, 0600); MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)"); -/* - * Start the i2c transaction -- the i2c_access will prepare the transaction - * and this function will execute it. - * return 0 for success and others for failure. +static inline u8 sch_io_rd8(struct sch_i2c *priv, unsigned int offset) +{ + return ioread8(priv->smba + offset); +} + +static inline void sch_io_wr8(struct sch_i2c *priv, unsigned int offset, u8 value) +{ + iowrite8(value, priv->smba + offset); +} + +static inline u16 sch_io_rd16(struct sch_i2c *priv, unsigned int offset) +{ + return ioread16(priv->smba + offset); +} + +static inline void sch_io_wr16(struct sch_i2c *priv, unsigned int offset, u16 value) +{ + iowrite16(value, priv->smba + offset); +} + +/** + * sch_transaction - Start the i2c transaction + * @adap: the i2c adapter pointer + * + * The sch_access() will prepare the transaction and + * this function will execute it. + * + * Return: 0 for success and others for failure. */ -static int sch_transaction(void) +static int sch_transaction(struct i2c_adapter *adap) { + struct sch_i2c *priv = container_of(adap, struct sch_i2c, adapter); int temp; - int result = 0; - int retries = 0; + int rc; - dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, " - "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), - inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0), - inb(SMBHSTDAT1)); + dev_dbg(&adap->dev, + "Transaction (pre): CNT=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", + sch_io_rd8(priv, SMBHSTCNT), sch_io_rd8(priv, SMBHSTCMD), + sch_io_rd8(priv, SMBHSTADD), + sch_io_rd8(priv, SMBHSTDAT0), sch_io_rd8(priv, SMBHSTDAT1)); /* Make sure the SMBus host is ready to start transmitting */ - temp = inb(SMBHSTSTS) & 0x0f; + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f; if (temp) { /* Can not be busy since we checked it in sch_access */ - if (temp & 0x01) { - dev_dbg(&sch_adapter.dev, "Completion (%02x). 
" - "Clear...\n", temp); - } - if (temp & 0x06) { - dev_dbg(&sch_adapter.dev, "SMBus error (%02x). " - "Resetting...\n", temp); - } - outb(temp, SMBHSTSTS); - temp = inb(SMBHSTSTS) & 0x0f; + if (temp & 0x01) + dev_dbg(&adap->dev, "Completion (%02x). Clear...\n", temp); + if (temp & 0x06) + dev_dbg(&adap->dev, "SMBus error (%02x). Resetting...\n", temp); + sch_io_wr8(priv, SMBHSTSTS, temp); + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f; if (temp) { - dev_err(&sch_adapter.dev, - "SMBus is not ready: (%02x)\n", temp); + dev_err(&adap->dev, "SMBus is not ready: (%02x)\n", temp); return -EAGAIN; } } - /* start the transaction by setting bit 4 */ - outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT); - - do { - usleep_range(100, 200); - temp = inb(SMBHSTSTS) & 0x0f; - } while ((temp & 0x08) && (retries++ < MAX_RETRIES)); + /* Start the transaction by setting bit 4 */ + temp = sch_io_rd8(priv, SMBHSTCNT); + temp |= 0x10; + sch_io_wr8(priv, SMBHSTCNT, temp); + rc = read_poll_timeout(sch_io_rd8, temp, !(temp & 0x08), 200, 500000, true, priv, SMBHSTSTS); /* If the SMBus is still busy, we give up */ - if (retries > MAX_RETRIES) { - dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); - result = -ETIMEDOUT; + if (rc) { + dev_err(&adap->dev, "SMBus Timeout!\n"); } else if (temp & 0x04) { - result = -EIO; - dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be " - "locked until next hard reset. (sorry!)\n"); + rc = -EIO; + dev_dbg(&adap->dev, "Bus collision! SMBus may be locked until next hard reset. (sorry!)\n"); /* Clock stops and target is stuck in mid-transmission */ } else if (temp & 0x02) { - result = -EIO; - dev_err(&sch_adapter.dev, "Error: no response!\n"); + rc = -EIO; + dev_err(&adap->dev, "Error: no response!\n"); } else if (temp & 0x01) { - dev_dbg(&sch_adapter.dev, "Post complete!\n"); - outb(temp, SMBHSTSTS); - temp = inb(SMBHSTSTS) & 0x07; + dev_dbg(&adap->dev, "Post complete!\n"); + sch_io_wr8(priv, SMBHSTSTS, temp & 0x0f); + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x07; if (temp & 0x06) { /* Completion clear failed */ - dev_dbg(&sch_adapter.dev, "Failed reset at end of " - "transaction (%02x), Bus error!\n", temp); + dev_dbg(&adap->dev, + "Failed reset at end of transaction (%02x), Bus error!\n", temp); } } else { - result = -ENXIO; - dev_dbg(&sch_adapter.dev, "No such address.\n"); + rc = -ENXIO; + dev_dbg(&adap->dev, "No such address.\n"); } - dev_dbg(&sch_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, " - "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), - inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0), - inb(SMBHSTDAT1)); - return result; + dev_dbg(&adap->dev, "Transaction (post): CNT=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", + sch_io_rd8(priv, SMBHSTCNT), sch_io_rd8(priv, SMBHSTCMD), + sch_io_rd8(priv, SMBHSTADD), + sch_io_rd8(priv, SMBHSTDAT0), sch_io_rd8(priv, SMBHSTDAT1)); + return rc; } -/* - * This is the main access entry for i2c-sch access - * adap is i2c_adapter pointer, addr is the i2c device bus address, read_write - * (0 for read and 1 for write), size is i2c transaction type and data is the - * union of transaction for data to be transferred or data read from bus. - * return 0 for success and others for failure. 
+/** + * sch_access - the main access entry for i2c-sch access + * @adap: the i2c adapter pointer + * @addr: the i2c device bus address + * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC) + * @read_write: 0 for read and 1 for write + * @command: Byte interpreted by slave, for protocols which use such bytes + * @size: the i2c transaction type + * @data: the union of transaction for data to be transferred or data read from bus + * + * Return: 0 for success and others for failure. */ static s32 sch_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { + struct sch_i2c *priv = container_of(adap, struct sch_i2c, adapter); int i, len, temp, rc; /* Make sure the SMBus host is not busy */ - temp = inb(SMBHSTSTS) & 0x0f; + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f; if (temp & 0x08) { - dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp); + dev_dbg(&adap->dev, "SMBus busy (%02x)\n", temp); return -EAGAIN; } - temp = inw(SMBHSTCLK); + temp = sch_io_rd16(priv, SMBHSTCLK); if (!temp) { /* * We can't determine if we have 33 or 25 MHz clock for @@ -154,50 +177,48 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr, * 100 kHz. If we actually run at 25 MHz the bus will be * run ~75 kHz instead which should do no harm. */ - dev_notice(&sch_adapter.dev, - "Clock divider uninitialized. Setting defaults\n"); - outw(backbone_speed / (4 * 100), SMBHSTCLK); + dev_notice(&adap->dev, "Clock divider uninitialized. Setting defaults\n"); + sch_io_wr16(priv, SMBHSTCLK, backbone_speed / (4 * 100)); } - dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size, - (read_write)?"READ":"WRITE"); + dev_dbg(&adap->dev, "access size: %d %s\n", size, str_read_write(read_write)); switch (size) { case I2C_SMBUS_QUICK: - outb((addr << 1) | read_write, SMBHSTADD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); size = SCH_QUICK; break; case I2C_SMBUS_BYTE: - outb((addr << 1) | read_write, SMBHSTADD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); if (read_write == I2C_SMBUS_WRITE) - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTCMD, command); size = SCH_BYTE; break; case I2C_SMBUS_BYTE_DATA: - outb((addr << 1) | read_write, SMBHSTADD); - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); + sch_io_wr8(priv, SMBHSTCMD, command); if (read_write == I2C_SMBUS_WRITE) - outb(data->byte, SMBHSTDAT0); + sch_io_wr8(priv, SMBHSTDAT0, data->byte); size = SCH_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: - outb((addr << 1) | read_write, SMBHSTADD); - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); + sch_io_wr8(priv, SMBHSTCMD, command); if (read_write == I2C_SMBUS_WRITE) { - outb(data->word & 0xff, SMBHSTDAT0); - outb((data->word & 0xff00) >> 8, SMBHSTDAT1); + sch_io_wr8(priv, SMBHSTDAT0, data->word >> 0); + sch_io_wr8(priv, SMBHSTDAT1, data->word >> 8); } size = SCH_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: - outb((addr << 1) | read_write, SMBHSTADD); - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); + sch_io_wr8(priv, SMBHSTCMD, command); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; - outb(len, SMBHSTDAT0); + sch_io_wr8(priv, SMBHSTDAT0, len); for (i = 1; i <= len; i++) - outb(data->block[i], SMBBLKDAT+i-1); + sch_io_wr8(priv, SMBBLKDAT + i - 1, data->block[i]); } size = SCH_BLOCK_DATA; break; @@ -205,10 +226,13 @@ static s32 sch_access(struct 
i2c_adapter *adap, u16 addr, dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } - dev_dbg(&sch_adapter.dev, "write size %d to 0x%04x\n", size, SMBHSTCNT); - outb((inb(SMBHSTCNT) & 0xb0) | (size & 0x7), SMBHSTCNT); + dev_dbg(&adap->dev, "write size %d to 0x%04x\n", size, SMBHSTCNT); + + temp = sch_io_rd8(priv, SMBHSTCNT); + temp = (temp & 0xb0) | (size & 0x7); + sch_io_wr8(priv, SMBHSTCNT, temp); - rc = sch_transaction(); + rc = sch_transaction(adap); if (rc) /* Error in transaction */ return rc; @@ -218,17 +242,18 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr, switch (size) { case SCH_BYTE: case SCH_BYTE_DATA: - data->byte = inb(SMBHSTDAT0); + data->byte = sch_io_rd8(priv, SMBHSTDAT0); break; case SCH_WORD_DATA: - data->word = inb(SMBHSTDAT0) + (inb(SMBHSTDAT1) << 8); + data->word = (sch_io_rd8(priv, SMBHSTDAT0) << 0) + + (sch_io_rd8(priv, SMBHSTDAT1) << 8); break; case SCH_BLOCK_DATA: - data->block[0] = inb(SMBHSTDAT0); + data->block[0] = sch_io_rd8(priv, SMBHSTDAT0); if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; for (i = 1; i <= data->block[0]; i++) - data->block[i] = inb(SMBBLKDAT+i-1); + data->block[i] = sch_io_rd8(priv, SMBBLKDAT + i - 1); break; } return 0; @@ -246,51 +271,34 @@ static const struct i2c_algorithm smbus_algorithm = { .functionality = sch_func, }; -static struct i2c_adapter sch_adapter = { - .owner = THIS_MODULE, - .class = I2C_CLASS_HWMON, - .algo = &smbus_algorithm, -}; - -static int smbus_sch_probe(struct platform_device *dev) +static int smbus_sch_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; + struct sch_i2c *priv; struct resource *res; - int retval; - res = platform_get_resource(dev, IORESOURCE_IO, 0); - if (!res) - return -EBUSY; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - if (!devm_request_region(&dev->dev, res->start, resource_size(res), - dev->name)) { - dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", - sch_smba); + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) return -EBUSY; - } - - sch_smba = res->start; - dev_dbg(&dev->dev, "SMBA = 0x%X\n", sch_smba); + priv->smba = devm_ioport_map(dev, res->start, resource_size(res)); + if (!priv->smba) + return dev_err_probe(dev, -EBUSY, "SMBus region %pR already in use!\n", res); - /* set up the sysfs linkage to our parent device */ - sch_adapter.dev.parent = &dev->dev; + /* Set up the sysfs linkage to our parent device */ + priv->adapter.dev.parent = dev; + priv->adapter.owner = THIS_MODULE, + priv->adapter.class = I2C_CLASS_HWMON, + priv->adapter.algo = &smbus_algorithm, - snprintf(sch_adapter.name, sizeof(sch_adapter.name), - "SMBus SCH adapter at %04x", sch_smba); + snprintf(priv->adapter.name, sizeof(priv->adapter.name), + "SMBus SCH adapter at %04x", (unsigned short)res->start); - retval = i2c_add_adapter(&sch_adapter); - if (retval) - sch_smba = 0; - - return retval; -} - -static void smbus_sch_remove(struct platform_device *pdev) -{ - if (sch_smba) { - i2c_del_adapter(&sch_adapter); - sch_smba = 0; - } + return devm_i2c_add_adapter(dev, &priv->adapter); } static struct platform_driver smbus_sch_driver = { @@ -298,7 +306,6 @@ static struct platform_driver smbus_sch_driver = { .name = "isch_smbus", }, .probe = smbus_sch_probe, - .remove_new = smbus_sch_remove, }; module_platform_driver(smbus_sch_driver); diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 92cc5b091137..664a5471d933 100644 --- 
a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -847,7 +847,7 @@ static void jz4780_i2c_remove(struct platform_device *pdev) static struct platform_driver jz4780_i2c_driver = { .probe = jz4780_i2c_probe, - .remove_new = jz4780_i2c_remove, + .remove = jz4780_i2c_remove, .driver = { .name = "jz4780-i2c", .of_match_table = jz4780_i2c_of_matches, diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index eb66942e0b7d..212196af68ba 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c @@ -385,7 +385,7 @@ static struct platform_driver kempld_i2c_driver = { .pm = pm_sleep_ptr(&kempld_i2c_pm_ops), }, .probe = kempld_i2c_probe, - .remove_new = kempld_i2c_remove, + .remove = kempld_i2c_remove, }; module_platform_driver(kempld_i2c_driver); diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c index 9fb33cbf7419..6943a0de860a 100644 --- a/drivers/i2c/busses/i2c-lpc2k.c +++ b/drivers/i2c/busses/i2c-lpc2k.c @@ -462,7 +462,7 @@ MODULE_DEVICE_TABLE(of, lpc2k_i2c_match); static struct platform_driver i2c_lpc2k_driver = { .probe = i2c_lpc2k_probe, - .remove_new = i2c_lpc2k_remove, + .remove = i2c_lpc2k_remove, .driver = { .name = "lpc2k-i2c", .pm = pm_sleep_ptr(&i2c_lpc2k_dev_pm_ops), diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index c7b203cc4434..e1d69537353b 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -565,7 +565,7 @@ MODULE_DEVICE_TABLE(of, meson_i2c_match); static struct platform_driver meson_i2c_driver = { .probe = meson_i2c_probe, - .remove_new = meson_i2c_remove, + .remove = meson_i2c_remove, .driver = { .name = "meson-i2c", .of_match_table = meson_i2c_match, diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c index 0b0a1c4d17ca..d1543e7d8380 100644 --- a/drivers/i2c/busses/i2c-microchip-corei2c.c +++ b/drivers/i2c/busses/i2c-microchip-corei2c.c @@ -462,7 +462,7 @@ MODULE_DEVICE_TABLE(of, mchp_corei2c_of_match); static struct platform_driver mchp_corei2c_driver = { .probe = mchp_corei2c_probe, - .remove_new = mchp_corei2c_remove, + .remove = mchp_corei2c_remove, .driver = { .name = "microchip-corei2c", .of_match_table = mchp_corei2c_of_match, diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index b3a73921ab69..21f67f3b65b6 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -2456,7 +2456,7 @@ static void mlxbf_i2c_remove(struct platform_device *pdev) static struct platform_driver mlxbf_i2c_driver = { .probe = mlxbf_i2c_probe, - .remove_new = mlxbf_i2c_remove, + .remove = mlxbf_i2c_remove, .driver = { .name = "i2c-mlxbf", .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 8223f6d29eb3..07d3cadbf510 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -591,7 +591,7 @@ static void mlxcpld_i2c_remove(struct platform_device *pdev) static struct platform_driver mlxcpld_i2c_driver = { .probe = mlxcpld_i2c_probe, - .remove_new = mlxcpld_i2c_remove, + .remove = mlxcpld_i2c_remove, .driver = { .name = MLXCPLD_I2C_DEVICE_NAME, }, diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 236d6b8ba867..28c5c5c1fb7a 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -938,7 +938,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match); /* Structure for a device driver */ 
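The one-line .remove_new to .remove conversions above, and throughout the rest of this section, all have the same shape: struct platform_driver's .remove member now uses the void-returning prototype that the transitional .remove_new member carried, so drivers switch back to plain .remove with no change to the callback body. A minimal sketch with a hypothetical demo driver, before resuming with the mpc-i2c structure below:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative driver only; names are not from this series. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* void return type: a remove callback has no way to fail */
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,	/* was: .remove_new = demo_remove */
	.driver	= {
		.name = "demo-driver",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("Illustrative platform driver, not part of this series");
MODULE_LICENSE("GPL");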
static struct platform_driver mpc_i2c_driver = { .probe = fsl_i2c_probe, - .remove_new = fsl_i2c_remove, + .remove = fsl_i2c_remove, .driver = { .name = "mpc-i2c", .of_match_table = mpc_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index e0ba653dec2d..5bd342047d59 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1550,7 +1550,7 @@ static const struct dev_pm_ops mtk_i2c_pm = { static struct platform_driver mtk_i2c_driver = { .probe = mtk_i2c_probe, - .remove_new = mtk_i2c_remove, + .remove = mtk_i2c_remove, .driver = { .name = I2C_DRV_NAME, .pm = pm_sleep_ptr(&mtk_i2c_pm), diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c index 23d417ff5e71..2103f21f9ddd 100644 --- a/drivers/i2c/busses/i2c-mt7621.c +++ b/drivers/i2c/busses/i2c-mt7621.c @@ -331,7 +331,7 @@ static void mtk_i2c_remove(struct platform_device *pdev) static struct platform_driver mtk_i2c_driver = { .probe = mtk_i2c_probe, - .remove_new = mtk_i2c_remove, + .remove = mtk_i2c_remove, .driver = { .name = "i2c-mt7621", .of_match_table = i2c_mtk_dt_ids, diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 29f94efedf60..874309580c33 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -1104,7 +1104,7 @@ static const struct dev_pm_ops mv64xxx_i2c_pm_ops = { static struct platform_driver mv64xxx_i2c_driver = { .probe = mv64xxx_i2c_probe, - .remove_new = mv64xxx_i2c_remove, + .remove = mv64xxx_i2c_remove, .driver = { .name = MV64XXX_I2C_CTLR_NAME, .pm = &mv64xxx_i2c_pm_ops, diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 36def0a9c95c..ad62d56b2186 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -881,7 +881,7 @@ static struct platform_driver mxs_i2c_driver = { .of_match_table = mxs_i2c_dt_ids, }, .probe = mxs_i2c_probe, - .remove_new = mxs_i2c_remove, + .remove = mxs_i2c_remove, }; static int __init mxs_i2c_init(void) diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c deleted file mode 100644 index 69a71bc9830d..000000000000 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c +++ /dev/null @@ -1,240 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard - * - * Copyright (C) 2008 Jean Delvare <jdelvare@suse.de> - */ - -/* - * We select the channels by sending commands to the Philips - * PCA9556 chip at I2C address 0x18. The main adapter is used for - * the non-multiplexed part of the bus, and 4 virtual adapters - * are defined for the multiplexed addresses: 0x50-0x53 (memory - * module EEPROM) located on channels 1-4. 
We define one virtual - * adapter per CPU, which corresponds to one multiplexed channel: - * CPU0: virtual adapter 1, channel 1 - * CPU1: virtual adapter 2, channel 2 - * CPU2: virtual adapter 3, channel 3 - * CPU3: virtual adapter 4, channel 4 - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/i2c.h> -#include <linux/mutex.h> - -extern struct i2c_adapter *nforce2_smbus; - -static struct i2c_adapter *s4985_adapter; -static struct i2c_algorithm *s4985_algo; - -/* Wrapper access functions for multiplexed SMBus */ -static DEFINE_MUTEX(nforce2_lock); - -static s32 nforce2_access_virt0(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - int error; - - /* We exclude the multiplexed addresses */ - if ((addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 - || addr == 0x18) - return -ENXIO; - - mutex_lock(&nforce2_lock); - error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - mutex_unlock(&nforce2_lock); - - return error; -} - -/* We remember the last used channels combination so as to only switch - channels when it is really needed. This greatly reduces the SMBus - overhead, but also assumes that nobody will be writing to the PCA9556 - in our back. */ -static u8 last_channels; - -static inline s32 nforce2_access_channel(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data, - u8 channels) -{ - int error; - - /* We exclude the non-multiplexed addresses */ - if ((addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) - return -ENXIO; - - mutex_lock(&nforce2_lock); - if (last_channels != channels) { - union i2c_smbus_data mplxdata; - mplxdata.byte = channels; - - error = nforce2_smbus->algo->smbus_xfer(adap, 0x18, 0, - I2C_SMBUS_WRITE, 0x01, - I2C_SMBUS_BYTE_DATA, - &mplxdata); - if (error) - goto UNLOCK; - last_channels = channels; - } - error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - -UNLOCK: - mutex_unlock(&nforce2_lock); - return error; -} - -static s32 nforce2_access_virt1(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU0: channel 1 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x02); -} - -static s32 nforce2_access_virt2(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU1: channel 2 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x04); -} - -static s32 nforce2_access_virt3(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU2: channel 3 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x08); -} - -static s32 nforce2_access_virt4(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU3: channel 4 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x10); -} - -static int __init nforce2_s4985_init(void) -{ - int i, error; - union i2c_smbus_data ioconfig; - - if (!nforce2_smbus) - return -ENODEV; - - /* Configure the PCA9556 multiplexer */ - 
ioconfig.byte = 0x00; /* All I/O to output mode */ - error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, - I2C_SMBUS_BYTE_DATA, &ioconfig); - if (error) { - dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n"); - error = -EIO; - goto ERROR0; - } - - /* Unregister physical bus */ - i2c_del_adapter(nforce2_smbus); - - printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n"); - /* Define the 5 virtual adapters and algorithms structures */ - s4985_adapter = kcalloc(5, sizeof(struct i2c_adapter), GFP_KERNEL); - if (!s4985_adapter) { - error = -ENOMEM; - goto ERROR1; - } - s4985_algo = kcalloc(5, sizeof(struct i2c_algorithm), GFP_KERNEL); - if (!s4985_algo) { - error = -ENOMEM; - goto ERROR2; - } - - /* Fill in the new structures */ - s4985_algo[0] = *(nforce2_smbus->algo); - s4985_algo[0].smbus_xfer = nforce2_access_virt0; - s4985_adapter[0] = *nforce2_smbus; - s4985_adapter[0].algo = s4985_algo; - s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent; - for (i = 1; i < 5; i++) { - s4985_algo[i] = *(nforce2_smbus->algo); - s4985_adapter[i] = *nforce2_smbus; - snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name), - "SMBus nForce2 adapter (CPU%d)", i - 1); - s4985_adapter[i].algo = s4985_algo + i; - s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent; - } - s4985_algo[1].smbus_xfer = nforce2_access_virt1; - s4985_algo[2].smbus_xfer = nforce2_access_virt2; - s4985_algo[3].smbus_xfer = nforce2_access_virt3; - s4985_algo[4].smbus_xfer = nforce2_access_virt4; - - /* Register virtual adapters */ - for (i = 0; i < 5; i++) { - error = i2c_add_adapter(s4985_adapter + i); - if (error) { - printk(KERN_ERR "i2c-nforce2-s4985: " - "Virtual adapter %d registration " - "failed, module not inserted\n", i); - for (i--; i >= 0; i--) - i2c_del_adapter(s4985_adapter + i); - goto ERROR3; - } - } - - return 0; - -ERROR3: - kfree(s4985_algo); - s4985_algo = NULL; -ERROR2: - kfree(s4985_adapter); - s4985_adapter = NULL; -ERROR1: - /* Restore physical bus */ - i2c_add_adapter(nforce2_smbus); -ERROR0: - return error; -} - -static void __exit nforce2_s4985_exit(void) -{ - if (s4985_adapter) { - int i; - - for (i = 0; i < 5; i++) - i2c_del_adapter(s4985_adapter+i); - kfree(s4985_adapter); - s4985_adapter = NULL; - } - kfree(s4985_algo); - s4985_algo = NULL; - - /* Restore physical bus */ - if (i2c_add_adapter(nforce2_smbus)) - printk(KERN_ERR "i2c-nforce2-s4985: " - "Physical bus restoration failed\n"); -} - -MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); -MODULE_DESCRIPTION("S4985 SMBus multiplexing"); -MODULE_LICENSE("GPL"); - -module_init(nforce2_s4985_init); -module_exit(nforce2_s4985_exit); diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index fab662e6bc08..d58a308582e4 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -117,20 +117,6 @@ static const struct dmi_system_id nforce2_dmi_blacklist2[] = { static struct pci_driver nforce2_driver; -/* For multiplexing support, we need a global reference to the 1st - SMBus channel */ -#if IS_ENABLED(CONFIG_I2C_NFORCE2_S4985) -struct i2c_adapter *nforce2_smbus; -EXPORT_SYMBOL_GPL(nforce2_smbus); - -static void nforce2_set_reference(struct i2c_adapter *adap) -{ - nforce2_smbus = adap; -} -#else -static inline void nforce2_set_reference(struct i2c_adapter *adap) { } -#endif - static void nforce2_abort(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; @@ -411,7 +397,6 @@ static int nforce2_probe(struct pci_dev *dev, const struct 
pci_device_id *id) return -ENODEV; } - nforce2_set_reference(&smbuses[0].adapter); return 0; } @@ -420,7 +405,6 @@ static void nforce2_remove(struct pci_dev *dev) { struct nforce2_smbus *smbuses = pci_get_drvdata(dev); - nforce2_set_reference(NULL); if (smbuses[0].base) { i2c_del_adapter(&smbuses[0].adapter); release_region(smbuses[0].base, smbuses[0].size); diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index ad0f02acdb12..efb33802804f 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -6,10 +6,10 @@ * I2C master mode controller driver, used in Nomadik 8815 * and Ux500 platforms. * - * The Mobileye EyeQ5 platform is also supported; it uses + * The Mobileye EyeQ5 and EyeQ6H platforms are also supported; they use * the same Ux500/DB8500 IP block with two quirks: * - The memory bus only supports 32-bit accesses. - * - A register must be configured for the I2C speed mode; + * - (only EyeQ5) A register must be configured for the I2C speed mode; * it is located in a shared register region called OLB. * * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> @@ -26,6 +26,7 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> @@ -396,7 +397,7 @@ static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *priv, u16 flags) */ static void setup_i2c_controller(struct nmk_i2c_dev *priv) { - u32 brcr1, brcr2; + u32 brcr; u32 i2c_clk, div; u32 ns; u16 slsu; @@ -443,7 +444,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *priv) /* * The spec says, in case of std. mode the divider is * 2 whereas it is 3 for fast and fastplus mode of - * operation. TODO - high speed support. + * operation. */ div = (priv->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2; @@ -451,30 +452,23 @@ static void setup_i2c_controller(struct nmk_i2c_dev *priv) * generate the mask for baud rate counters. The controller * has two baud rate counters. One is used for High speed * operation, and the other is for std, fast mode, fast mode - * plus operation. Currently we do not supprt high speed mode - * so set brcr1 to 0. + * plus operation. + * + * BRCR is a clock divider amount. Pick highest value that + * leads to rate strictly below target. Eg when asking for + * 400kHz you want a bus rate <=400kHz (and not >=400kHz). */ - brcr1 = FIELD_PREP(I2C_BRCR_BRCNT1, 0); - brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2, i2c_clk / (priv->clk_freq * div)); + brcr = DIV_ROUND_UP(i2c_clk, priv->clk_freq * div); + + if (priv->sm == I2C_FREQ_MODE_HIGH_SPEED) + brcr = FIELD_PREP(I2C_BRCR_BRCNT1, brcr); + else + brcr = FIELD_PREP(I2C_BRCR_BRCNT2, brcr); /* set the baud rate counter register */ - writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR); + writel(brcr, priv->virtbase + I2C_BRCR); - /* - * set the speed mode. Currently we support - * only standard and fast mode of operation - * TODO - support for fast mode plus (up to 1Mb/s) - * and high speed (up to 3.4 Mb/s) - */ - if (priv->sm > I2C_FREQ_MODE_FAST) { - dev_err(&priv->adev->dev, - "do not support this mode defaulting to std. 
mode\n"); - brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2, - i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2)); - writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR); - writel(FIELD_PREP(I2C_CR_SM, I2C_FREQ_MODE_STANDARD), - priv->virtbase + I2C_CR); - } + /* set the speed mode */ writel(FIELD_PREP(I2C_CR_SM, priv->sm), priv->virtbase + I2C_CR); /* set the Tx and Rx FIFO threshold */ @@ -1015,11 +1009,14 @@ static void nmk_i2c_of_probe(struct device_node *np, if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq)) priv->clk_freq = I2C_MAX_STANDARD_MODE_FREQ; - /* This driver only supports 'standard' and 'fast' modes of operation. */ if (priv->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) priv->sm = I2C_FREQ_MODE_STANDARD; - else + else if (priv->clk_freq <= I2C_MAX_FAST_MODE_FREQ) priv->sm = I2C_FREQ_MODE_FAST; + else if (priv->clk_freq <= I2C_MAX_FAST_MODE_PLUS_FREQ) + priv->sm = I2C_FREQ_MODE_FAST_PLUS; + else + priv->sm = I2C_FREQ_MODE_HIGH_SPEED; priv->tft = 1; /* Tx FIFO threshold */ priv->rft = 8; /* Rx FIFO threshold */ @@ -1046,8 +1043,6 @@ static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv) struct regmap *olb; unsigned int id; - priv->has_32b_bus = true; - olb = syscon_regmap_lookup_by_phandle_args(np, "mobileye,olb", 1, &id); if (IS_ERR(olb)) return PTR_ERR(olb); @@ -1068,15 +1063,39 @@ static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv) return 0; } +#define NMK_I2C_EYEQ_FLAG_32B_BUS BIT(0) +#define NMK_I2C_EYEQ_FLAG_IS_EYEQ5 BIT(1) + +static const struct of_device_id nmk_i2c_eyeq_match_table[] = { + { + .compatible = "mobileye,eyeq5-i2c", + .data = (void *)(NMK_I2C_EYEQ_FLAG_32B_BUS | NMK_I2C_EYEQ_FLAG_IS_EYEQ5), + }, + { + .compatible = "mobileye,eyeq6h-i2c", + .data = (void *)NMK_I2C_EYEQ_FLAG_32B_BUS, + }, +}; + static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) { - int ret = 0; - struct nmk_i2c_dev *priv; + struct i2c_vendor_data *vendor = id->data; + u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1; struct device_node *np = adev->dev.of_node; + const struct of_device_id *match; struct device *dev = &adev->dev; + unsigned long match_flags = 0; + struct nmk_i2c_dev *priv; struct i2c_adapter *adap; - struct i2c_vendor_data *vendor = id->data; - u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1; + int ret = 0; + + /* + * We do not want to attach a .of_match_table to our amba driver. + * Do not convert to device_get_match_data(). 
+ */ + match = of_match_device(nmk_i2c_eyeq_match_table, dev); + if (match) + match_flags = (unsigned long)match->data; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -1084,10 +1103,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) priv->vendor = vendor; priv->adev = adev; - priv->has_32b_bus = false; + priv->has_32b_bus = match_flags & NMK_I2C_EYEQ_FLAG_32B_BUS; nmk_i2c_of_probe(np, priv); - if (of_device_is_compatible(np, "mobileye,eyeq5-i2c")) { + if (match_flags & NMK_I2C_EYEQ_FLAG_IS_EYEQ5) { ret = nmk_i2c_eyeq5_probe(priv); if (ret) return dev_err_probe(dev, ret, "failed OLB lookup\n"); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index bbcb4d6668ce..482a0074d448 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -334,6 +334,7 @@ struct npcm_i2c { u64 nack_cnt; u64 timeout_cnt; u64 tx_complete_cnt; + bool ber_state; /* Indicate the bus error state */ }; static inline void npcm_i2c_select_bank(struct npcm_i2c *bus, @@ -1521,6 +1522,7 @@ static void npcm_i2c_irq_handle_ber(struct npcm_i2c *bus) if (npcm_i2c_is_master(bus)) { npcm_i2c_master_abort(bus); } else { + bus->ber_state = true; npcm_i2c_clear_master_status(bus); /* Clear BB (BUS BUSY) bit */ @@ -1628,13 +1630,10 @@ static void npcm_i2c_irq_handle_sda(struct npcm_i2c *bus, u8 i2cst) npcm_i2c_wr_byte(bus, bus->dest_addr | BIT(0)); /* SDA interrupt, after start\restart */ } else { - if (NPCM_I2CST_XMIT & i2cst) { - bus->operation = I2C_WRITE_OPER; + if (bus->operation == I2C_WRITE_OPER) npcm_i2c_irq_master_handler_write(bus); - } else { - bus->operation = I2C_READ_OPER; + else if (bus->operation == I2C_READ_OPER) npcm_i2c_irq_master_handler_read(bus); - } } } @@ -1702,6 +1701,7 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck", bus->num, bus->dest_addr); npcm_i2c_reset(bus); + bus->ber_state = false; return 0; } @@ -1766,6 +1766,7 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) if (bus->rec_succ_cnt < ULLONG_MAX) bus->rec_succ_cnt++; } + bus->ber_state = false; return status; } @@ -2161,7 +2162,16 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } while (time_is_after_jiffies(time_left) && bus_busy); - if (bus_busy) { + /* + * Check the BER (bus error) state, when ber_state is true, it means that the module + * detects the bus error which is caused by some factor like that the electricity + * noise occurs on the bus. Under this condition, the module is reset and the bus + * gets recovered. + * + * While ber_state is false, the module reset and bus recovery also get done as the + * bus is busy. 
+ */ + if (bus_busy || bus->ber_state) { iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); npcm_i2c_reset(bus); i2c_recover_bus(adap); @@ -2363,7 +2373,7 @@ MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table); static struct platform_driver npcm_i2c_bus_driver = { .probe = npcm_i2c_probe_bus, - .remove_new = npcm_i2c_remove_bus, + .remove = npcm_i2c_remove_bus, .driver = { .name = "nuvoton-i2c", .of_match_table = npcm_i2c_bus_of_table, diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 482b37c8a129..0f67e57cdeff 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -769,7 +769,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm, static struct platform_driver ocores_i2c_driver = { .probe = ocores_i2c_probe, - .remove_new = ocores_i2c_remove, + .remove = ocores_i2c_remove, .driver = { .name = "ocores-i2c", .of_match_table = ocores_i2c_match, diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c index dc6dff95c68c..edfca7b20f29 100644 --- a/drivers/i2c/busses/i2c-octeon-platdrv.c +++ b/drivers/i2c/busses/i2c-octeon-platdrv.c @@ -269,7 +269,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match); static struct platform_driver octeon_i2c_driver = { .probe = octeon_i2c_probe, - .remove_new = octeon_i2c_remove, + .remove = octeon_i2c_remove, .driver = { .name = DRV_NAME, .of_match_table = octeon_i2c_match, diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1d9ad25c89ae..92faf03d64cf 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1605,7 +1605,7 @@ static const struct dev_pm_ops omap_i2c_pm_ops = { static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, - .remove_new = omap_i2c_remove, + .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", .pm = pm_ptr(&omap_i2c_pm_ops), diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c index d9dd71cf37fd..c9b62892397a 100644 --- a/drivers/i2c/busses/i2c-opal.c +++ b/drivers/i2c/busses/i2c-opal.c @@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, i2c_opal_of_match); static struct platform_driver i2c_opal_driver = { .probe = i2c_opal_probe, - .remove_new = i2c_opal_remove, + .remove = i2c_opal_remove, .driver = { .name = "i2c-opal", .of_match_table = i2c_opal_of_match, diff --git a/drivers/i2c/busses/i2c-pasemi-platform.c b/drivers/i2c/busses/i2c-pasemi-platform.c index 5fbfb9b41744..a486a37d3863 100644 --- a/drivers/i2c/busses/i2c-pasemi-platform.c +++ b/drivers/i2c/busses/i2c-pasemi-platform.c @@ -104,7 +104,7 @@ static struct platform_driver pasemi_platform_i2c_driver = { .of_match_table = pasemi_platform_i2c_of_match, }, .probe = pasemi_platform_i2c_probe, - .remove_new = pasemi_platform_i2c_remove, + .remove = pasemi_platform_i2c_remove, }; module_platform_driver(pasemi_platform_i2c_driver); diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index b8d5480c54f6..87da8241b927 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, i2c_pca_of_match_table); static struct platform_driver i2c_pca_pf_driver = { .probe = i2c_pca_pf_probe, - .remove_new = i2c_pca_pf_remove, + .remove = i2c_pca_pf_remove, .driver = { .name = "i2c-pca-platform", .of_match_table = of_match_ptr(i2c_pca_of_match_table), diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index febbd9950d8f..9402fa3811c5 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ 
b/drivers/i2c/busses/i2c-piix4.c @@ -35,20 +35,7 @@ #include <linux/acpi.h> #include <linux/io.h> - -/* PIIX4 SMBus address offsets */ -#define SMBHSTSTS (0 + piix4_smba) -#define SMBHSLVSTS (1 + piix4_smba) -#define SMBHSTCNT (2 + piix4_smba) -#define SMBHSTCMD (3 + piix4_smba) -#define SMBHSTADD (4 + piix4_smba) -#define SMBHSTDAT0 (5 + piix4_smba) -#define SMBHSTDAT1 (6 + piix4_smba) -#define SMBBLKDAT (7 + piix4_smba) -#define SMBSLVCNT (8 + piix4_smba) -#define SMBSHDWCMD (9 + piix4_smba) -#define SMBSLVEVT (0xA + piix4_smba) -#define SMBSLVDAT (0xC + piix4_smba) +#include "i2c-piix4.h" /* count for request_region */ #define SMBIOSIZE 9 @@ -70,7 +57,6 @@ #define PIIX4_BYTE 0x04 #define PIIX4_BYTE_DATA 0x08 #define PIIX4_WORD_DATA 0x0C -#define PIIX4_BLOCK_DATA 0x14 /* Multi-port constants */ #define PIIX4_MAX_ADAPTERS 4 @@ -101,6 +87,7 @@ #define SB800_PIIX4_FCH_PM_ADDR 0xFED80300 #define SB800_PIIX4_FCH_PM_SIZE 8 +#define SB800_ASF_ACPI_PATH "\\_SB.ASFC" /* insmod parameters */ @@ -160,11 +147,6 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { }; static const char *piix4_aux_port_name_sb800 = " port 1"; -struct sb800_mmio_cfg { - void __iomem *addr; - bool use_mmio; -}; - struct i2c_piix4_adapdata { unsigned short smba; @@ -175,8 +157,7 @@ struct i2c_piix4_adapdata { struct sb800_mmio_cfg mmio_cfg; }; -static int piix4_sb800_region_request(struct device *dev, - struct sb800_mmio_cfg *mmio_cfg) +int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { void __iomem *addr; @@ -214,9 +195,9 @@ static int piix4_sb800_region_request(struct device *dev, return 0; } +EXPORT_SYMBOL_NS_GPL(piix4_sb800_region_request, PIIX4_SMBUS); -static void piix4_sb800_region_release(struct device *dev, - struct sb800_mmio_cfg *mmio_cfg) +void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { iounmap(mmio_cfg->addr); @@ -227,6 +208,7 @@ static void piix4_sb800_region_release(struct device *dev, release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE); } +EXPORT_SYMBOL_NS_GPL(piix4_sb800_region_release, PIIX4_SMBUS); static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev) { @@ -536,10 +518,8 @@ static int piix4_setup_aux(struct pci_dev *PIIX4_dev, return piix4_smba; } -static int piix4_transaction(struct i2c_adapter *piix4_adapter) +int piix4_transaction(struct i2c_adapter *piix4_adapter, unsigned short piix4_smba) { - struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter); - unsigned short piix4_smba = adapdata->smba; int temp; int result = 0; int timeout = 0; @@ -611,6 +591,7 @@ static int piix4_transaction(struct i2c_adapter *piix4_adapter) inb_p(SMBHSTDAT1)); return result; } +EXPORT_SYMBOL_NS_GPL(piix4_transaction, PIIX4_SMBUS); /* Return negative errno on error. 
*/ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, @@ -675,7 +656,7 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT); - status = piix4_transaction(adap); + status = piix4_transaction(adap, piix4_smba); if (status) return status; @@ -764,7 +745,7 @@ static void piix4_imc_wakeup(void) release_region(KERNCZ_IMC_IDX, 2); } -static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) +int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) { u8 smba_en_lo, val; @@ -786,6 +767,7 @@ static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) return (smba_en_lo & piix4_port_mask_sb800); } +EXPORT_SYMBOL_NS_GPL(piix4_sb800_port_sel, PIIX4_SMBUS); /* * Handles access to multiple SMBus ports on the SB800. @@ -1043,6 +1025,9 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; bool is_sb800 = false; + bool is_asf = false; + acpi_status status; + acpi_handle handle; if ((dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && @@ -1105,10 +1090,16 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) } } + status = acpi_get_handle(NULL, (acpi_string)SB800_ASF_ACPI_PATH, &handle); + if (ACPI_SUCCESS(status)) + is_asf = true; + if (dev->vendor == PCI_VENDOR_ID_AMD && (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { - retval = piix4_setup_sb800(dev, id, 1); + /* Do not setup AUX port if ASF is enabled */ + if (!is_asf) + retval = piix4_setup_sb800(dev, id, 1); } if (retval > 0) { diff --git a/drivers/i2c/busses/i2c-piix4.h b/drivers/i2c/busses/i2c-piix4.h new file mode 100644 index 000000000000..36bc6ce82a27 --- /dev/null +++ b/drivers/i2c/busses/i2c-piix4.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * PIIX4/SB800 SMBus Interfaces + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + * Sanket Goswami <Sanket.Goswami@amd.com> + */ + +#ifndef I2C_PIIX4_H +#define I2C_PIIX4_H + +#include <linux/types.h> + +/* PIIX4 SMBus address offsets */ +#define SMBHSTSTS (0x00 + piix4_smba) +#define SMBHSLVSTS (0x01 + piix4_smba) +#define SMBHSTCNT (0x02 + piix4_smba) +#define SMBHSTCMD (0x03 + piix4_smba) +#define SMBHSTADD (0x04 + piix4_smba) +#define SMBHSTDAT0 (0x05 + piix4_smba) +#define SMBHSTDAT1 (0x06 + piix4_smba) +#define SMBBLKDAT (0x07 + piix4_smba) +#define SMBSLVCNT (0x08 + piix4_smba) +#define SMBSHDWCMD (0x09 + piix4_smba) +#define SMBSLVEVT (0x0A + piix4_smba) +#define SMBSLVDAT (0x0C + piix4_smba) + +/* PIIX4 constants */ +#define PIIX4_BLOCK_DATA 0x14 + +struct sb800_mmio_cfg { + void __iomem *addr; + bool use_mmio; +}; + +int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg); +int piix4_transaction(struct i2c_adapter *piix4_adapter, unsigned short piix4_smba); +int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg); +void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg); + +#endif /* I2C_PIIX4_H */ diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 1dafadda73af..d4d139b97513 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -733,7 +733,7 @@ static struct platform_driver i2c_pnx_driver = { .pm = pm_sleep_ptr(&i2c_pnx_pm), }, .probe = i2c_pnx_probe, - .remove_new = i2c_pnx_remove, + .remove = i2c_pnx_remove, }; static int __init i2c_adap_pnx_init(void) diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index b6b03539f626..9a867c817db0 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -437,7 +437,7 @@ static int i2c_powermac_probe(struct platform_device *dev) static struct platform_driver i2c_powermac_driver = { .probe = i2c_powermac_probe, - .remove_new = i2c_powermac_remove, + .remove = i2c_powermac_remove, .driver = { .name = "i2c-powermac", .bus = &platform_bus_type, diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 4d76e71cdd4b..cb6988482673 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1574,7 +1574,7 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = { static struct platform_driver i2c_pxa_driver = { .probe = i2c_pxa_probe, - .remove_new = i2c_pxa_remove, + .remove = i2c_pxa_remove, .driver = { .name = "pxa2xx-i2c", .pm = pm_sleep_ptr(&i2c_pxa_dev_pm_ops), diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 414882c57d7f..05b73326afd4 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -120,7 +120,6 @@ struct cci_data { unsigned int num_masters; struct i2c_adapter_quirks quirks; u16 queue_size[NUM_QUEUES]; - unsigned long cci_clk_rate; struct hw_params params[3]; }; @@ -523,7 +522,6 @@ static const struct dev_pm_ops qcom_cci_pm = { static int cci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - unsigned long cci_clk_rate = 0; struct device_node *child; struct resource *r; struct cci *cci; @@ -594,22 +592,6 @@ static int cci_probe(struct platform_device *pdev) return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n"); cci->nclocks = ret; - /* Retrieve CCI clock rate */ - for (i = 0; i < cci->nclocks; i++) { - if (!strcmp(cci->clocks[i].id, "cci")) { - cci_clk_rate = clk_get_rate(cci->clocks[i].clk); - break; - } - } - - if 
(cci_clk_rate != cci->data->cci_clk_rate) { - /* cci clock set by the bootloader or via assigned clock rate - * in DT. - */ - dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n", - cci_clk_rate, cci->data->cci_clk_rate); - } - ret = cci_enable_clocks(cci); if (ret < 0) return ret; @@ -699,7 +681,6 @@ static const struct cci_data cci_v1_data = { .max_write_len = 10, .max_read_len = 12, }, - .cci_clk_rate = 19200000, .params[I2C_MODE_STANDARD] = { .thigh = 78, .tlow = 114, @@ -733,7 +714,6 @@ static const struct cci_data cci_v1_5_data = { .max_write_len = 10, .max_read_len = 12, }, - .cci_clk_rate = 19200000, .params[I2C_MODE_STANDARD] = { .thigh = 78, .tlow = 114, @@ -767,7 +747,6 @@ static const struct cci_data cci_v2_data = { .max_write_len = 11, .max_read_len = 12, }, - .cci_clk_rate = 37500000, .params[I2C_MODE_STANDARD] = { .thigh = 201, .tlow = 174, @@ -826,7 +805,7 @@ MODULE_DEVICE_TABLE(of, cci_dt_match); static struct platform_driver qcom_cci_driver = { .probe = cci_probe, - .remove_new = cci_remove, + .remove = cci_remove, .driver = { .name = "i2c-qcom-cci", .of_match_table = cci_dt_match, diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 212336f724a6..7a22e1f46e60 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -16,6 +16,7 @@ #include <linux/pm_runtime.h> #include <linux/soc/qcom/geni-se.h> #include <linux/spinlock.h> +#include <linux/units.h> #define SE_I2C_TX_TRANS_LEN 0x26c #define SE_I2C_RX_TRANS_LEN 0x270 @@ -146,22 +147,36 @@ struct geni_i2c_clk_fld { * clk_freq_out = t / t_cycle * source_clock = 19.2 MHz */ -static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = { +static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = { {KHZ(100), 7, 10, 11, 26}, {KHZ(400), 2, 5, 12, 24}, {KHZ(1000), 1, 3, 9, 18}, + {}, +}; + +/* source_clock = 32 MHz */ +static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = { + {KHZ(100), 8, 14, 18, 40}, + {KHZ(400), 4, 3, 11, 20}, + {KHZ(1000), 2, 3, 6, 15}, + {}, }; static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c) { - int i; - const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map; + const struct geni_i2c_clk_fld *itr; - for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) { + if (clk_get_rate(gi2c->se.clk) == 32 * HZ_PER_MHZ) + itr = geni_i2c_clk_map_32mhz; + else + itr = geni_i2c_clk_map_19p2mhz; + + while (itr->clk_freq_out != 0) { if (itr->clk_freq_out == gi2c->clk_freq_out) { gi2c->clk_fld = itr; return 0; } + itr++; } return -EINVAL; } @@ -818,6 +833,8 @@ static int geni_i2c_probe(struct platform_device *pdev) init_completion(&gi2c->done); spin_lock_init(&gi2c->lock); platform_set_drvdata(pdev, gi2c); + + /* Keep interrupts disabled initially to allow for low-power modes */ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN, dev_name(dev), gi2c); if (ret) { @@ -1049,7 +1066,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match); static struct platform_driver geni_i2c_driver = { .probe = geni_i2c_probe, - .remove_new = geni_i2c_remove, + .remove = geni_i2c_remove, .shutdown = geni_i2c_shutdown, .driver = { .name = "geni_i2c", diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index d480162a4d39..da20b4487c9a 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -17,9 +17,9 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include 
<linux/property.h> #include <linux/scatterlist.h> /* QUP Registers */ @@ -1683,7 +1683,7 @@ static int qup_i2c_probe(struct platform_device *pdev) } } - if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) { + if (device_is_compatible(&pdev->dev, "qcom,i2c-qup-v1.1.1")) { qup->adap.algo = &qup_i2c_algo; qup->adap.quirks = &qup_i2c_quirks; is_qup_v1 = true; @@ -1974,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, qup_i2c_dt_match); static struct platform_driver qup_i2c_driver = { .probe = qup_i2c_probe, - .remove_new = qup_i2c_remove, + .remove = qup_i2c_remove, .driver = { .name = "i2c_qup", .pm = pm_ptr(&qup_i2c_qup_pm_ops), diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 9267df38c2d0..a7b77d14ee86 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -1271,7 +1271,7 @@ static struct platform_driver rcar_i2c_driver = { .pm = pm_sleep_ptr(&rcar_i2c_pm_ops), }, .probe = rcar_i2c_probe, - .remove_new = rcar_i2c_remove, + .remove = rcar_i2c_remove, }; module_platform_driver(rcar_i2c_driver); diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index c7f3a4c02470..c218f73c3650 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -632,7 +632,7 @@ static const struct of_device_id riic_i2c_dt_ids[] = { static struct platform_driver riic_i2c_driver = { .probe = riic_i2c_probe, - .remove_new = riic_i2c_remove, + .remove = riic_i2c_remove, .driver = { .name = "i2c-riic", .of_match_table = riic_i2c_dt_ids, diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 4ef9bad77b85..d4e9196445c0 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -1398,7 +1398,7 @@ static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume); static struct platform_driver rk3x_i2c_driver = { .probe = rk3x_i2c_probe, - .remove_new = rk3x_i2c_remove, + .remove = rk3x_i2c_remove, .driver = { .name = "rk3x-i2c", .of_match_table = rk3x_i2c_match, diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c new file mode 100644 index 000000000000..e064e8a4a1f0 --- /dev/null +++ b/drivers/i2c/busses/i2c-rtl9300.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/bits.h> +#include <linux/i2c.h> +#include <linux/i2c-mux.h> +#include <linux/mod_devicetable.h> +#include <linux/mfd/syscon.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +enum rtl9300_bus_freq { + RTL9300_I2C_STD_FREQ, + RTL9300_I2C_FAST_FREQ, +}; + +struct rtl9300_i2c; + +struct rtl9300_i2c_chan { + struct i2c_adapter adap; + struct rtl9300_i2c *i2c; + enum rtl9300_bus_freq bus_freq; + u8 sda_pin; +}; + +#define RTL9300_I2C_MUX_NCHAN 8 + +struct rtl9300_i2c { + struct regmap *regmap; + struct device *dev; + struct rtl9300_i2c_chan chans[RTL9300_I2C_MUX_NCHAN]; + u32 reg_base; + u8 sda_pin; + struct mutex lock; +}; + +#define RTL9300_I2C_MST_CTRL1 0x0 +#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS 8 +#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK GENMASK(31, 8) +#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS 4 +#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK GENMASK(6, 4) +#define RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL BIT(3) +#define RTL9300_I2C_MST_CTRL1_RWOP BIT(2) +#define RTL9300_I2C_MST_CTRL1_I2C_FAIL BIT(1) +#define RTL9300_I2C_MST_CTRL1_I2C_TRIG BIT(0) +#define RTL9300_I2C_MST_CTRL2 0x4 +#define RTL9300_I2C_MST_CTRL2_RD_MODE BIT(15) +#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS 8 +#define 
RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK GENMASK(14, 8) +#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS 4 +#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK GENMASK(7, 4) +#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS 2 +#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK GENMASK(3, 2) +#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS 0 +#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK GENMASK(1, 0) +#define RTL9300_I2C_MST_DATA_WORD0 0x8 +#define RTL9300_I2C_MST_DATA_WORD1 0xc +#define RTL9300_I2C_MST_DATA_WORD2 0x10 +#define RTL9300_I2C_MST_DATA_WORD3 0x14 + +#define RTL9300_I2C_MST_GLB_CTRL 0x384 + +static int rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len) +{ + u32 val, mask; + int ret; + + val = len << RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS; + mask = RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK; + + ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val); + if (ret) + return ret; + + val = reg << RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS; + mask = RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK; + + return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); +} + +static int rtl9300_i2c_config_io(struct rtl9300_i2c *i2c, u8 sda_pin) +{ + int ret; + u32 val, mask; + + ret = regmap_update_bits(i2c->regmap, RTL9300_I2C_MST_GLB_CTRL, BIT(sda_pin), BIT(sda_pin)); + if (ret) + return ret; + + val = (sda_pin << RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS) | + RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL; + mask = RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK | RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL; + + return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); +} + +static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan, + u16 addr, u16 len) +{ + u32 val, mask; + + val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS; + mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK; + + val |= addr << RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS; + mask |= RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK; + + val |= ((len - 1) & 0xf) << RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS; + mask |= RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK; + + mask |= RTL9300_I2C_MST_CTRL2_RD_MODE; + + return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val); +} + +static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len) +{ + u32 vals[4] = {}; + int i, ret; + + if (len > 16) + return -EIO; + + ret = regmap_bulk_read(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, + vals, ARRAY_SIZE(vals)); + if (ret) + return ret; + + for (i = 0; i < len; i++) { + buf[i] = vals[i/4] & 0xff; + vals[i/4] >>= 8; + } + + return 0; +} + +static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len) +{ + u32 vals[4] = {}; + int i; + + if (len > 16) + return -EIO; + + for (i = 0; i < len; i++) { + if (i % 4 == 0) + vals[i/4] = 0; + vals[i/4] <<= 8; + vals[i/4] |= buf[i]; + } + + return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, + vals, ARRAY_SIZE(vals)); +} + +static int rtl9300_i2c_writel(struct rtl9300_i2c *i2c, u32 data) +{ + return regmap_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, data); +} + +static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write, + int size, union i2c_smbus_data *data, int len) +{ + u32 val, mask; + int ret; + + val = read_write == I2C_SMBUS_WRITE ? 
RTL9300_I2C_MST_CTRL1_RWOP : 0; + mask = RTL9300_I2C_MST_CTRL1_RWOP; + + val |= RTL9300_I2C_MST_CTRL1_I2C_TRIG; + mask |= RTL9300_I2C_MST_CTRL1_I2C_TRIG; + + ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, + val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 2000); + if (ret) + return ret; + + if (val & RTL9300_I2C_MST_CTRL1_I2C_FAIL) + return -EIO; + + if (read_write == I2C_SMBUS_READ) { + if (size == I2C_SMBUS_BYTE || size == I2C_SMBUS_BYTE_DATA) { + ret = regmap_read(i2c->regmap, + i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val); + if (ret) + return ret; + data->byte = val & 0xff; + } else if (size == I2C_SMBUS_WORD_DATA) { + ret = regmap_read(i2c->regmap, + i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val); + if (ret) + return ret; + data->word = val & 0xffff; + } else { + ret = rtl9300_i2c_read(i2c, &data->block[0], len); + if (ret) + return ret; + } + } + + return 0; +} + +static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, + char read_write, u8 command, int size, + union i2c_smbus_data *data) +{ + struct rtl9300_i2c_chan *chan = i2c_get_adapdata(adap); + struct rtl9300_i2c *i2c = chan->i2c; + int len = 0, ret; + + mutex_lock(&i2c->lock); + if (chan->sda_pin != i2c->sda_pin) { + ret = rtl9300_i2c_config_io(i2c, chan->sda_pin); + if (ret) + goto out_unlock; + i2c->sda_pin = chan->sda_pin; + } + + switch (size) { + case I2C_SMBUS_QUICK: + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0); + if (ret) + goto out_unlock; + break; + + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + } else { + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0); + if (ret) + goto out_unlock; + } + break; + + case I2C_SMBUS_BYTE_DATA: + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1); + if (ret) + goto out_unlock; + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_writel(i2c, data->byte); + if (ret) + goto out_unlock; + } + break; + + case I2C_SMBUS_WORD_DATA: + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 2); + if (ret) + goto out_unlock; + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_writel(i2c, data->word); + if (ret) + goto out_unlock; + } + break; + + case I2C_SMBUS_BLOCK_DATA: + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0]); + if (ret) + goto out_unlock; + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_write(i2c, &data->block[1], data->block[0]); + if (ret) + goto out_unlock; + } + len = data->block[0]; + break; + + default: + dev_err(&adap->dev, "Unsupported transaction %d\n", size); + ret = -EOPNOTSUPP; + goto out_unlock; + } + + ret = rtl9300_i2c_execute_xfer(i2c, read_write, size, data, len); + +out_unlock: + mutex_unlock(&i2c->lock); + + return ret; +} + +static u32 rtl9300_i2c_func(struct i2c_adapter *a) +{ + return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | 
I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA; +} + +static const struct i2c_algorithm rtl9300_i2c_algo = { + .smbus_xfer = rtl9300_i2c_smbus_xfer, + .functionality = rtl9300_i2c_func, +}; + +static struct i2c_adapter_quirks rtl9300_i2c_quirks = { + .flags = I2C_AQ_NO_CLK_STRETCH, + .max_read_len = 16, + .max_write_len = 16, +}; + +static int rtl9300_i2c_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rtl9300_i2c *i2c; + u32 clock_freq, sda_pin; + int ret, i = 0; + struct fwnode_handle *child; + + i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + i2c->regmap = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(i2c->regmap)) + return PTR_ERR(i2c->regmap); + i2c->dev = dev; + + mutex_init(&i2c->lock); + + ret = device_property_read_u32(dev, "reg", &i2c->reg_base); + if (ret) + return ret; + + platform_set_drvdata(pdev, i2c); + + if (device_get_child_node_count(dev) >= RTL9300_I2C_MUX_NCHAN) + return dev_err_probe(dev, -EINVAL, "Too many channels\n"); + + device_for_each_child_node(dev, child) { + struct rtl9300_i2c_chan *chan = &i2c->chans[i]; + struct i2c_adapter *adap = &chan->adap; + + ret = fwnode_property_read_u32(child, "reg", &sda_pin); + if (ret) + return ret; + + ret = fwnode_property_read_u32(child, "clock-frequency", &clock_freq); + if (ret) + clock_freq = I2C_MAX_STANDARD_MODE_FREQ; + + switch (clock_freq) { + case I2C_MAX_STANDARD_MODE_FREQ: + chan->bus_freq = RTL9300_I2C_STD_FREQ; + break; + + case I2C_MAX_FAST_MODE_FREQ: + chan->bus_freq = RTL9300_I2C_FAST_FREQ; + break; + default: + dev_warn(i2c->dev, "SDA%d clock-frequency %d not supported using default\n", + sda_pin, clock_freq); + break; + } + + chan->sda_pin = sda_pin; + chan->i2c = i2c; + adap = &i2c->chans[i].adap; + adap->owner = THIS_MODULE; + adap->algo = &rtl9300_i2c_algo; + adap->quirks = &rtl9300_i2c_quirks; + adap->retries = 3; + adap->dev.parent = dev; + i2c_set_adapdata(adap, chan); + adap->dev.of_node = to_of_node(child); + snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_pin); + i++; + + ret = devm_i2c_add_adapter(dev, adap); + if (ret) + return ret; + } + i2c->sda_pin = 0xff; + + return 0; +} + +static const struct of_device_id i2c_rtl9300_dt_ids[] = { + { .compatible = "realtek,rtl9301-i2c" }, + { .compatible = "realtek,rtl9302b-i2c" }, + { .compatible = "realtek,rtl9302c-i2c" }, + { .compatible = "realtek,rtl9303-i2c" }, + {} +}; +MODULE_DEVICE_TABLE(of, i2c_rtl9300_dt_ids); + +static struct platform_driver rtl9300_i2c_driver = { + .probe = rtl9300_i2c_probe, + .driver = { + .name = "i2c-rtl9300", + .of_match_table = i2c_rtl9300_dt_ids, + }, +}; + +module_platform_driver(rtl9300_i2c_driver); + +MODULE_DESCRIPTION("RTL9300 I2C controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c index 8380a68538ab..02b76e24a476 100644 --- a/drivers/i2c/busses/i2c-rzv2m.c +++ b/drivers/i2c/busses/i2c-rzv2m.c @@ -536,7 +536,7 @@ static struct platform_driver rzv2m_i2c_driver = { .pm = pm_sleep_ptr(&rzv2m_i2c_pm_ops), }, .probe = rzv2m_i2c_probe, - .remove_new = rzv2m_i2c_remove, + .remove = rzv2m_i2c_remove, }; module_platform_driver(rzv2m_i2c_driver); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 7698d9d59744..0f3cf500df68 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1176,7 +1176,7 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { static struct 
platform_driver s3c24xx_i2c_driver = { .probe = s3c24xx_i2c_probe, - .remove_new = s3c24xx_i2c_remove, + .remove = s3c24xx_i2c_remove, .id_table = s3c24xx_driver_ids, .driver = { .name = "s3c-i2c", diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index d7af8e0d7599..10a5146b3aa5 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -411,7 +411,7 @@ static void smbus_cmi_remove(struct platform_device *device) static struct platform_driver smbus_cmi_driver = { .probe = smbus_cmi_probe, - .remove_new = smbus_cmi_remove, + .remove = smbus_cmi_remove, .driver = { .name = "smbus_cmi", .acpi_match_table = acpi_smbus_cmi_ids, diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c index 8a043f5fca1e..620f12596763 100644 --- a/drivers/i2c/busses/i2c-sh7760.c +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -552,7 +552,7 @@ static struct platform_driver sh7760_i2c_drv = { .name = SH7760_I2C_DEVNAME, }, .probe = sh7760_i2c_probe, - .remove_new = sh7760_i2c_remove, + .remove = sh7760_i2c_remove, }; module_platform_driver(sh7760_i2c_drv); diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index f86c29737df1..efe29621b8d7 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -983,7 +983,7 @@ static struct platform_driver sh_mobile_i2c_driver = { .pm = pm_sleep_ptr(&sh_mobile_i2c_pm_ops), }, .probe = sh_mobile_i2c_probe, - .remove_new = sh_mobile_i2c_remove, + .remove = sh_mobile_i2c_remove, }; static int __init sh_mobile_i2c_adap_init(void) diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index 18516bc64e04..d90606048611 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c @@ -144,7 +144,7 @@ static struct platform_driver simtec_i2c_driver = { .name = "simtec-i2c", }, .probe = simtec_i2c_probe, - .remove_new = simtec_i2c_remove, + .remove = simtec_i2c_remove, }; module_platform_driver(simtec_i2c_driver); diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 9c45e75b9187..56b2e5c5fb49 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -643,7 +643,7 @@ MODULE_DEVICE_TABLE(of, sprd_i2c_of_match); static struct platform_driver sprd_i2c_driver = { .probe = sprd_i2c_probe, - .remove_new = sprd_i2c_remove, + .remove = sprd_i2c_remove, .driver = { .name = "sprd-i2c", .of_match_table = sprd_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 05b19ede65a0..750fff3d2389 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -893,7 +893,7 @@ static struct platform_driver st_i2c_driver = { .pm = pm_sleep_ptr(&st_i2c_pm), }, .probe = st_i2c_probe, - .remove_new = st_i2c_remove, + .remove = st_i2c_remove, }; module_platform_driver(st_i2c_driver); diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index 230fff0c0bf9..b3d56d0aa9d0 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -869,7 +869,7 @@ static struct platform_driver stm32f4_i2c_driver = { .of_match_table = stm32f4_i2c_match, }, .probe = stm32f4_i2c_probe, - .remove_new = stm32f4_i2c_remove, + .remove = stm32f4_i2c_remove, }; module_platform_driver(stm32f4_i2c_driver); diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 0174ead99de6..973a3a8c6d4a 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -2532,7 
+2532,7 @@ static struct platform_driver stm32f7_i2c_driver = { .pm = &stm32f7_i2c_pm_ops, }, .probe = stm32f7_i2c_probe, - .remove_new = stm32f7_i2c_remove, + .remove = stm32f7_i2c_remove, }; module_platform_driver(stm32f7_i2c_driver); diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index 074eade6c4a4..fb5280b8cf7f 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -319,7 +319,7 @@ static void p2wi_remove(struct platform_device *dev) static struct platform_driver p2wi_driver = { .probe = p2wi_probe, - .remove_new = p2wi_remove, + .remove = p2wi_remove, .driver = { .name = "i2c-sunxi-p2wi", .of_match_table = p2wi_of_match_table, diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index bbb9062669e4..31f8d08e32a4 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -629,7 +629,7 @@ MODULE_DEVICE_TABLE(acpi, synquacer_i2c_acpi_ids); static struct platform_driver synquacer_i2c_driver = { .probe = synquacer_i2c_probe, - .remove_new = synquacer_i2c_remove, + .remove = synquacer_i2c_remove, .driver = { .name = "synquacer_i2c", .of_match_table = of_match_ptr(synquacer_i2c_dt_ids), diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c index dabadbcc6d6a..bb0de6db6391 100644 --- a/drivers/i2c/busses/i2c-tegra-bpmp.c +++ b/drivers/i2c/busses/i2c-tegra-bpmp.c @@ -335,7 +335,7 @@ static struct platform_driver tegra_bpmp_i2c_driver = { .of_match_table = tegra_bpmp_i2c_of_match, }, .probe = tegra_bpmp_i2c_probe, - .remove_new = tegra_bpmp_i2c_remove, + .remove = tegra_bpmp_i2c_remove, }; module_platform_driver(tegra_bpmp_i2c_driver); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 1df5b4204142..87976e99e6d0 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1964,7 +1964,7 @@ MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match); static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, - .remove_new = tegra_i2c_remove, + .remove = tegra_i2c_remove, .driver = { .name = "tegra-i2c", .of_match_table = tegra_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 10a99cd08972..d877f5a1f579 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -615,7 +615,7 @@ MODULE_DEVICE_TABLE(of, uniphier_fi2c_match); static struct platform_driver uniphier_fi2c_drv = { .probe = uniphier_fi2c_probe, - .remove_new = uniphier_fi2c_remove, + .remove = uniphier_fi2c_remove, .driver = { .name = "uniphier-fi2c", .of_match_table = uniphier_fi2c_match, diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index ef5753307469..b95d50d4d7db 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, uniphier_i2c_match); static struct platform_driver uniphier_i2c_drv = { .probe = uniphier_i2c_probe, - .remove_new = uniphier_i2c_remove, + .remove = uniphier_i2c_remove, .driver = { .name = "uniphier-i2c", .of_match_table = uniphier_i2c_match, diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c index 76abfa77e200..a1ab6ef6f071 100644 --- a/drivers/i2c/busses/i2c-versatile.c +++ b/drivers/i2c/busses/i2c-versatile.c @@ -109,7 +109,7 @@ MODULE_DEVICE_TABLE(of, i2c_versatile_match); static struct platform_driver i2c_versatile_driver = { .probe = 
i2c_versatile_probe, - .remove_new = i2c_versatile_remove, + .remove = i2c_versatile_remove, .driver = { .name = "versatile-i2c", .of_match_table = i2c_versatile_match, diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c index 3415683dab91..4eb740faf268 100644 --- a/drivers/i2c/busses/i2c-viai2c-wmt.c +++ b/drivers/i2c/busses/i2c-viai2c-wmt.c @@ -169,7 +169,7 @@ static const struct of_device_id wmt_i2c_dt_ids[] = { static struct platform_driver wmt_i2c_driver = { .probe = wmt_i2c_probe, - .remove_new = wmt_i2c_remove, + .remove = wmt_i2c_remove, .driver = { .name = "wmt-i2c", .of_match_table = wmt_i2c_dt_ids, diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index 2ed4130c0339..503e2f4d6f84 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c @@ -415,7 +415,7 @@ static void vprbrd_i2c_remove(struct platform_device *pdev) static struct platform_driver vprbrd_i2c_driver = { .driver.name = "viperboard-i2c", .probe = vprbrd_i2c_probe, - .remove_new = vprbrd_i2c_remove, + .remove = vprbrd_i2c_remove, }; static int __init vprbrd_i2c_init(void) diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 658396c9eeab..663fe5604dd6 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -581,7 +581,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_slimpro_i2c_acpi_ids); static struct platform_driver xgene_slimpro_i2c_driver = { .probe = xgene_slimpro_i2c_probe, - .remove_new = xgene_slimpro_i2c_remove, + .remove = xgene_slimpro_i2c_remove, .driver = { .name = "xgene-slimpro-i2c", .of_match_table = of_match_ptr(xgene_slimpro_i2c_dt_ids), diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 1d68177241a6..c4d3eb02da09 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -1395,7 +1395,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = { static struct platform_driver xiic_i2c_driver = { .probe = xiic_i2c_probe, - .remove_new = xiic_i2c_remove, + .remove = xiic_i2c_remove, .driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(xiic_of_match), diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 08a59a920929..4d5e49b6321b 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -579,7 +579,7 @@ MODULE_DEVICE_TABLE(acpi, xlp9xx_i2c_acpi_ids); static struct platform_driver xlp9xx_i2c_driver = { .probe = xlp9xx_i2c_probe, - .remove_new = xlp9xx_i2c_remove, + .remove = xlp9xx_i2c_remove, .driver = { .name = "xlp9xx-i2c", .acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids), diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 3648382b885a..4d6abd7e92ce 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -536,7 +536,7 @@ static struct platform_driver scx200_pci_driver = { .name = "cs5535-smb", }, .probe = scx200_probe, - .remove_new = scx200_remove, + .remove = scx200_remove, }; static const struct pci_device_id scx200_isa[] = { diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index e3b96fc53b5c..0a6bdc6df172 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -712,12 +712,15 @@ int i2c_setup_smbus_alert(struct i2c_adapter *adapter) if (!parent) return 0; + /* Report serious errors */ irq = device_property_match_string(parent, "interrupt-names", "smbus_alert"); - if (irq == -EINVAL || irq == 
-ENODATA) - return 0; - else if (irq < 0) + if (irq < 0 && irq != -EINVAL && irq != -ENODATA) return irq; + /* Skip setup when no irq was found */ + if (irq < 0 && !device_property_present(parent, "smbalert-gpios")) + return 0; + return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL)); } #endif diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 61f7c4003d2f..e9577f920286 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -251,10 +251,8 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, return -EOPNOTSUPP; data_ptrs = kmalloc_array(nmsgs, sizeof(u8 __user *), GFP_KERNEL); - if (data_ptrs == NULL) { - kfree(msgs); + if (!data_ptrs) return -ENOMEM; - } res = 0; for (i = 0; i < nmsgs; i++) { @@ -302,7 +300,6 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, for (j = 0; j < i; ++j) kfree(msgs[j].buf); kfree(data_ptrs); - kfree(msgs); return res; } @@ -316,7 +313,6 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, kfree(msgs[i].buf); } kfree(data_ptrs); - kfree(msgs); return res; } @@ -446,6 +442,7 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case I2C_RDWR: { struct i2c_rdwr_ioctl_data rdwr_arg; struct i2c_msg *rdwr_pa; + int res; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data __user *)arg, @@ -467,7 +464,9 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (IS_ERR(rdwr_pa)) return PTR_ERR(rdwr_pa); - return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); + res = i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); + kfree(rdwr_pa); + return res; } case I2C_SMBUS: { @@ -540,7 +539,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo struct i2c_rdwr_ioctl_data32 rdwr_arg; struct i2c_msg32 __user *p; struct i2c_msg *rdwr_pa; - int i; + int i, res; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data32 __user *)arg, @@ -573,7 +572,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo }; } - return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); + res = i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); + kfree(rdwr_pa); + return res; } case I2C_SMBUS: { struct i2c_smbus_ioctl_data32 data32; diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c index 9fe3150378e8..0d6fbaa48248 100644 --- a/drivers/i2c/i2c-slave-testunit.c +++ b/drivers/i2c/i2c-slave-testunit.c @@ -183,6 +183,10 @@ static void i2c_slave_testunit_work(struct work_struct *work) break; case TU_CMD_SMBUS_ALERT_REQUEST: + if (!tu->gpio) { + ret = -ENOENT; + break; + } i2c_slave_unregister(tu->client); orig_addr = tu->client->addr; tu->client->addr = 0x0c; @@ -232,6 +236,9 @@ static int i2c_slave_testunit_probe(struct i2c_client *client) INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work); tu->gpio = devm_gpiod_get_index_optional(&client->dev, NULL, 0, GPIOD_OUT_LOW); + if (IS_ERR(tu->gpio)) + return PTR_ERR(tu->gpio); + if (gpiod_cansleep(tu->gpio)) { dev_err(&client->dev, "GPIO access which may sleep is not allowed\n"); return -EDEADLK; diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 8256f7aed0cf..7d40e7aa3799 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -8,6 +8,7 @@ #include <linux/device.h> #include <linux/dmi.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/interrupt.h> @@ -167,6 +168,8 @@ static int smbalert_probe(struct i2c_client *ara) struct 
i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev); struct i2c_smbus_alert *alert; struct i2c_adapter *adapter = ara->adapter; + unsigned long irqflags = IRQF_SHARED | IRQF_ONESHOT; + struct gpio_desc *gpiod; int res, irq; alert = devm_kzalloc(&ara->dev, sizeof(struct i2c_smbus_alert), @@ -179,18 +182,25 @@ static int smbalert_probe(struct i2c_client *ara) } else { irq = fwnode_irq_get_byname(dev_fwnode(adapter->dev.parent), "smbus_alert"); - if (irq <= 0) - return irq; + if (irq <= 0) { + gpiod = devm_gpiod_get(adapter->dev.parent, "smbalert", GPIOD_IN); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + + irq = gpiod_to_irq(gpiod); + if (irq <= 0) + return irq; + + irqflags |= IRQF_TRIGGER_FALLING; + } } INIT_WORK(&alert->alert, smbalert_work); alert->ara = ara; if (irq > 0) { - res = devm_request_threaded_irq(&ara->dev, irq, - NULL, smbus_alert, - IRQF_SHARED | IRQF_ONESHOT, - "smbus_alert", alert); + res = devm_request_threaded_irq(&ara->dev, irq, NULL, smbus_alert, + irqflags, "smbus_alert", alert); if (res) return res; } diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c index 7aa6e795d833..d6ef91b888c6 100644 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c @@ -190,7 +190,7 @@ MODULE_DEVICE_TABLE(of, i2c_arbitrator_of_match); static struct platform_driver i2c_arbitrator_driver = { .probe = i2c_arbitrator_probe, - .remove_new = i2c_arbitrator_remove, + .remove = i2c_arbitrator_remove, .driver = { .name = "i2c-arb-gpio-challenge", .of_match_table = i2c_arbitrator_of_match, diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 7e2686b606c0..dce18f763a09 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -314,7 +314,7 @@ static struct platform_driver i2c_demux_pinctrl_driver = { .of_match_table = i2c_demux_pinctrl_of_match, }, .probe = i2c_demux_pinctrl_probe, - .remove_new = i2c_demux_pinctrl_remove, + .remove = i2c_demux_pinctrl_remove, }; module_platform_driver(i2c_demux_pinctrl_driver); diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index 944577bb09c1..9b46b84e84fb 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -247,7 +247,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match); static struct platform_driver i2c_mux_gpio_driver = { .probe = i2c_mux_gpio_probe, - .remove_new = i2c_mux_gpio_remove, + .remove = i2c_mux_gpio_remove, .driver = { .name = "i2c-mux-gpio", .of_match_table = i2c_mux_gpio_of_match, diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c index 10d63307b14d..ab8e11661052 100644 --- a/drivers/i2c/muxes/i2c-mux-gpmux.c +++ b/drivers/i2c/muxes/i2c-mux-gpmux.c @@ -152,7 +152,7 @@ static void i2c_mux_remove(struct platform_device *pdev) static struct platform_driver i2c_mux_driver = { .probe = i2c_mux_probe, - .remove_new = i2c_mux_remove, + .remove = i2c_mux_remove, .driver = { .name = "i2c-mux-gpmux", .of_match_table = i2c_mux_of_match, diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c index 3f06aa3331a7..1c2debcf379c 100644 --- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c +++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c @@ -182,7 +182,7 @@ static struct platform_driver mlxcpld_mux_driver = { .name = "i2c-mux-mlxcpld", }, .probe = mlxcpld_mux_probe, - .remove_new = mlxcpld_mux_remove, + .remove = mlxcpld_mux_remove, }; 
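/*
 * The long run of one-line hunks in the busses and muxes above is a single
 * mechanical rename: struct platform_driver's void-returning teardown
 * callback, introduced as .remove_new during the int-to-void transition, is
 * now simply .remove, so each converted driver only changes the member name.
 * A minimal sketch of the resulting shape, buildable against a kernel where
 * the rename has landed (the foo_* names are illustrative, not from this
 * series):
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* claim the device; nothing to set up in this sketch */
}

static void foo_remove(struct platform_device *pdev)
{
	/* returns void: teardown failures can no longer be reported */
}

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.remove = foo_remove,	/* was .remove_new before this rename */
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Illustrative skeleton for the .remove_new rename");
MODULE_LICENSE("GPL");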
module_platform_driver(mlxcpld_mux_driver); diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 02aaf0781e9c..fc686a350ae8 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -186,7 +186,7 @@ static struct platform_driver i2c_mux_pinctrl_driver = { .of_match_table = i2c_mux_pinctrl_of_match, }, .probe = i2c_mux_pinctrl_probe, - .remove_new = i2c_mux_pinctrl_remove, + .remove = i2c_mux_pinctrl_remove, }; module_platform_driver(i2c_mux_pinctrl_driver); diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index ef765fcd33f5..dfa472d514cc 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c @@ -247,7 +247,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match); static struct platform_driver i2c_mux_reg_driver = { .probe = i2c_mux_reg_probe, - .remove_new = i2c_mux_reg_remove, + .remove = i2c_mux_reg_remove, .driver = { .name = "i2c-mux-reg", .of_match_table = of_match_ptr(i2c_mux_reg_of_match), diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 6f3eb710a75d..42310c9a00c2 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -282,7 +282,8 @@ static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *e struct i3c_device_info devinfo; u16 manuf, part, ext; - i3c_device_get_info(i3cdev, &devinfo); + if (i3cdev->desc) + devinfo = i3cdev->desc->info; manuf = I3C_PID_MANUF_ID(devinfo.pid); part = I3C_PID_PART_ID(devinfo.pid); ext = I3C_PID_EXTRA_INFO(devinfo.pid); @@ -345,10 +346,10 @@ const struct bus_type i3c_bus_type = { EXPORT_SYMBOL_GPL(i3c_bus_type); static enum i3c_addr_slot_status -i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) +i3c_bus_get_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, u32 mask) { unsigned long status; - int bitpos = addr * 2; + int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS; if (addr > I2C_MAX_ADDR) return I3C_ADDR_SLOT_RSVD; @@ -356,22 +357,33 @@ i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) status = bus->addrslots[bitpos / BITS_PER_LONG]; status >>= bitpos % BITS_PER_LONG; - return status & I3C_ADDR_SLOT_STATUS_MASK; + return status & mask; } -static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr, - enum i3c_addr_slot_status status) +static enum i3c_addr_slot_status +i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) +{ + return i3c_bus_get_addr_slot_status_mask(bus, addr, I3C_ADDR_SLOT_STATUS_MASK); +} + +static void i3c_bus_set_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, + enum i3c_addr_slot_status status, u32 mask) { - int bitpos = addr * 2; + int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS; unsigned long *ptr; if (addr > I2C_MAX_ADDR) return; ptr = bus->addrslots + (bitpos / BITS_PER_LONG); - *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK << - (bitpos % BITS_PER_LONG)); - *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG); + *ptr &= ~((unsigned long)mask << (bitpos % BITS_PER_LONG)); + *ptr |= ((unsigned long)status & mask) << (bitpos % BITS_PER_LONG); +} + +static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr, + enum i3c_addr_slot_status status) +{ + i3c_bus_set_addr_slot_status_mask(bus, addr, status, I3C_ADDR_SLOT_STATUS_MASK); } static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr) @@ -383,13 +395,44 @@ static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr) return status == I3C_ADDR_SLOT_FREE; } +/* + * ┌────┬─────────────┬───┬─────────┬───┐ + * │S/Sr│ 7'h7E RnW=0 │ACK│ 
ENTDAA │ T ├────┐ + * └────┴─────────────┴───┴─────────┴───┘ │ + * ┌─────────────────────────────────────────┘ + * │ ┌──┬─────────────┬───┬─────────────────┬────────────────┬───┬─────────┐ + * └─►│Sr│7'h7E RnW=1 │ACK│48bit UID BCR DCR│Assign 7bit Addr│PAR│ ACK/NACK│ + * └──┴─────────────┴───┴─────────────────┴────────────────┴───┴─────────┘ + * Some master controllers (such as HCI) need to prepare the entire above transaction before + * sending it out to the I3C bus. This means that a 7-bit dynamic address needs to be allocated + * before knowing the target device's UID information. + * + * However, some I3C targets may request specific addresses (called "init_dyn_addr"), typically + * specified by the DT's assigned-address property. Lower addresses have higher IBI priority. + * When one is available, i3c_bus_get_free_addr() preferably returns a free address that is not + * in the list of desired addresses. This allows a device with an "init_dyn_addr" to switch to + * that address when it hot-joins the I3C bus. Otherwise, if the "init_dyn_addr" is already in + * use by another I3C device, the target device will not be able to switch to its desired + * address. + * + * If that step fails, fall back to returning one of the remaining unassigned addresses, + * regardless of its state in the desired list. + */ static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr) { enum i3c_addr_slot_status status; u8 addr; for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) { - status = i3c_bus_get_addr_slot_status(bus, addr); + status = i3c_bus_get_addr_slot_status_mask(bus, addr, + I3C_ADDR_SLOT_EXT_STATUS_MASK); + if (status == I3C_ADDR_SLOT_FREE) + return addr; + } + + for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) { + status = i3c_bus_get_addr_slot_status_mask(bus, addr, + I3C_ADDR_SLOT_STATUS_MASK); if (status == I3C_ADDR_SLOT_FREE) return addr; } @@ -1417,7 +1460,7 @@ static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev) I3C_ADDR_SLOT_FREE); if (dev->boardinfo && dev->boardinfo->init_dyn_addr) - i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr, + i3c_bus_set_addr_slot_status(&master->bus, dev->boardinfo->init_dyn_addr, I3C_ADDR_SLOT_FREE); } @@ -1506,16 +1549,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr) { struct i3c_master_controller *master = i3c_dev_get_master(dev); - enum i3c_addr_slot_status status; int ret; - if (dev->info.dyn_addr != old_dyn_addr && - (!dev->boardinfo || - dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) { - status = i3c_bus_get_addr_slot_status(&master->bus, - dev->info.dyn_addr); - if (status != I3C_ADDR_SLOT_FREE) - return -EBUSY; + if (dev->info.dyn_addr != old_dyn_addr) { i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr, I3C_ADDR_SLOT_I3C_DEV); @@ -1918,9 +1954,11 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) goto err_rstdaa; } - i3c_bus_set_addr_slot_status(&master->bus, - i3cboardinfo->init_dyn_addr, - I3C_ADDR_SLOT_I3C_DEV); + /* Do not mark as occupied until a real device exists on the bus */ + i3c_bus_set_addr_slot_status_mask(&master->bus, + i3cboardinfo->init_dyn_addr, + I3C_ADDR_SLOT_EXT_DESIRED, + I3C_ADDR_SLOT_EXT_STATUS_MASK); /* * Only try to create/attach devices that have a static @@ -2051,11 +2089,16 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, ibireq.max_payload_len = olddev->ibi->max_payload_len; ibireq.num_slots = olddev->ibi->num_slots; - if
(olddev->ibi->enabled) { + if (olddev->ibi->enabled) enable_ibi = true; - i3c_dev_disable_ibi_locked(olddev); - } - + /* + * The olddev should not receive any commands on the + * i3c bus as it does not exist and has been assigned + * a new address. This will result in NACK or timeout. + * So, update the olddev->ibi->enabled flag to false + * to avoid DISEC with OldAddr. + */ + olddev->ibi->enabled = false; i3c_dev_free_ibi_locked(olddev); } mutex_unlock(&olddev->ibi_lock); @@ -2083,7 +2126,8 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, else expected_dyn_addr = newdev->info.dyn_addr; - if (newdev->info.dyn_addr != expected_dyn_addr) { + if (newdev->info.dyn_addr != expected_dyn_addr && + i3c_bus_get_addr_slot_status(&master->bus, expected_dyn_addr) == I3C_ADDR_SLOT_FREE) { /* * Try to apply the expected dynamic address. If it fails, keep * the address assigned by the master. diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 8d694672c110..5b5c2e4bdc51 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -220,6 +220,14 @@ #define XFER_TIMEOUT (msecs_to_jiffies(1000)) #define RPM_AUTOSUSPEND_TIMEOUT 1000 /* ms */ + +/* Timing values to configure 12.5MHz frequency */ +#define AMD_I3C_OD_TIMING 0x4C007C +#define AMD_I3C_PP_TIMING 0x8001A + +/* List of quirks */ +#define AMD_I3C_OD_PP_TIMING BIT(1) + struct dw_i3c_cmd { u32 cmd_lo; u32 cmd_hi; @@ -794,6 +802,12 @@ static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc) return ret; } +static void amd_configure_od_pp_quirk(struct dw_i3c_master *master) +{ + master->i3c_od_timing = AMD_I3C_OD_TIMING; + master->i3c_pp_timing = AMD_I3C_PP_TIMING; +} + static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, struct i3c_ccc_cmd *ccc) { @@ -803,6 +817,13 @@ static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, if (ccc->id == I3C_CCC_ENTDAA) return -EINVAL; + /* AMD platform specific OD and PP timings */ + if (master->quirks & AMD_I3C_OD_PP_TIMING) { + amd_configure_od_pp_quirk(master); + writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING); + writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING); + } + ret = pm_runtime_resume_and_get(master->dev); if (ret < 0) { dev_err(master->dev, @@ -1602,6 +1623,8 @@ int dw_i3c_common_probe(struct dw_i3c_master *master, master->maxdevs = ret >> 16; master->free_pos = GENMASK(master->maxdevs - 1, 0); + master->quirks = (unsigned long)device_get_match_data(&pdev->dev); + INIT_WORK(&master->hj_work, dw_i3c_hj_work); ret = i3c_master_register(&master->base, &pdev->dev, &dw_mipi_i3c_ops, false); @@ -1675,6 +1698,10 @@ static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master) static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master) { + /* AMD platform specific OD and PP timings */ + if (master->quirks & AMD_I3C_OD_PP_TIMING) + amd_configure_od_pp_quirk(master); + writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING); writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING); writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING); @@ -1748,12 +1775,19 @@ static const struct of_device_id dw_i3c_master_of_match[] = { }; MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match); +static const struct acpi_device_id amd_i3c_device_match[] = { + { "AMDI0015", AMD_I3C_OD_PP_TIMING }, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match); + static struct platform_driver dw_i3c_driver = { .probe = 
dw_i3c_probe, .remove_new = dw_i3c_remove, .driver = { .name = "dw-i3c-master", .of_match_table = dw_i3c_master_of_match, + .acpi_match_table = amd_i3c_device_match, .pm = &dw_i3c_pm_ops, }, }; diff --git a/drivers/i3c/master/dw-i3c-master.h b/drivers/i3c/master/dw-i3c-master.h index 219ff815d3a7..c5cb695c16ab 100644 --- a/drivers/i3c/master/dw-i3c-master.h +++ b/drivers/i3c/master/dw-i3c-master.h @@ -50,6 +50,7 @@ struct dw_i3c_master { u32 bus_free_timing; u32 i2c_fm_timing; u32 i2c_fmp_timing; + u32 quirks; /* * Per-device hardware data, used to manage the device address table * (DAT) diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index a82c47c9986d..e6e482a259b4 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -80,8 +80,6 @@ #define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */ #define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */ #define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */ -#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */ -#define INTR_HC_RINGS GENMASK(7, 0) #define DAT_SECTION 0x30 /* Device Address Table */ #define DAT_ENTRY_SIZE GENMASK(31, 28) @@ -438,7 +436,8 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev) kfree(dev_data); return ret; } - mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr); + mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, + dev->info.dyn_addr ?: dev->info.static_addr); dev_data->dat_idx = ret; } i3c_dev_set_master_data(dev, dev_data); @@ -597,9 +596,6 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) if (val) { reg_write(INTR_STATUS, val); - } else { - /* v1.0 does not have PIO cascaded notification bits */ - val |= INTR_HC_PIO; } if (val & INTR_HC_RESET_CANCEL) { @@ -610,14 +606,9 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) dev_err(&hci->master.dev, "Host Controller Internal Error\n"); val &= ~INTR_HC_INTERNAL_ERR; } - if (val & INTR_HC_PIO) { - hci->io->irq_handler(hci, 0); - val &= ~INTR_HC_PIO; - } - if (val & INTR_HC_RINGS) { - hci->io->irq_handler(hci, val & INTR_HC_RINGS); - val &= ~INTR_HC_RINGS; - } + + hci->io->irq_handler(hci); + if (val) dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val); else diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c index a918e96b21fd..e8e56a8d2057 100644 --- a/drivers/i3c/master/mipi-i3c-hci/dma.c +++ b/drivers/i3c/master/mipi-i3c-hci/dma.c @@ -159,10 +159,10 @@ static void hci_dma_cleanup(struct i3c_hci *hci) for (i = 0; i < rings->total; i++) { rh = &rings->headers[i]; + rh_reg_write(INTR_SIGNAL_ENABLE, 0); rh_reg_write(RING_CONTROL, 0); rh_reg_write(CR_SETUP, 0); rh_reg_write(IBI_SETUP, 0); - rh_reg_write(INTR_SIGNAL_ENABLE, 0); if (rh->xfer) dma_free_coherent(&hci->master.dev, @@ -733,20 +733,16 @@ done: rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks); } -static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask) +static bool hci_dma_irq_handler(struct i3c_hci *hci) { struct hci_rings_data *rings = hci->io_data; unsigned int i; bool handled = false; - for (i = 0; mask && i < rings->total; i++) { + for (i = 0; i < rings->total; i++) { struct hci_rh_data *rh; u32 status; - if (!(mask & BIT(i))) - continue; - mask &= ~BIT(i); - rh = &rings->headers[i]; status = rh_reg_read(INTR_STATUS); DBG("rh%d status: %#x", i, status); diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h index 
aaa47ac47381..69ea1d10414b 100644 --- a/drivers/i3c/master/mipi-i3c-hci/hci.h +++ b/drivers/i3c/master/mipi-i3c-hci/hci.h @@ -115,7 +115,7 @@ static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n) /* This abstracts PIO vs DMA operations */ struct hci_io_ops { - bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask); + bool (*irq_handler)(struct i3c_hci *hci); int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev, diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c index d0272aa93599..2fc71e696911 100644 --- a/drivers/i3c/master/mipi-i3c-hci/pio.c +++ b/drivers/i3c/master/mipi-i3c-hci/pio.c @@ -979,7 +979,7 @@ static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci, i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot); } -static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused) +static bool hci_pio_irq_handler(struct i3c_hci *hci) { struct hci_pio_data *pio = hci->io_data; u32 status; diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index a7bfc678153e..c1ee3828e7ee 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -130,8 +130,8 @@ #define SVC_I3C_PPBAUD_MAX 15 #define SVC_I3C_QUICK_I2C_CLK 4170000 -#define SVC_I3C_EVENT_IBI BIT(0) -#define SVC_I3C_EVENT_HOTJOIN BIT(1) +#define SVC_I3C_EVENT_IBI GENMASK(7, 0) +#define SVC_I3C_EVENT_HOTJOIN BIT(31) struct svc_i3c_cmd { u8 addr; @@ -214,7 +214,7 @@ struct svc_i3c_master { spinlock_t lock; } ibi; struct mutex lock; - int enabled_events; + u32 enabled_events; u32 mctrl_config; }; @@ -388,10 +388,11 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master, return 0; } -static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master, +static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master, bool mandatory_byte) { unsigned int ibi_ack_nack; + u32 reg; ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK; if (mandatory_byte) @@ -400,13 +401,43 @@ static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master, ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE; writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL); + + return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg, + SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000); + } -static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master) +static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master) { + int ret; + u32 reg; + writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK | SVC_I3C_MCTRL_IBIRESP_NACK, master->regs + SVC_I3C_MCTRL); + + ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg, + SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000); + return ret; +} + +static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus) +{ + u32 ibitype; + int ret = 0; + + ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus); + + writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); + + /* Hardware can't auto emit NACK for hot join and master request */ + switch (ibitype) { + case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN: + case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST: + ret = svc_i3c_master_nack_ibi(master); + } + + return ret; } static void svc_i3c_master_ibi_work(struct work_struct *work) @@ -418,7 +449,16 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) u32 status, val; int ret; - mutex_lock(&master->lock); + /* + * According to I3C spec ver 
1.1, 09-Jun-2021, section 5.1.2.5: + * + * The I3C Controller shall hold SCL low while the Bus is in the ACK/NACK Phase of an I3C/I2C + * transfer, but the maximum stall time is 100us. IRQs have to be disabled to prevent + * scheduling during the whole I3C transaction; otherwise, an I3C bus timeout may occur if + * any interrupt or reschedule happens during the transaction. + */ + guard(spinlock_irqsave)(&master->xferqueue.lock); + /* * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing * readl_relaxed_poll_timeout() to return immediately. Consequently, @@ -438,8 +478,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) master->regs + SVC_I3C_MCTRL); /* Wait for IBIWON, should take approximately 100us */ - ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val, - SVC_I3C_MSTATUS_IBIWON(val), 0, 1000); + ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val, + SVC_I3C_MSTATUS_IBIWON(val), 0, 100); if (ret) { dev_err(master->dev, "Timeout when polling for IBIWON\n"); svc_i3c_master_emit_stop(master); @@ -511,7 +551,6 @@ reenable_ibis: svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); - mutex_unlock(&master->lock); } static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) @@ -854,6 +893,9 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, int ret, i; while (true) { + /* clear the SVC_I3C_MINT_IBIWON w1c bits */ + writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); + /* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes, ENTER DAA or PROCESS DAA. * * ENTER DAA: @@ -905,6 +947,11 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, ret = svc_i3c_master_readb(master, data, 2); if (ret) break; + } else if (SVC_I3C_MSTATUS_IBIWON(reg)) { + ret = svc_i3c_master_handle_ibi_won(master, reg); + if (ret) + break; + continue; } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) { if (SVC_I3C_MSTATUS_STATE_IDLE(reg) && SVC_I3C_MSTATUS_COMPLETE(reg)) { @@ -1056,12 +1103,27 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m) if (ret) goto rpm_out; - /* Register all devices who participated to the core */ - for (i = 0; i < dev_nb; i++) { - ret = i3c_master_add_i3c_dev_locked(m, addrs[i]); - if (ret) - goto rpm_out; - } + /* + * Register with the core all devices that participated in DAA. + * + * If two devices (A and B) are detected in DAA and address 0xa is assigned to + * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked() + * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being + * registered on the bus. The I3C stack might still consider 0xb a free + * address. If a subsequent Hot-Join occurs, 0xb might be assigned to Device A, + * causing both devices A and B to use the same address 0xb, violating the I3C + * specification. + * + * The return value for i3c_master_add_i3c_dev_locked() should not be checked + * because subsequent steps will scan the entire I3C bus, independent of + * whether i3c_master_add_i3c_dev_locked() returns success. + * + * If device A registration fails, there is still a chance to register device + * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while + * retrieving device information.
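+ *
+ * As a compact trace of the scenario above (the 0xa/0xb addresses are
+ * those of the example, not literal driver output):
+ *
+ *	DAA assigns: device A -> 0xa, device B -> 0xb
+ *	i3c_master_add_i3c_dev_locked(m, 0xa) fails; the old loop aborted here
+ *	device B is never registered, yet the core still sees 0xb as free
+ *	a later Hot-Join hands 0xb to device A
+ *	=> A and B both answer at address 0xb
+ *
+ * Ignoring the per-device return value lets each discovered address be
+ * registered independently of the others' outcome.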
+ */ + for (i = 0; i < dev_nb; i++) + i3c_master_add_i3c_dev_locked(m, addrs[i]); /* Configure IBI auto-rules */ ret = svc_i3c_update_ibirules(master); @@ -1163,6 +1225,26 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master, if (ret) goto emit_stop; + /* + * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a + * Frame with I3C Target Address. + * + * The I3C Controller normally should start a Frame, the Address may be arbitrated, + * and so the Controller shall monitor to see whether an In-Band Interrupt request, + * a Controller Role Request (i.e., Secondary Controller requests to become the + * Active Controller), or a Hot-Join Request has been made. + * + * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens, + * issue a repeated start. Address arbitration only happens at START, never at + * repeated start. + */ + if (SVC_I3C_MSTATUS_IBIWON(reg)) { + ret = svc_i3c_master_handle_ibi_won(master, reg); + if (ret) + goto emit_stop; + continue; + } + if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) { /* * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3. @@ -1196,24 +1278,6 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master, } } - /* - * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame - * with I3C Target Address. - * - * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so - * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller - * Role Request (i.e., Secondary Controller requests to become the Active Controller), or - * a Hot-Join Request has been made. - * - * If missed IBIWON check, the wrong data will be return. When IBIWON happen, return failure - * and yield the above events handler.
- */ - if (SVC_I3C_MSTATUS_IBIWON(reg)) { - ret = -EAGAIN; - *actual_len = 0; - goto emit_stop; - } - if (rnw) ret = svc_i3c_master_read(master, in, xfer_len); else @@ -1624,7 +1688,7 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev) return ret; } - master->enabled_events |= SVC_I3C_EVENT_IBI; + master->enabled_events++; svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); @@ -1636,7 +1700,7 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev) struct svc_i3c_master *master = to_svc_i3c_master(m); int ret; - master->enabled_events &= ~SVC_I3C_EVENT_IBI; + master->enabled_events--; if (!master->enabled_events) svc_i3c_master_disable_interrupts(master); @@ -1827,8 +1891,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev) rpm_disable: pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); err_disable_clks: svc_i3c_master_unprepare_clks(master); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 07fb8d3c037f..142170473e75 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -35,6 +35,8 @@ MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); #define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */ +#define CM_DIRECT_RETRY_CTX ((void *) 1UL) + static const char * const ibcm_rej_reason_strs[] = { [IB_CM_REJ_NO_QP] = "no QP", [IB_CM_REJ_NO_EEC] = "no EEC", @@ -93,8 +95,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work); static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param); -static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, - const void *private_data, u8 private_data_len); +static void cm_issue_dreq(struct cm_id_private *cm_id_priv); static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len); static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, @@ -307,12 +308,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) goto out; } - /* Timeout set by caller if response is expected. 
*/ m->ah = ah; - m->retries = cm_id_priv->max_cm_retries; - - refcount_inc(&cm_id_priv->refcount); - m->context[0] = cm_id_priv; out: spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); @@ -321,16 +317,13 @@ out: static void cm_free_msg(struct ib_mad_send_buf *msg) { - struct cm_id_private *cm_id_priv = msg->context[0]; - if (msg->ah) rdma_destroy_ah(msg->ah, 0); - cm_deref_id(cm_id_priv); ib_free_send_mad(msg); } static struct ib_mad_send_buf * -cm_alloc_priv_msg(struct cm_id_private *cm_id_priv) +cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state) { struct ib_mad_send_buf *msg; @@ -339,7 +332,15 @@ cm_alloc_priv_msg(struct cm_id_private *cm_id_priv) msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return msg; + cm_id_priv->msg = msg; + refcount_inc(&cm_id_priv->refcount); + msg->context[0] = cm_id_priv; + msg->context[1] = (void *) (unsigned long) state; + + msg->retries = cm_id_priv->max_cm_retries; + msg->timeout_ms = cm_id_priv->timeout_ms; + return msg; } @@ -358,13 +359,20 @@ static void cm_free_priv_msg(struct ib_mad_send_buf *msg) ib_free_send_mad(msg); } -static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port, - struct ib_mad_recv_wc *mad_recv_wc) +static struct ib_mad_send_buf * +cm_alloc_response_msg_no_ah(struct cm_port *port, + struct ib_mad_recv_wc *mad_recv_wc, + bool direct_retry) { - return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, - 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, - GFP_ATOMIC, - IB_MGMT_BASE_VERSION); + struct ib_mad_send_buf *m; + + m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, + GFP_ATOMIC, IB_MGMT_BASE_VERSION); + if (!IS_ERR(m)) + m->context[0] = direct_retry ? CM_DIRECT_RETRY_CTX : NULL; + + return m; } static int cm_create_response_msg_ah(struct cm_port *port, @@ -384,12 +392,13 @@ static int cm_create_response_msg_ah(struct cm_port *port, static int cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, + bool direct_retry, struct ib_mad_send_buf **msg) { struct ib_mad_send_buf *m; int ret; - m = cm_alloc_response_msg_no_ah(port, mad_recv_wc); + m = cm_alloc_response_msg_no_ah(port, mad_recv_wc, direct_retry); if (IS_ERR(m)) return PTR_ERR(m); @@ -403,13 +412,6 @@ static int cm_alloc_response_msg(struct cm_port *port, return 0; } -static void cm_free_response_msg(struct ib_mad_send_buf *msg) -{ - if (msg->ah) - rdma_destroy_ah(msg->ah, 0); - ib_free_send_mad(msg); -} - static void *cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; @@ -1109,7 +1111,8 @@ retest: cm_id->state = IB_CM_IDLE; break; } - cm_send_dreq_locked(cm_id_priv, NULL, 0); + cm_issue_dreq(cm_id_priv); + cm_enter_timewait(cm_id_priv); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->msg); @@ -1557,7 +1560,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, if (param->alternate_path) cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); - msg = cm_alloc_priv_msg(cm_id_priv); + msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REQ_SENT); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out_unlock; @@ -1566,8 +1569,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, req_msg = (struct cm_req_msg *)msg->mad; cm_format_req(req_msg, cm_id_priv, param); cm_id_priv->tid = req_msg->hdr.tid; - msg->timeout_ms = cm_id_priv->timeout_ms; - msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT; cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); cm_id_priv->rq_psn = 
cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); @@ -1598,7 +1599,7 @@ static int cm_issue_rej(struct cm_port *port, struct cm_rej_msg *rej_msg, *rcv_msg; int ret; - ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); + ret = cm_alloc_response_msg(port, mad_recv_wc, false, &msg); if (ret) return ret; @@ -1624,7 +1625,7 @@ static int cm_issue_rej(struct cm_port *port, IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); ret = ib_post_send_mad(msg, NULL); if (ret) - cm_free_response_msg(msg); + cm_free_msg(msg); return ret; } @@ -1951,7 +1952,7 @@ static void cm_dup_req_handler(struct cm_work *work, } spin_unlock_irq(&cm_id_priv->lock); - ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg); if (ret) return; @@ -1980,7 +1981,7 @@ static void cm_dup_req_handler(struct cm_work *work, return; unlock: spin_unlock_irq(&cm_id_priv->lock); -free: cm_free_response_msg(msg); +free: cm_free_msg(msg); } static struct cm_id_private *cm_match_req(struct cm_work *work, @@ -2294,7 +2295,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id, goto out; } - msg = cm_alloc_priv_msg(cm_id_priv); + msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REP_SENT); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out; @@ -2302,8 +2303,6 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id, rep_msg = (struct cm_rep_msg *) msg->mad; cm_format_rep(rep_msg, cm_id_priv, param); - msg->timeout_ms = cm_id_priv->timeout_ms; - msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; trace_icm_send_rep(cm_id); ret = ib_post_send_mad(msg, NULL); @@ -2444,7 +2443,7 @@ static void cm_dup_rep_handler(struct cm_work *work) atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]); - ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg); if (ret) goto deref; @@ -2469,7 +2468,7 @@ static void cm_dup_rep_handler(struct cm_work *work) goto deref; unlock: spin_unlock_irq(&cm_id_priv->lock); -free: cm_free_response_msg(msg); +free: cm_free_msg(msg); deref: cm_deref_id(cm_id_priv); } @@ -2653,59 +2652,68 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, private_data_len); } -static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, - const void *private_data, u8 private_data_len) +static void cm_issue_dreq(struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); + msg = cm_alloc_msg(cm_id_priv); + if (IS_ERR(msg)) + return; + + cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, NULL, 0); + + trace_icm_send_dreq(&cm_id_priv->id); + ret = ib_post_send_mad(msg, NULL); + if (ret) + cm_free_msg(msg); +} + +int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, + u8 private_data_len) +{ + struct cm_id_private *cm_id_priv = + container_of(cm_id, struct cm_id_private, id); + struct ib_mad_send_buf *msg; + unsigned long flags; + int ret; + if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) return -EINVAL; + spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { trace_icm_dreq_skipped(&cm_id_priv->id); - return -EINVAL; + ret = -EINVAL; + goto unlock; } if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); - msg = cm_alloc_priv_msg(cm_id_priv); + msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_DREQ_SENT); if (IS_ERR(msg)) { cm_enter_timewait(cm_id_priv); - return 
PTR_ERR(msg); + ret = PTR_ERR(msg); + goto unlock; } cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, private_data, private_data_len); - msg->timeout_ms = cm_id_priv->timeout_ms; - msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; trace_icm_send_dreq(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_enter_timewait(cm_id_priv); cm_free_priv_msg(msg); - return ret; + goto unlock; } cm_id_priv->id.state = IB_CM_DREQ_SENT; - return 0; -} - -int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, - u8 private_data_len) -{ - struct cm_id_private *cm_id_priv = - container_of(cm_id, struct cm_id_private, id); - unsigned long flags; - int ret; - - spin_lock_irqsave(&cm_id_priv->lock, flags); - ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len); +unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } @@ -2791,7 +2799,7 @@ static int cm_issue_drep(struct cm_port *port, struct cm_drep_msg *drep_msg; int ret; - ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); + ret = cm_alloc_response_msg(port, mad_recv_wc, true, &msg); if (ret) return ret; @@ -2809,7 +2817,7 @@ static int cm_issue_drep(struct cm_port *port, IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); ret = ib_post_send_mad(msg, NULL); if (ret) - cm_free_response_msg(msg); + cm_free_msg(msg); return ret; } @@ -2856,7 +2864,8 @@ static int cm_dreq_handler(struct cm_work *work) case IB_CM_TIMEWAIT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); - msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); + msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc, + true); if (IS_ERR(msg)) goto unlock; @@ -2867,7 +2876,7 @@ static int cm_dreq_handler(struct cm_work *work) if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || ib_post_send_mad(msg, NULL)) - cm_free_response_msg(msg); + cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] @@ -3361,7 +3370,8 @@ static int cm_lap_handler(struct cm_work *work) case IB_CM_MRA_LAP_SENT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_LAP_COUNTER]); - msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); + msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc, + true); if (IS_ERR(msg)) goto unlock; @@ -3374,7 +3384,7 @@ static int cm_lap_handler(struct cm_work *work) if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || ib_post_send_mad(msg, NULL)) - cm_free_response_msg(msg); + cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] @@ -3513,7 +3523,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, goto out_unlock; } - msg = cm_alloc_priv_msg(cm_id_priv); + msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_SIDR_REQ_SENT); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out_unlock; @@ -3521,8 +3531,6 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv, param); - msg->timeout_ms = cm_id_priv->timeout_ms; - msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT; trace_icm_send_sidr_req(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); @@ -3768,17 +3776,17 @@ out: static void cm_process_send_error(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf *msg, - enum ib_cm_state state, enum ib_wc_status wc_status) { + enum ib_cm_state state = (unsigned long) msg->context[1]; struct ib_cm_event cm_event = {}; int ret; - /* Discard old sends or 
ones without a response. */ + /* Discard old sends. */ spin_lock_irq(&cm_id_priv->lock); if (msg != cm_id_priv->msg) { spin_unlock_irq(&cm_id_priv->lock); - cm_free_msg(msg); + cm_free_priv_msg(msg); return; } cm_free_priv_msg(msg); @@ -3826,9 +3834,7 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_send_buf *msg = mad_send_wc->send_buf; - struct cm_id_private *cm_id_priv = msg->context[0]; - enum ib_cm_state state = - (enum ib_cm_state)(unsigned long)msg->context[1]; + struct cm_id_private *cm_id_priv; struct cm_port *port; u16 attr_index; @@ -3836,13 +3842,12 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, attr_index = be16_to_cpu(((struct ib_mad_hdr *) msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; - /* - * If the send was in response to a received message (context[0] is not - * set to a cm_id), and is not a REJ, then it is a send that was - * manually retried. - */ - if (!cm_id_priv && (attr_index != CM_REJ_COUNTER)) + if (msg->context[0] == CM_DIRECT_RETRY_CTX) { msg->retries = 1; + cm_id_priv = NULL; + } else { + cm_id_priv = msg->context[0]; + } atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]); if (msg->retries) @@ -3850,10 +3855,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, &port->counters[CM_XMIT_RETRIES][attr_index]); if (cm_id_priv) - cm_process_send_error(cm_id_priv, msg, state, - mad_send_wc->status); + cm_process_send_error(cm_id_priv, msg, mad_send_wc->status); else - cm_free_response_msg(msg); + cm_free_msg(msg); } static void cm_work_handler(struct work_struct *_work) diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e029401b5680..ca9b956c034d 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -437,6 +437,7 @@ int ib_device_rename(struct ib_device *ibdev, const char *name) client->rename(ibdev, client_data); } up_read(&ibdev->client_data_rwsem); + rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT); up_read(&devices_rwsem); return 0; } @@ -2759,6 +2760,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, resize_cq); SET_DEVICE_OP(dev_ops, set_vf_guid); SET_DEVICE_OP(dev_ops, set_vf_link_state); + SET_DEVICE_OP(dev_ops, ufile_hw_cleanup); SET_OBJ_SIZE(dev_ops, ib_ah); SET_OBJ_SIZE(dev_ops, ib_counters); @@ -2852,6 +2854,40 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { }, }; +static int ib_netdevice_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct net_device *ib_ndev; + struct ib_device *ibdev; + u32 port; + + switch (event) { + case NETDEV_CHANGENAME: + ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN); + if (!ibdev) + return NOTIFY_DONE; + + rdma_for_each_port(ibdev, port) { + ib_ndev = ib_device_get_netdev(ibdev, port); + if (ndev == ib_ndev) + rdma_nl_notify_event(ibdev, port, + RDMA_NETDEV_RENAME_EVENT); + dev_put(ib_ndev); + } + ib_device_put(ibdev); + break; + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block nb_netdevice = { + .notifier_call = ib_netdevice_event, +}; + static int __init ib_core_init(void) { int ret = -ENOMEM; @@ -2923,6 +2959,8 @@ static int __init ib_core_init(void) goto err_parent; } + register_netdevice_notifier(&nb_netdevice); + return 0; err_parent: @@ -2952,6 +2990,7 @@ err: static void __exit ib_core_cleanup(void) { + 
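/*
+	 * Tear down in reverse of ib_core_init(): the netdevice notifier
+	 * was registered last there, so it is unregistered first here.
+	 */
+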
unregister_netdevice_notifier(&nb_netdevice); roce_gid_mgmt_cleanup(); rdma_nl_unregister(RDMA_NL_LS); nldev_exit(); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 7dc8e2ec62cc..ff121e59b9c0 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -2729,6 +2729,25 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { }, }; +static int fill_mon_netdev_rename(struct sk_buff *msg, + struct ib_device *device, u32 port, + const struct net *net) +{ + struct net_device *netdev = ib_device_get_netdev(device, port); + int ret = 0; + + if (!netdev || !net_eq(dev_net(netdev), net)) + goto out; + + ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); + if (ret) + goto out; + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); +out: + dev_put(netdev); + return ret; +} + static int fill_mon_netdev_association(struct sk_buff *msg, struct ib_device *device, u32 port, const struct net *net) @@ -2793,6 +2812,18 @@ static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num, "Failed to send RDMA monitor netdev detach event: port %d\n", port_num); break; + case RDMA_RENAME_EVENT: + dev_warn_ratelimited(&device->dev, + "Failed to send RDMA monitor rename device event\n"); + break; + + case RDMA_NETDEV_RENAME_EVENT: + netdev = ib_device_get_netdev(device, port_num); + dev_warn_ratelimited(&device->dev, + "Failed to send RDMA monitor netdev rename event: port %d netdev %d\n", + port_num, netdev->ifindex); + dev_put(netdev); + break; default: break; } @@ -2822,14 +2853,19 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num, switch (type) { case RDMA_REGISTER_EVENT: case RDMA_UNREGISTER_EVENT: + case RDMA_RENAME_EVENT: ret = fill_nldev_handle(skb, device); if (ret) goto err_free; break; case RDMA_NETDEV_ATTACH_EVENT: case RDMA_NETDEV_DETACH_EVENT: - ret = fill_mon_netdev_association(skb, device, - port_num, net); + ret = fill_mon_netdev_association(skb, device, port_num, net); + if (ret) + goto err_free; + break; + case RDMA_NETDEV_RENAME_EVENT: + ret = fill_mon_netdev_rename(skb, device, port_num, net); if (ret) goto err_free; break; diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 29b1ab1d5f93..90c177edf9b0 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -58,8 +58,8 @@ void uverbs_uobject_put(struct ib_uobject *uobject) } EXPORT_SYMBOL(uverbs_uobject_put); -static int uverbs_try_lock_object(struct ib_uobject *uobj, - enum rdma_lookup_mode mode) +int uverbs_try_lock_object(struct ib_uobject *uobj, + enum rdma_lookup_mode mode) { /* * When a shared access is required, we use a positive counter. 
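The export of uverbs_try_lock_object() lets drivers take the same shared/exclusive discipline the core applies to uobjects: usecnt counts readers while it is non-negative and is pinned to -1 by an exclusive owner. A self-contained model of that counting scheme (the helper name is made up; the real function additionally distinguishes the rdma_lookup_mode cases):

static int try_lock_usecnt(atomic_t *usecnt, bool exclusive)
{
	if (!exclusive)
		/* Shared: count a reader unless exclusively owned (-1). */
		return atomic_fetch_add_unless(usecnt, 1, -1) == -1 ?
			-EBUSY : 0;

	/* Exclusive: succeeds only when there are no users at all. */
	return atomic_cmpxchg(usecnt, 0, -1) == 0 ? 0 : -EBUSY;
}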
Each @@ -84,6 +84,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, } return 0; } +EXPORT_SYMBOL(uverbs_try_lock_object); static void assert_uverbs_usecnt(struct ib_uobject *uobj, enum rdma_lookup_mode mode) @@ -880,9 +881,14 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile, static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile, enum rdma_remove_reason reason) { + struct uverbs_attr_bundle attrs = { .ufile = ufile }; + struct ib_ucontext *ucontext = ufile->ucontext; + struct ib_device *ib_dev = ucontext->device; struct ib_uobject *obj, *next_obj; int ret = -EINVAL; - struct uverbs_attr_bundle attrs = { .ufile = ufile }; + + if (ib_dev->ops.ufile_hw_cleanup) + ib_dev->ops.ufile_hw_cleanup(ufile); /* * This shouldn't run while executing other commands on this diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index d5131b3ba8ab..a9f2c6b1b29e 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -515,6 +515,27 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev) } EXPORT_SYMBOL(rdma_roce_rescan_device); +/** + * rdma_roce_rescan_port - Rescan all of the network devices in the system + * and add their gids if relevant to the port of the RoCE device. + * + * @ib_dev: IB device + * @port: Port number + */ +void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port) +{ + struct net_device *ndev = NULL; + + if (rdma_protocol_roce(ib_dev, port)) { + ndev = ib_device_get_netdev(ib_dev, port); + if (!ndev) + return; + enum_all_gids_of_dev_cb(ib_dev, port, ndev, ndev); + dev_put(ndev); + } +} +EXPORT_SYMBOL(rdma_roce_rescan_port); + static void callback_for_addr_gid_device_scan(struct ib_device *device, u32 port, struct net_device *rdma_ndev, @@ -575,16 +596,17 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u32 port, } } -static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, - struct net_device *event_ndev) +void roce_del_all_netdev_gids(struct ib_device *ib_dev, + u32 port, struct net_device *ndev) { - ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev); + ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev); } +EXPORT_SYMBOL(roce_del_all_netdev_gids); static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { - handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids); + handle_netdev_upper(ib_dev, port, cookie, roce_del_all_netdev_gids); } static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port, diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 821d93c8f712..797e2fcc8072 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -133,35 +133,6 @@ struct ib_uverbs_completion_event_file { struct ib_uverbs_event_queue ev_queue; }; -struct ib_uverbs_file { - struct kref ref; - struct ib_uverbs_device *device; - struct mutex ucontext_lock; - /* - * ucontext must be accessed via ib_uverbs_get_ucontext() or with - * ucontext_lock held - */ - struct ib_ucontext *ucontext; - struct ib_uverbs_async_event_file *default_async_file; - struct list_head list; - - /* - * To access the uobjects list hw_destroy_rwsem must be held for write - * OR hw_destroy_rwsem held for read AND uobjects_lock held. - * hw_destroy_rwsem should be called across any destruction of the HW - * object of an associated uobject. 
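rdma_roce_rescan_port() is the per-port counterpart of the whole-device rescan above it. A hypothetical caller, say a driver event handler that brings one port back up (my_port_up() is an illustrative name):

static void my_port_up(struct ib_device *ibdev, u32 port)
{
	/* No-op on non-RoCE ports; re-adds GIDs of matching netdevs. */
	rdma_roce_rescan_port(ibdev, port);
}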
- */ - struct rw_semaphore hw_destroy_rwsem; - spinlock_t uobjects_lock; - struct list_head uobjects; - - struct mutex umap_lock; - struct list_head umaps; - struct page *disassociate_page; - - struct xarray idr; -}; - struct ib_uverbs_event { union { struct ib_uverbs_async_event_desc async; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 94454186ed81..85cfc790a7bb 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -76,6 +76,7 @@ static dev_t dynamic_uverbs_dev; static DEFINE_IDA(uverbs_ida); static int ib_uverbs_add_one(struct ib_device *device); static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); +static struct ib_client uverbs_client; static char *uverbs_devnode(const struct device *dev, umode_t *mode) { @@ -217,6 +218,7 @@ void ib_uverbs_release_file(struct kref *ref) if (file->disassociate_page) __free_pages(file->disassociate_page, 0); + mutex_destroy(&file->disassociation_lock); mutex_destroy(&file->umap_lock); mutex_destroy(&file->ucontext_lock); kfree(file); @@ -698,8 +700,13 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) ret = PTR_ERR(ucontext); goto out; } + + mutex_lock(&file->disassociation_lock); + vma->vm_ops = &rdma_umap_ops; ret = ucontext->device->ops.mmap(ucontext, vma); + + mutex_unlock(&file->disassociation_lock); out: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return ret; @@ -721,6 +728,8 @@ static void rdma_umap_open(struct vm_area_struct *vma) /* We are racing with disassociation */ if (!down_read_trylock(&ufile->hw_destroy_rwsem)) goto out_zap; + mutex_lock(&ufile->disassociation_lock); + /* * Disassociation already completed, the VMA should already be zapped. */ @@ -732,10 +741,12 @@ static void rdma_umap_open(struct vm_area_struct *vma) goto out_unlock; rdma_umap_priv_init(priv, vma, opriv->entry); + mutex_unlock(&ufile->disassociation_lock); up_read(&ufile->hw_destroy_rwsem); return; out_unlock: + mutex_unlock(&ufile->disassociation_lock); up_read(&ufile->hw_destroy_rwsem); out_zap: /* @@ -819,7 +830,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) { struct rdma_umap_priv *priv, *next_priv; - lockdep_assert_held(&ufile->hw_destroy_rwsem); + mutex_lock(&ufile->disassociation_lock); while (1) { struct mm_struct *mm = NULL; @@ -845,8 +856,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) break; } mutex_unlock(&ufile->umap_lock); - if (!mm) + if (!mm) { + mutex_unlock(&ufile->disassociation_lock); return; + } /* * The umap_lock is nested under mmap_lock since it used within @@ -876,7 +889,31 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) mmap_read_unlock(mm); mmput(mm); } + + mutex_unlock(&ufile->disassociation_lock); +} + +/** + * rdma_user_mmap_disassociate() - Revoke mmaps for a device + * @device: device to revoke + * + * This function should be called by drivers that need to disable mmaps for the + * device, for instance because it is going to be reset. 
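Because mmap establishment now also runs under disassociation_lock, a driver can revoke every user mapping without racing against new ones being created. A hedged usage sketch for the export that follows (my_dev and my_hw_reset() are placeholders):

static void my_reset(struct my_dev *mdev)
{
	/* Zap all userspace mappings before touching the hardware. */
	rdma_user_mmap_disassociate(&mdev->ibdev);
	my_hw_reset(mdev);
	/* Userspace must re-mmap once the device is back. */
}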
+ */ +void rdma_user_mmap_disassociate(struct ib_device *device) +{ + struct ib_uverbs_device *uverbs_dev = + ib_get_client_data(device, &uverbs_client); + struct ib_uverbs_file *ufile; + + mutex_lock(&uverbs_dev->lists_mutex); + list_for_each_entry(ufile, &uverbs_dev->uverbs_file_list, list) { + if (ufile->ucontext) + uverbs_user_mmap_disassociate(ufile); + } + mutex_unlock(&uverbs_dev->lists_mutex); } +EXPORT_SYMBOL(rdma_user_mmap_disassociate); /* * ib_uverbs_open() does not need the BKL: @@ -947,6 +984,8 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) mutex_init(&file->umap_lock); INIT_LIST_HEAD(&file->umaps); + mutex_init(&file->disassociation_lock); + filp->private_data = file; list_add_tail(&file->list, &dev->uverbs_file_list); mutex_unlock(&dev->lists_mutex); diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile index ee9bb1be61ea..f63417d2ccc6 100644 --- a/drivers/infiniband/hw/bnxt_re/Makefile +++ b/drivers/infiniband/hw/bnxt_re/Makefile @@ -4,4 +4,5 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnxt obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o bnxt_re-y := main.o ib_verbs.o \ qplib_res.o qplib_rcfw.o \ - qplib_sp.o qplib_fp.o hw_counters.o + qplib_sp.o qplib_fp.o hw_counters.o \ + debugfs.o diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index e94518b12f86..2975b11b79bf 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -154,8 +154,25 @@ struct bnxt_re_pacing { #define BNXT_RE_GRC_FIFO_REG_BASE 0x2000 +#define BNXT_RE_MIN_MSIX 2 +#define BNXT_RE_MAX_MSIX BNXT_MAX_ROCE_MSIX +struct bnxt_re_nq_record { + struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX]; + struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; + int num_msix; + /* serialize NQ access */ + struct mutex load_lock; +}; + #define MAX_CQ_HASH_BITS (16) #define MAX_SRQ_HASH_BITS (16) + +static inline bool bnxt_re_chip_gen_p7(u16 chip_num) +{ + return (chip_num == CHIP_NUM_58818 || + chip_num == CHIP_NUM_57608); +} + struct bnxt_re_dev { struct ib_device ibdev; struct list_head list; @@ -174,27 +191,24 @@ struct bnxt_re_dev { unsigned int version, major, minor; struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; - int num_msix; int id; struct delayed_work worker; u8 cur_prio_map; - /* FP Notification Queue (CQ & SRQ) */ - struct tasklet_struct nq_task; - /* RCFW Channel */ struct bnxt_qplib_rcfw rcfw; - /* NQ */ - struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX]; + /* NQ record */ + struct bnxt_re_nq_record *nqr; /* Device Resources */ struct bnxt_qplib_dev_attr dev_attr; struct bnxt_qplib_ctx qplib_ctx; struct bnxt_qplib_res qplib_res; struct bnxt_qplib_dpi dpi_privileged; + struct bnxt_qplib_cq_coal_param cq_coalescing; struct mutex qp_lock; /* protect qp list */ struct list_head qp_list; @@ -213,6 +227,8 @@ struct bnxt_re_dev { struct delayed_work dbq_pacing_work; DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS); DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS); + struct dentry *dbg_root; + struct dentry *qp_debugfs; }; #define to_bnxt_re_dev(ptr, member) \ @@ -239,4 +255,23 @@ static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev) rdev->qplib_res.pacing_data->dev_err_state = test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); } + +static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev) +{ + if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) || + rdev->rcfw.res->cctx->hwrm_intf_ver < HWRM_VERSION_READ_CTX) + 
return -EOPNOTSUPP; + return 0; +} + +#define BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5 1088 +#define BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5 128 +#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5 128 +#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5 192 + +#define BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 1088 +#define BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 192 +#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 192 +#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 192 + #endif diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c new file mode 100644 index 000000000000..7c47039044ef --- /dev/null +++ b/drivers/infiniband/hw/bnxt_re/debugfs.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Copyright (c) 2024, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Limited and/or its subsidiaries. + * + * Description: Debugfs component of the bnxt_re driver + */ + +#include <linux/debugfs.h> +#include <linux/pci.h> +#include <rdma/ib_addr.h> + +#include "bnxt_ulp.h" +#include "roce_hsi.h" +#include "qplib_res.h" +#include "qplib_sp.h" +#include "qplib_fp.h" +#include "qplib_rcfw.h" +#include "bnxt_re.h" +#include "ib_verbs.h" +#include "debugfs.h" + +static struct dentry *bnxt_re_debugfs_root; + +static inline const char *bnxt_re_qp_state_str(u8 state) +{ + switch (state) { + case CMDQ_MODIFY_QP_NEW_STATE_RESET: + return "RST"; + case CMDQ_MODIFY_QP_NEW_STATE_INIT: + return "INIT"; + case CMDQ_MODIFY_QP_NEW_STATE_RTR: + return "RTR"; + case CMDQ_MODIFY_QP_NEW_STATE_RTS: + return "RTS"; + case CMDQ_MODIFY_QP_NEW_STATE_SQE: + return "SQER"; + case CMDQ_MODIFY_QP_NEW_STATE_SQD: + return "SQD"; + case CMDQ_MODIFY_QP_NEW_STATE_ERR: + return "ERR"; + default: + return "Invalid QP state"; + } +} + +static inline const char *bnxt_re_qp_type_str(u8 type) +{ + switch (type) { + case CMDQ_CREATE_QP1_TYPE_GSI: return "QP1"; + case CMDQ_CREATE_QP_TYPE_GSI: return "QP1"; + case CMDQ_CREATE_QP_TYPE_RC: return "RC"; + case CMDQ_CREATE_QP_TYPE_UD: return "UD"; + case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE: return "RAW_ETHERTYPE"; + default: return "Invalid transport type"; + } +} + +static ssize_t qp_info_read(struct file *filep, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_re_qp *qp = filep->private_data; + char *buf; + int len; + + if (*ppos) + return 0; + + buf = kasprintf(GFP_KERNEL, + "QPN\t\t: %d\n" + "transport\t: %s\n" + "state\t\t: %s\n" + "mtu\t\t: %d\n" + "timeout\t\t: %d\n" + "remote QPN\t: %d\n", + qp->qplib_qp.id, + bnxt_re_qp_type_str(qp->qplib_qp.type), + bnxt_re_qp_state_str(qp->qplib_qp.state), + qp->qplib_qp.mtu, + qp->qplib_qp.timeout, + qp->qplib_qp.dest_qpn); + if (!buf) + return -ENOMEM; + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_qp_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qp_info_read, +}; + +void bnxt_re_debug_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) +{ + char resn[32]; + + sprintf(resn, "0x%x", qp->qplib_qp.id); + qp->dentry = debugfs_create_file(resn, 0400, rdev->qp_debugfs, qp, &debugfs_qp_fops); +} + +void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) +{ + debugfs_remove(qp->dentry); +} + +void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev) +{ + struct pci_dev *pdev = rdev->en_dev->pdev; + + rdev->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), bnxt_re_debugfs_root); + + rdev->qp_debugfs = 
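qp_info_read() above rebuilds the full text on every read and returns 0 for any non-zero *ppos, leaving offset handling to simple_read_from_buffer(). The same one-shot pattern in miniature, with a fixed buffer instead of kasprintf() (demo_read() is illustrative):

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "state\t\t: %s\n", "RTS");

	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}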
debugfs_create_dir("QPs", rdev->dbg_root); +} + +void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev) +{ + debugfs_remove_recursive(rdev->qp_debugfs); + + debugfs_remove_recursive(rdev->dbg_root); + rdev->dbg_root = NULL; +} + +void bnxt_re_register_debugfs(void) +{ + bnxt_re_debugfs_root = debugfs_create_dir("bnxt_re", NULL); +} + +void bnxt_re_unregister_debugfs(void) +{ + debugfs_remove(bnxt_re_debugfs_root); +} diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.h b/drivers/infiniband/hw/bnxt_re/debugfs.h new file mode 100644 index 000000000000..cd3be0a9ec7e --- /dev/null +++ b/drivers/infiniband/hw/bnxt_re/debugfs.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Copyright (c) 2024, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Limited and/or its subsidiaries. + * + * Description: Debugfs header + */ + +#ifndef __BNXT_RE_DEBUGFS__ +#define __BNXT_RE_DEBUGFS__ + +void bnxt_re_debug_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp); +void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp); + +void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev); +void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev); + +void bnxt_re_register_debugfs(void); +void bnxt_re_unregister_debugfs(void); + +#endif diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index e66ae9f22c71..82023394e330 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -62,6 +62,7 @@ #include "bnxt_re.h" #include "ib_verbs.h" +#include "debugfs.h" #include <rdma/uverbs_types.h> #include <rdma/uverbs_std_types.h> @@ -94,9 +95,9 @@ static int __from_ib_access_flags(int iflags) return qflags; }; -static enum ib_access_flags __to_ib_access_flags(int qflags) +static int __to_ib_access_flags(int qflags) { - enum ib_access_flags iflags = 0; + int iflags = 0; if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) iflags |= IB_ACCESS_LOCAL_WRITE; @@ -113,7 +114,49 @@ static enum ib_access_flags __to_ib_access_flags(int qflags) if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) iflags |= IB_ACCESS_ON_DEMAND; return iflags; -}; +} + +static u8 __qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx *cctx, int iflags) +{ + u8 qflags = 0; + + if (!bnxt_qplib_is_chip_gen_p5_p7(cctx)) + /* For Wh+ */ + return (u8)__from_ib_access_flags(iflags); + + /* For P5, P7 and later chips */ + if (iflags & IB_ACCESS_LOCAL_WRITE) + qflags |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE; + if (iflags & IB_ACCESS_REMOTE_WRITE) + qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; + if (iflags & IB_ACCESS_REMOTE_READ) + qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; + if (iflags & IB_ACCESS_REMOTE_ATOMIC) + qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC; + + return qflags; +} + +static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags) +{ + int iflags = 0; + + if (!bnxt_qplib_is_chip_gen_p5_p7(cctx)) + /* For Wh+ */ + return __to_ib_access_flags(qflags); + + /* For P5, P7 and later chips */ + if (qflags & CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE) + iflags |= IB_ACCESS_LOCAL_WRITE; + if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE) + iflags |= IB_ACCESS_REMOTE_WRITE; + if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_READ) + iflags |= IB_ACCESS_REMOTE_READ; + if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC) + iflags |= IB_ACCESS_REMOTE_ATOMIC; + + return iflags; +} static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev, struct bnxt_qplib_mrw *qplib_mr) @@ -211,6 +254,22 @@ int 
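The paired helpers above translate verbs access flags into the per-generation firmware encoding and back; on P5/P7 the two directions are meant to round-trip for the four bits both sides know about. A quick illustrative self-check (not part of the driver):

static void check_access_roundtrip(struct bnxt_qplib_chip_ctx *cctx)
{
	int in = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_READ;
	u8 hw = __qp_access_flags_from_ib(cctx, in);

	WARN_ON(__qp_access_flags_to_ib(cctx, hw) != in);
}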
bnxt_re_query_device(struct ib_device *ibdev, return 0; } +int bnxt_re_modify_device(struct ib_device *ibdev, + int device_modify_mask, + struct ib_device_modify *device_modify) +{ + ibdev_dbg(ibdev, "Modify device with mask 0x%x", device_modify_mask); + + if (device_modify_mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (!(device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)) + return 0; + + memcpy(ibdev->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX); + return 0; +} + /* Port */ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, struct ib_port_attr *port_attr) @@ -939,6 +998,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD) atomic_dec(&rdev->stats.res.ud_qp_count); + bnxt_re_debug_rem_qpinfo(rdev, qp); + ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); @@ -1622,6 +1683,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, if (active_qps > rdev->stats.res.ud_qp_watermark) rdev->stats.res.ud_qp_watermark = active_qps; } + bnxt_re_debug_add_qpinfo(rdev, qp); return 0; qp_destroy: @@ -1814,8 +1876,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges); srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; srq->srq_limit = srq_init_attr->attr.srq_limit; - srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; - nq = &rdev->nq[0]; + srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id; + nq = &rdev->nqr->nq[0]; if (udata) { rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); @@ -2041,12 +2103,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; qp->qplib_qp.access = - __from_ib_access_flags(qp_attr->qp_access_flags); + __qp_access_flags_from_ib(qp->qplib_qp.cctx, + qp_attr->qp_access_flags); /* LOCAL_WRITE access must be set to allow RC receive */ - qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; - /* Temp: Set all params on QP as of now */ - qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; - qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; + qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE; } if (qp_attr_mask & IB_QP_PKEY_INDEX) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; @@ -2080,7 +2140,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp->qplib_qp.ah.sgid_index = ctx->idx; qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; qp->qplib_qp.ah.hop_limit = grh->hop_limit; - qp->qplib_qp.ah.traffic_class = grh->traffic_class; + qp->qplib_qp.ah.traffic_class = grh->traffic_class >> 2; qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr); ether_addr_copy(qp->qplib_qp.ah.dmac, qp_attr->ah_attr.roce.dmac); @@ -2251,7 +2311,8 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state); qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 
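The new right shift when programming ah.traffic_class reflects the layout of the traffic-class octet: DSCP sits in the upper six bits and ECN in the lower two, and the firmware field expects plain DSCP. Spelled out:

static u8 grh_tclass_to_dscp(u8 traffic_class)
{
	return traffic_class >> 2;	/* strip the two ECN bits */
}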
1 : 0; - qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); + qp_attr->qp_access_flags = __qp_access_flags_to_ib(qp->qplib_qp.cctx, + qplib_qp->access); qp_attr->pkey_index = qplib_qp->pkey_index; qp_attr->qkey = qplib_qp->qkey; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; @@ -2972,6 +3033,28 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, return rc; } +static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev) +{ + int min, indx; + + mutex_lock(&rdev->nqr->load_lock); + for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) { + if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load) + min = indx; + } + rdev->nqr->nq[min].load++; + mutex_unlock(&rdev->nqr->load_lock); + + return &rdev->nqr->nq[min]; +} + +static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq) +{ + mutex_lock(&rdev->nqr->load_lock); + nq->load--; + mutex_unlock(&rdev->nqr->load_lock); +} + /* Completion Queues */ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { @@ -2990,6 +3073,8 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) hash_del(&cq->hash_entry); } bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); + + bnxt_re_put_nq(rdev, nq); ib_umem_release(cq->umem); atomic_dec(&rdev->stats.res.cq_count); @@ -3008,8 +3093,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; struct bnxt_qplib_chip_ctx *cctx; - struct bnxt_qplib_nq *nq = NULL; - unsigned int nq_alloc_cnt; int cqe = attr->cqe; int rc, entries; u32 active_cqs; @@ -3060,15 +3143,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, cq->qplib_cq.dpi = &rdev->dpi_privileged; } - /* - * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a - * used for getting the NQ index. 
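bnxt_re_get_nq() above replaces the old round-robin NQ spreading (its removal follows below) with a least-loaded pick under nqr->load_lock; bnxt_re_put_nq() drops the count when the CQ goes away. The selection is a plain minimum scan, modeled standalone here:

#include <stdio.h>

static int pick_least_loaded(const unsigned int *load, int n)
{
	int i, min = 0;

	for (i = 1; i < n; i++)
		if (load[min] > load[i])
			min = i;
	return min;
}

int main(void)
{
	unsigned int load[] = { 4, 1, 3 };

	printf("NQ %d\n", pick_least_loaded(load, 3));	/* -> NQ 1 */
	return 0;
}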
- */ - nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt); - nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)]; cq->qplib_cq.max_wqe = entries; - cq->qplib_cq.cnq_hw_ring_id = nq->ring_id; - cq->qplib_cq.nq = nq; + cq->qplib_cq.coalescing = &rdev->cq_coalescing; + cq->qplib_cq.nq = bnxt_re_get_nq(rdev); + cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id; rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); if (rc) { @@ -3078,7 +3156,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, cq->ib_cq.cqe = entries; cq->cq_period = cq->qplib_cq.period; - nq->budget++; active_cqs = atomic_inc_return(&rdev->stats.res.cq_count); if (active_cqs > rdev->stats.res.cq_watermark) @@ -3633,7 +3710,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, wc->byte_len = orig_cqe->length; wc->qp = &gsi_qp->ib_qp; - wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata)); + wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata); wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { @@ -3778,7 +3855,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) (unsigned long)(cqe->qp_handle), struct bnxt_re_qp, qplib_qp); wc->qp = &qp->ib_qp; - wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata)); + if (cqe->flags & CQ_RES_RC_FLAGS_IMM) + wc->ex.imm_data = cpu_to_be32(cqe->immdata); + else + wc->ex.invalidate_rkey = cqe->invrkey; wc->src_qp = cqe->src_qp; memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->port_num = 1; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index b789e47ec97a..ac59f1d73b15 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -95,6 +95,7 @@ struct bnxt_re_qp { struct ib_ud_header qp1_hdr; struct bnxt_re_cq *scq; struct bnxt_re_cq *rcq; + struct dentry *dentry; }; struct bnxt_re_cq { @@ -196,6 +197,9 @@ static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev, int bnxt_re_query_device(struct ib_device *ibdev, struct ib_device_attr *ib_attr, struct ib_udata *udata); +int bnxt_re_modify_device(struct ib_device *ibdev, + int device_modify_mask, + struct ib_device_modify *device_modify); int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, struct ib_port_attr *port_attr); int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 9eb290ec71a8..b7af0d5ff3b6 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -67,6 +67,7 @@ #include <rdma/bnxt_re-abi.h> #include "bnxt.h" #include "hw_counters.h" +#include "debugfs.h" static char version[] = BNXT_RE_DESC "\n"; @@ -183,6 +184,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) rdev->rcfw.res = &rdev->qplib_res; rdev->qplib_res.dattr = &rdev->dev_attr; rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev); + rdev->qplib_res.en_dev = en_dev; bnxt_re_set_drv_mode(rdev); @@ -287,12 +289,17 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev) { + /* + * Use the total VF count since the actual VF count may not be + * available at this point. 
+ */ rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev); - if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) { - bnxt_re_set_resource_limits(rdev); - bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, - &rdev->qplib_ctx); - } + if (!rdev->num_vfs) + return; + + bnxt_re_set_resource_limits(rdev); + bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, + &rdev->qplib_ctx); } static void bnxt_re_shutdown(struct auxiliary_device *adev) @@ -316,8 +323,8 @@ static void bnxt_re_stop_irq(void *handle) rdev = en_info->rdev; rcfw = &rdev->rcfw; - for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { - nq = &rdev->nq[indx - 1]; + for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) { + nq = &rdev->nqr->nq[indx - 1]; bnxt_qplib_nq_stop_irq(nq, false); } @@ -334,7 +341,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) int indx, rc; rdev = en_info->rdev; - msix_ent = rdev->en_dev->msix_entries; + msix_ent = rdev->nqr->msix_entries; rcfw = &rdev->rcfw; if (!ent) { /* Not setting the f/w timeout bit in rcfw. @@ -349,8 +356,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) /* Vectors may change after restart, so update with new vectors * in device sctructure. */ - for (indx = 0; indx < rdev->num_msix; indx++) - rdev->en_dev->msix_entries[indx].vector = ent[indx].vector; + for (indx = 0; indx < rdev->nqr->num_msix; indx++) + rdev->nqr->msix_entries[indx].vector = ent[indx].vector; rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector, false); @@ -358,8 +365,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n"); return; } - for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) { - nq = &rdev->nq[indx - 1]; + for (indx = BNXT_RE_NQ_IDX ; indx < rdev->nqr->num_msix; indx++) { + nq = &rdev->nqr->nq[indx - 1]; rc = bnxt_qplib_nq_start_irq(nq, indx - 1, msix_ent[indx].vector, false); if (rc) { @@ -873,6 +880,253 @@ static const struct attribute_group bnxt_re_dev_attr_group = { .attrs = bnxt_re_attributes, }; +static int bnxt_re_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr) +{ + struct bnxt_qplib_hwq *mr_hwq; + struct nlattr *table_attr; + struct bnxt_re_mr *mr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); + mr_hwq = &mr->qplib_mr.hwq; + + if (rdma_nl_put_driver_u32(msg, "page_size", + mr_hwq->qe_ppg * mr_hwq->element_size)) + goto err; + if (rdma_nl_put_driver_u32(msg, "max_elements", mr_hwq->max_elements)) + goto err; + if (rdma_nl_put_driver_u32(msg, "element_size", mr_hwq->element_size)) + goto err; + if (rdma_nl_put_driver_u64_hex(msg, "hwq", (unsigned long)mr_hwq)) + goto err; + if (rdma_nl_put_driver_u64_hex(msg, "va", mr->qplib_mr.va)) + goto err; + + nla_nest_end(msg, table_attr); + return 0; + +err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int bnxt_re_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr) +{ + struct bnxt_re_dev *rdev; + struct bnxt_re_mr *mr; + int err, len; + void *data; + + mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); + rdev = mr->rdev; + + err = bnxt_re_read_context_allowed(rdev); + if (err) + return err; + + len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? 
BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 : + BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5; + data = kzalloc(len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_MRW, + mr->qplib_mr.lkey, len, data); + if (!err) + err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); + + kfree(data); + return err; +} + +static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq) +{ + struct bnxt_qplib_hwq *cq_hwq; + struct nlattr *table_attr; + struct bnxt_re_cq *cq; + + cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); + cq_hwq = &cq->qplib_cq.hwq; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + if (rdma_nl_put_driver_u32(msg, "cq_depth", cq_hwq->depth)) + goto err; + if (rdma_nl_put_driver_u32(msg, "max_elements", cq_hwq->max_elements)) + goto err; + if (rdma_nl_put_driver_u32(msg, "element_size", cq_hwq->element_size)) + goto err; + if (rdma_nl_put_driver_u32(msg, "max_wqe", cq->qplib_cq.max_wqe)) + goto err; + + nla_nest_end(msg, table_attr); + return 0; + +err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int bnxt_re_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq) +{ + struct bnxt_re_dev *rdev; + struct bnxt_re_cq *cq; + int err, len; + void *data; + + cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); + rdev = cq->rdev; + + err = bnxt_re_read_context_allowed(rdev); + if (err) + return err; + + len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 : + BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5; + data = kzalloc(len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + err = bnxt_qplib_read_context(&rdev->rcfw, + CMDQ_READ_CONTEXT_TYPE_CQ, + cq->qplib_cq.id, len, data); + if (!err) + err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); + + kfree(data); + return err; +} + +static int bnxt_re_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp) +{ + struct bnxt_qplib_qp *qplib_qp; + struct nlattr *table_attr; + struct bnxt_re_qp *qp; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); + qplib_qp = &qp->qplib_qp; + + if (rdma_nl_put_driver_u32(msg, "sq_max_wqe", qplib_qp->sq.max_wqe)) + goto err; + if (rdma_nl_put_driver_u32(msg, "sq_max_sge", qplib_qp->sq.max_sge)) + goto err; + if (rdma_nl_put_driver_u32(msg, "sq_wqe_size", qplib_qp->sq.wqe_size)) + goto err; + if (rdma_nl_put_driver_u32(msg, "sq_swq_start", qplib_qp->sq.swq_start)) + goto err; + if (rdma_nl_put_driver_u32(msg, "sq_swq_last", qplib_qp->sq.swq_last)) + goto err; + if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe)) + goto err; + if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge)) + goto err; + if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size)) + goto err; + if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start)) + goto err; + if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last)) + goto err; + if (rdma_nl_put_driver_u32(msg, "timeout", qplib_qp->timeout)) + goto err; + + nla_nest_end(msg, table_attr); + return 0; + +err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int bnxt_re_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibqp->device, ibdev); + int err, len; + void *data; + + err = bnxt_re_read_context_allowed(rdev); + if (err) + return err; + + len = 
bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 : + BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5; + data = kzalloc(len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_QPC, + ibqp->qp_num, len, data); + if (!err) + err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); + + kfree(data); + return err; +} + +static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq) +{ + struct nlattr *table_attr; + struct bnxt_re_srq *srq; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); + + if (rdma_nl_put_driver_u32_hex(msg, "wqe_size", srq->qplib_srq.wqe_size)) + goto err; + if (rdma_nl_put_driver_u32_hex(msg, "max_wqe", srq->qplib_srq.max_wqe)) + goto err; + if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge)) + goto err; + + nla_nest_end(msg, table_attr); + return 0; + +err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int bnxt_re_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq) +{ + struct bnxt_re_dev *rdev; + struct bnxt_re_srq *srq; + int err, len; + void *data; + + srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); + rdev = srq->rdev; + + err = bnxt_re_read_context_allowed(rdev); + if (err) + return err; + + len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 : + BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5; + + data = kzalloc(len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_SRQ, + srq->qplib_srq.id, len, data); + if (!err) + err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); + + kfree(data); + return err; +} + static const struct ib_device_ops bnxt_re_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_BNXT_RE, @@ -914,6 +1168,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = { .post_srq_recv = bnxt_re_post_srq_recv, .query_ah = bnxt_re_query_ah, .query_device = bnxt_re_query_device, + .modify_device = bnxt_re_modify_device, .query_pkey = bnxt_re_query_pkey, .query_port = bnxt_re_query_port, .query_qp = bnxt_re_query_qp, @@ -930,6 +1185,17 @@ static const struct ib_device_ops bnxt_re_dev_ops = { INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx), }; +static const struct ib_device_ops restrack_ops = { + .fill_res_cq_entry = bnxt_re_fill_res_cq_entry, + .fill_res_cq_entry_raw = bnxt_re_fill_res_cq_entry_raw, + .fill_res_qp_entry = bnxt_re_fill_res_qp_entry, + .fill_res_qp_entry_raw = bnxt_re_fill_res_qp_entry_raw, + .fill_res_mr_entry = bnxt_re_fill_res_mr_entry, + .fill_res_mr_entry_raw = bnxt_re_fill_res_mr_entry_raw, + .fill_res_srq_entry = bnxt_re_fill_res_srq_entry, + .fill_res_srq_entry_raw = bnxt_re_fill_res_srq_entry_raw, +}; + static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) { struct ib_device *ibdev = &rdev->ibdev; @@ -943,7 +1209,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr); - ibdev->num_comp_vectors = rdev->num_msix - 1; + ibdev->num_comp_vectors = rdev->nqr->num_msix - 1; ibdev->dev.parent = &rdev->en_dev->pdev->dev; ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; @@ -951,6 +1217,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ibdev->driver_def = bnxt_re_uapi_defs; ib_set_device_ops(ibdev, &bnxt_re_dev_ops); + ib_set_device_ops(ibdev, &restrack_ops); ret = ib_device_set_netdev(&rdev->ibdev, 
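Every fill_res_*_entry() callback wired up through restrack_ops follows one netlink shape: open an RDMA_NLDEV_ATTR_DRIVER nest, emit driver key/value attributes, and close the nest, or cancel it if the message overflows. Reduced to a skeleton (the attribute name and value are illustrative):

static int fill_demo_entry(struct sk_buff *msg)
{
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "demo_depth", 128))
		goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}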
rdev->netdev, 1); if (ret) return ret; @@ -990,6 +1257,15 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev, atomic_set(&rdev->stats.res.pd_count, 0); rdev->cosq[0] = 0xFFFF; rdev->cosq[1] = 0xFFFF; + rdev->cq_coalescing.buf_maxtime = BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME; + if (bnxt_re_chip_gen_p7(en_dev->chip_num)) { + rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7; + rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7; + } else { + rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5; + rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5; + } + rdev->cq_coalescing.en_ring_idle_mode = BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE; return rdev; } @@ -1276,8 +1552,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) { int i; - for (i = 1; i < rdev->num_msix; i++) - bnxt_qplib_disable_nq(&rdev->nq[i - 1]); + for (i = 1; i < rdev->nqr->num_msix; i++) + bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]); if (rdev->qplib_res.rcfw) bnxt_qplib_cleanup_res(&rdev->qplib_res); @@ -1291,10 +1567,12 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev) bnxt_qplib_init_res(&rdev->qplib_res); - for (i = 1; i < rdev->num_msix ; i++) { - db_offt = rdev->en_dev->msix_entries[i].db_offset; - rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1], - i - 1, rdev->en_dev->msix_entries[i].vector, + mutex_init(&rdev->nqr->load_lock); + + for (i = 1; i < rdev->nqr->num_msix ; i++) { + db_offt = rdev->nqr->msix_entries[i].db_offset; + rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1], + i - 1, rdev->nqr->msix_entries[i].vector, db_offt, &bnxt_re_cqn_handler, &bnxt_re_srqn_handler); if (rc) { @@ -1307,20 +1585,22 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev) return 0; fail: for (i = num_vec_enabled; i >= 0; i--) - bnxt_qplib_disable_nq(&rdev->nq[i]); + bnxt_qplib_disable_nq(&rdev->nqr->nq[i]); return rc; } static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) { + struct bnxt_qplib_nq *nq; u8 type; int i; - for (i = 0; i < rdev->num_msix - 1; i++) { + for (i = 0; i < rdev->nqr->num_msix - 1; i++) { type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); - bnxt_qplib_free_nq(&rdev->nq[i]); - rdev->nq[i].res = NULL; + nq = &rdev->nqr->nq[i]; + bnxt_re_net_ring_free(rdev, nq->ring_id, type); + bnxt_qplib_free_nq(nq); + nq->res = NULL; } } @@ -1362,12 +1642,12 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) if (rc) goto dealloc_res; - for (i = 0; i < rdev->num_msix - 1; i++) { + for (i = 0; i < rdev->nqr->num_msix - 1; i++) { struct bnxt_qplib_nq *nq; - nq = &rdev->nq[i]; + nq = &rdev->nqr->nq[i]; nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; - rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); + rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, nq); if (rc) { ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", i, rc); @@ -1375,17 +1655,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) } type = bnxt_qplib_get_ring_type(rdev->chip_ctx); rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr; - rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count; + rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count; rattr.type = type; rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1; - rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx; + rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx; rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); if 
(rc) { ibdev_err(&rdev->ibdev, "Failed to allocate NQ fw id with rc = 0x%x", rc); - bnxt_qplib_free_nq(&rdev->nq[i]); + bnxt_qplib_free_nq(nq); goto free_nq; } num_vec_created++; @@ -1394,8 +1674,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) free_nq: for (i = num_vec_created - 1; i >= 0; i--) { type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); - bnxt_qplib_free_nq(&rdev->nq[i]); + bnxt_re_net_ring_free(rdev, rdev->nqr->nq[i].ring_id, type); + bnxt_qplib_free_nq(&rdev->nqr->nq[i]); } bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &rdev->dpi_privileged); @@ -1590,11 +1870,28 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) return rc; } +static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev) +{ + rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL); + if (!rdev->nqr) + return -ENOMEM; + + return 0; +} + +static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev) +{ + kfree(rdev->nqr); + rdev->nqr = NULL; +} + static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) { u8 type; int rc; + bnxt_re_debugfs_rem_pdev(rdev); + if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) cancel_delayed_work_sync(&rdev->worker); @@ -1617,11 +1914,12 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) bnxt_qplib_free_rcfw_channel(&rdev->rcfw); } - rdev->num_msix = 0; + rdev->nqr->num_msix = 0; if (rdev->pacing.dbr_pacing) bnxt_re_deinitialize_dbr_pacing(rdev); + bnxt_re_free_nqr_mem(rdev); bnxt_re_destroy_chip_ctx(rdev); if (op_type == BNXT_RE_COMPLETE_REMOVE) { if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) @@ -1659,6 +1957,17 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) } set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) { + ibdev_err(&rdev->ibdev, + "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n", + rdev->en_dev->ulp_tbl->msix_requested); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + return -EINVAL; + } + ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", + rdev->en_dev->ulp_tbl->msix_requested); + rc = bnxt_re_setup_chip_ctx(rdev); if (rc) { bnxt_unregister_dev(rdev->en_dev); @@ -1667,19 +1976,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) return -EINVAL; } + rc = bnxt_re_alloc_nqr_mem(rdev); + if (rc) { + bnxt_re_destroy_chip_ctx(rdev); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + return rc; + } + rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested; + memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries, + sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix); + /* Check whether VF or PF */ bnxt_re_get_sriov_func_type(rdev); - if (!rdev->en_dev->ulp_tbl->msix_requested) { - ibdev_err(&rdev->ibdev, - "Failed to get MSI-X vectors: %#x\n", rc); - rc = -EINVAL; - goto fail; - } - ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", - rdev->en_dev->ulp_tbl->msix_requested); - rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested; - bnxt_re_query_hwrm_intf_version(rdev); /* Establish RCFW Communication Channel to initialize the context @@ -1701,14 +2011,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) rattr.type = type; rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1; - rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; + rattr.lrid = 
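The BNXT_RE_MIN_MSIX check above pins down the vector layout the driver assumes everywhere: vector 0 serves the CREQ/AEQ and each later vector drives one NQ, which is why the NQ loops index nq[indx - 1] and num_comp_vectors is num_msix - 1. Schematically (assuming the driver's BNXT_RE_AEQ_IDX/BNXT_RE_NQ_IDX layout):

/* msix_entries[0]        -> CREQ/AEQ (BNXT_RE_AEQ_IDX)
 * msix_entries[1..n - 1] -> NQ 0 .. NQ n-2 (from BNXT_RE_NQ_IDX up)
 */
static struct bnxt_qplib_nq *vec_to_nq(struct bnxt_re_dev *rdev, int vec)
{
	return &rdev->nqr->nq[vec - 1];	/* vec >= BNXT_RE_NQ_IDX */
}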
rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); if (rc) { ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc); goto free_rcfw; } - db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset; - vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector; + db_offt = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset; + vid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector; rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, vid, db_offt, &bnxt_re_aeq_handler); @@ -1785,16 +2095,16 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker); set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags); schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); - /* - * Use the total VF count since the actual VF count may not be - * available at this point. - */ - bnxt_re_vf_res_config(rdev); + + if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)) + bnxt_re_vf_res_config(rdev); } hash_init(rdev->cq_hash); if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) hash_init(rdev->srq_hash); + bnxt_re_debugfs_add_pdev(rdev); + return 0; free_sctx: bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); @@ -1896,11 +2206,10 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable) if (enable) { cc_param.enable = 1; - cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE; + cc_param.tos_ecn = 1; } - cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE | - CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC | + cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC | CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN); if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param)) @@ -2033,12 +2342,6 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) rdev = en_info->rdev; en_dev = en_info->en_dev; mutex_lock(&bnxt_re_mutex); - /* L2 driver may invoke this callback during device error/crash or device - * reset. Current RoCE driver doesn't recover the device in case of - * error. Handle the error by dispatching fatal events to all qps - * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as - * L2 driver want to modify the MSIx table. - */ ibdev_info(&rdev->ibdev, "Handle device suspend call"); /* Check the current device state from bnxt_en_dev and move the @@ -2046,17 +2349,12 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) * This prevents more commands to HW during clean-up, * in case the device is already in error. */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) + if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) { set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); - - bnxt_re_dev_stop(rdev); - bnxt_re_stop_irq(adev); - /* Move the device states to detached and avoid sending any more - * commands to HW - */ - set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); - set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); - wake_up_all(&rdev->rcfw.cmdq.waitq); + set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); + wake_up_all(&rdev->rcfw.cmdq.waitq); + bnxt_re_dev_stop(rdev); + } if (rdev->pacing.dbr_pacing) bnxt_re_set_pacing_dev_state(rdev); @@ -2075,13 +2373,6 @@ static int bnxt_re_resume(struct auxiliary_device *adev) struct bnxt_re_dev *rdev; mutex_lock(&bnxt_re_mutex); - /* L2 driver may invoke this callback during device recovery, resume. - * reset. 
Current RoCE driver doesn't recover the device in case of - * error. Handle the error by dispatching fatal events to all qps - * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as - * L2 driver want to modify the MSIx table. - */ - bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT); rdev = en_info->rdev; ibdev_info(&rdev->ibdev, "Device resume completed"); @@ -2112,18 +2403,24 @@ static int __init bnxt_re_mod_init(void) int rc; pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version); + bnxt_re_register_debugfs(); + rc = auxiliary_driver_register(&bnxt_re_driver); if (rc) { pr_err("%s: Failed to register auxiliary driver\n", ROCE_DRV_MODULE_NAME); - return rc; + goto err_debug; } return 0; +err_debug: + bnxt_re_unregister_debugfs(); + return rc; } static void __exit bnxt_re_mod_exit(void) { auxiliary_driver_unregister(&bnxt_re_driver); + bnxt_re_unregister_debugfs(); } module_init(bnxt_re_mod_init); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 7ad83566ab0f..e42abf5be6c0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -556,6 +556,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, nq->pdev = pdev; nq->cqn_handler = cqn_handler; nq->srqn_handler = srqn_handler; + nq->load = 0; /* Have a task to schedule CQ notifiers in post send case */ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); @@ -1282,12 +1283,47 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp) } } +static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp, + struct cmdq_modify_qp *req) +{ + u32 mandatory_flags = 0; + + if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC) + mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; + + if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT && + qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) { + if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq) + req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED); + mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; + } + + if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD || + qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI) + mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; + + qp->modify_flags |= mandatory_flags; + req->qp_type = qp->type; +} + +static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp) +{ + if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT && + qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) || + (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR && + qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) + return true; + + return false; +} + int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct creq_modify_qp_resp resp = {}; struct bnxt_qplib_cmdqmsg msg = {}; struct cmdq_modify_qp req = {}; + u16 vlan_pcp_vlan_dei_vlan_id; u32 temp32[4]; u32 bmask; int rc; @@ -1298,6 +1334,12 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) /* Filter out the qp_attr_mask based on the state->new transition */ __filter_modify_flags(qp); + if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) { + /* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */ + if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) && + is_optimized_state_transition(qp)) + bnxt_set_mandatory_attributes(qp, &req); + } bmask = qp->modify_flags; req.modify_mask = cpu_to_le32(qp->modify_flags); req.qp_cid = cpu_to_le32(qp->id); @@ -1378,7 +1420,16 @@ int bnxt_qplib_modify_qp(struct 
bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID) req.dest_qp_id = cpu_to_le32(qp->dest_qpn); - req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) { + vlan_pcp_vlan_dei_vlan_id = + ((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id << + CMDQ_MODIFY_QP_VLAN_ID_SFT) & + CMDQ_MODIFY_QP_VLAN_ID_MASK); + vlan_pcp_vlan_dei_vlan_id |= + ((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) & + CMDQ_MODIFY_QP_VLAN_PCP_MASK); + req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id); + } bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0); rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); @@ -2151,6 +2202,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) struct bnxt_qplib_cmdqmsg msg = {}; struct cmdq_create_cq req = {}; struct bnxt_qplib_pbl *pbl; + u32 coalescing = 0; u32 pg_sz_lvl; int rc; @@ -2177,6 +2229,25 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) req.dpi = cpu_to_le32(cq->dpi->dpi); req.cq_handle = cpu_to_le64(cq->cq_handle); req.cq_size = cpu_to_le32(cq->max_wqe); + + if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) { + req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID); + coalescing |= ((cq->coalescing->buf_maxtime << + CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) & + CMDQ_CREATE_CQ_BUF_MAXTIME_MASK); + coalescing |= ((cq->coalescing->normal_maxbuf << + CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) & + CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK); + coalescing |= ((cq->coalescing->during_maxbuf << + CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) & + CMDQ_CREATE_CQ_DURING_MAXBUF_MASK); + if (cq->coalescing->en_ring_idle_mode) + coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE; + else + coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE; + req.coalescing = cpu_to_le32(coalescing); + } + pbl = &cq->hwq.pbl[PBL_LVL_0]; pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) << CMDQ_CREATE_CQ_PG_SIZE_SFT); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 820611a23943..ef3424c81345 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -383,6 +383,25 @@ static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que, return avail <= slots; } +/* CQ coalescing parameters */ +struct bnxt_qplib_cq_coal_param { + u16 buf_maxtime; + u8 normal_maxbuf; + u8 during_maxbuf; + u8 en_ring_idle_mode; +}; + +#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME 0x1 +#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7 0x8 +#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7 0x8 +#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5 0x1 +#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5 0x1 +#define BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE 0x1 +#define BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME 0x1bf +#define BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF 0x1f +#define BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF 0x1f +#define BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE 0x1 + struct bnxt_qplib_cqe { u8 status; u8 type; @@ -391,7 +410,7 @@ struct bnxt_qplib_cqe { u16 cfa_meta; u64 wr_id; union { - __le32 immdata; + u32 immdata; u32 invrkey; }; u64 qp_handle; @@ -445,6 +464,7 @@ struct bnxt_qplib_cq { */ spinlock_t flush_lock; /* QP flush management */ u16 cnq_events; + struct bnxt_qplib_cq_coal_param *coalescing; }; #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) @@ -499,6 +519,7 @@ struct bnxt_qplib_nq { struct tasklet_struct nq_tasklet; bool requested; int budget; + u32 
load; cqn_handler_t cqn_handler; srqn_handler_t srqn_handler; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index e82bd37158ad..5e90ea232de8 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -831,6 +831,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct creq_initialize_fw_resp resp = {}; struct cmdq_initialize_fw req = {}; struct bnxt_qplib_cmdqmsg msg = {}; + u16 flags = 0; u8 pgsz, lvl; int rc; @@ -849,10 +850,8 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, * shall setup this area for VF. Skipping the * HW programming */ - if (is_virtfn) + if (is_virtfn || bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) goto skip_ctx_setup; - if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) - goto config_vf_res; lvl = ctx->qpc_tbl.level; pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl); @@ -896,16 +895,14 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements); req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements); -config_vf_res: - req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf); - req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf); - req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf); - req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf); - req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf); - skip_ctx_setup: if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags)) - req.flags |= cpu_to_le16(CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED); + flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED; + if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2)) + flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED; + if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT) + flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT; + req.flags |= cpu_to_le16(flags); req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0); rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 07779aeb7575..88814cb3aa74 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -131,6 +131,8 @@ static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req) #define RCFW_CMD_IS_BLOCKING 0x8000 #define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000DULL +/* HWRM version 1.10.3.18 */ +#define HWRM_VERSION_READ_CTX 0x1000A00030012 /* Crsq buf is 1024-Byte */ struct bnxt_qplib_crsbe { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index c2f710364e0f..21fb148713a6 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -39,6 +39,8 @@ #ifndef __BNXT_QPLIB_RES_H__ #define __BNXT_QPLIB_RES_H__ +#include "bnxt_ulp.h" + extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; #define CHIP_NUM_57508 0x1750 @@ -302,6 +304,7 @@ struct bnxt_qplib_res { struct bnxt_qplib_chip_ctx *cctx; struct bnxt_qplib_dev_attr *dattr; struct net_device *netdev; + struct bnxt_en_dev *en_dev; struct bnxt_qplib_rcfw *rcfw; struct bnxt_qplib_pd_tbl pd_tbl; /* To protect the pd table bit map */ @@ -576,4 +579,14 @@ static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2) return dev_cap_ext_flags2 & 
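HWRM_VERSION_READ_CTX encodes firmware version 1.10.3.18 with 16 bits per component, the same packing as the existing HWRM_VERSION_DEV_ATTR_MAX_DPI (1.10.0.13). Made explicit:

static u64 hwrm_version(u16 major, u16 minor, u16 update, u16 patch)
{
	return ((u64)major << 48) | ((u64)minor << 32) |
	       ((u64)update << 16) | patch;
}

/* hwrm_version(1, 10, 3, 18) == 0x1000A00030012 == HWRM_VERSION_READ_CTX */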
CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED; } +static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2) +{ + return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED; +} + +static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2) +{ + return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED; +} + #endif /* __BNXT_QPLIB_RES_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index e29fbbdab9fd..7e20ae3d2c4f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -981,3 +981,38 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res, rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg); return rc; } + +int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 res_type, + u32 xid, u32 resp_size, void *resp_va) +{ + struct creq_read_context resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_read_context req = {}; + struct bnxt_qplib_rcfw_sbuf sbuf; + int rc; + + sbuf.size = resp_size; + sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) + return -ENOMEM; + + bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, + CMDQ_BASE_OPCODE_READ_CONTEXT, sizeof(req)); + req.resp_addr = cpu_to_le64(sbuf.dma_addr); + req.resp_size = resp_size / BNXT_QPLIB_CMDQE_UNITS; + + req.xid = cpu_to_le32(xid); + req.type = res_type; + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto free_mem; + + memcpy(resp_va, sbuf.sb, resp_size); +free_mem: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr); + return rc; +} diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index ecf3f45fea74..e6beeb514b7d 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -353,6 +353,8 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, struct bnxt_qplib_ext_stat *estat); int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res, struct bnxt_qplib_cc_param *cc_param); +int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid, + u32 resp_size, void *resp_va); #define BNXT_VAR_MAX_WQE 4352 #define BNXT_VAR_MAX_SLOT_ALIGN 256 diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 3ec895284e49..a98fc9c2313e 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -216,6 +216,8 @@ struct cmdq_initialize_fw { __le16 flags; #define CMDQ_INITIALIZE_FW_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL #define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL + #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL + #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL __le16 cookie; u8 resp_size; u8 reserved8; @@ -559,6 +561,7 @@ struct cmdq_modify_qp { #define CMDQ_MODIFY_QP_OPCODE_LAST CMDQ_MODIFY_QP_OPCODE_MODIFY_QP u8 cmd_size; __le16 flags; + #define CMDQ_MODIFY_QP_FLAGS_SRQ_USED 0x1UL __le16 cookie; u8 resp_size; u8 qp_type; @@ -1137,6 +1140,7 @@ struct cmdq_create_cq { #define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL + #define CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID 0x8UL __le16 cookie; u8 resp_size; u8 reserved8; @@ -1169,7 +1173,18 @@ 
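The new bnxt_qplib_read_context() above follows a common side-buffer flow: allocate a DMA-coherent bounce buffer for the response, issue the command, copy the payload out, and free the buffer on every path. A userspace analogue of that control flow, with fake_send_cmd() standing in for bnxt_qplib_rcfw_send_message() and malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent():

/* Userspace sketch of the read-context bounce-buffer flow. */
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int fake_send_cmd(void *resp_buf, size_t size)
{
	memset(resp_buf, 0xab, size);  /* pretend firmware filled the buffer */
	return 0;
}

static int read_context(void *resp_va, size_t resp_size)
{
	void *sb;
	int rc;

	sb = malloc(resp_size);        /* driver: dma_alloc_coherent() */
	if (!sb)
		return -ENOMEM;

	rc = fake_send_cmd(sb, resp_size);
	if (rc)
		goto free_mem;

	memcpy(resp_va, sb, resp_size);
free_mem:
	free(sb);                      /* freed on success and failure alike */
	return rc;
}

int main(void)
{
	char out[64];

	return read_context(out, sizeof(out));
}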
struct cmdq_create_cq { __le32 cq_size; __le64 pbl; __le16 steering_tag; - u8 reserved48[6]; + u8 reserved48[2]; + __le32 coalescing; + #define CMDQ_CREATE_CQ_BUF_MAXTIME_MASK 0x1ffUL + #define CMDQ_CREATE_CQ_BUF_MAXTIME_SFT 0 + #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK 0x3e00UL + #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT 9 + #define CMDQ_CREATE_CQ_DURING_MAXBUF_MASK 0x7c000UL + #define CMDQ_CREATE_CQ_DURING_MAXBUF_SFT 14 + #define CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE 0x80000UL + #define CMDQ_CREATE_CQ_UNUSED12_MASK 0xfff00000UL + #define CMDQ_CREATE_CQ_UNUSED12_SFT 20 + __le64 reserved64; }; /* creq_create_cq_resp (size:128b/16B) */ @@ -2251,6 +2266,46 @@ struct creq_set_func_resources_resp { u8 reserved48[6]; }; +/* cmdq_read_context (size:192b/24B) */ +struct cmdq_read_context { + u8 opcode; + #define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT 0x85UL + #define CMDQ_READ_CONTEXT_OPCODE_LAST CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 xid; + u8 type; + #define CMDQ_READ_CONTEXT_TYPE_QPC 0x0UL + #define CMDQ_READ_CONTEXT_TYPE_CQ 0x1UL + #define CMDQ_READ_CONTEXT_TYPE_MRW 0x2UL + #define CMDQ_READ_CONTEXT_TYPE_SRQ 0x3UL + #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ + u8 unused_0[3]; +}; + +/* creq_read_context (size:128b/16B) */ +struct creq_read_context { + u8 type; + #define CREQ_READ_CONTEXT_TYPE_MASK 0x3fUL + #define CREQ_READ_CONTEXT_TYPE_SFT 0 + #define CREQ_READ_CONTEXT_TYPE_QP_EVENT 0x38UL + #define CREQ_READ_CONTEXT_TYPE_LAST CREQ_READ_CONTEXT_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_READ_CONTEXT_V 0x1UL + u8 event; + #define CREQ_READ_CONTEXT_EVENT_READ_CONTEXT 0x85UL + #define CREQ_READ_CONTEXT_EVENT_LAST CREQ_READ_CONTEXT_EVENT_READ_CONTEXT + __le16 reserved16; + __le32 reserved_32; +}; + /* cmdq_map_tc_to_cos (size:192b/24B) */ struct cmdq_map_tc_to_cos { u8 opcode; diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h index cd03a5429beb..fe0b6aec7839 100644 --- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h @@ -30,7 +30,8 @@ enum efa_admin_aq_opcode { EFA_ADMIN_DEALLOC_UAR = 17, EFA_ADMIN_CREATE_EQ = 18, EFA_ADMIN_DESTROY_EQ = 19, - EFA_ADMIN_MAX_OPCODE = 19, + EFA_ADMIN_ALLOC_MR = 20, + EFA_ADMIN_MAX_OPCODE = 20, }; enum efa_admin_aq_feature_id { @@ -150,8 +151,11 @@ struct efa_admin_create_qp_cmd { /* UAR number */ u16 uar; + /* Requested service level for the QP, 0 is the default SL */ + u8 sl; + /* MBZ */ - u16 reserved; + u8 reserved; /* MBZ */ u32 reserved2; @@ -459,6 +463,41 @@ struct efa_admin_dereg_mr_resp { struct efa_admin_acq_common_desc acq_common_desc; }; +/* + * Allocation of MemoryRegion, required for QP working with Virtual + * Addresses in kernel verbs semantics, ready for fast registration use. + */ +struct efa_admin_alloc_mr_cmd { + /* Common Admin Queue descriptor */ + struct efa_admin_aq_common_desc aq_common_desc; + + /* Protection Domain */ + u16 pd; + + /* MBZ */ + u16 reserved1; + + /* Maximum number of pages this MR supports. 
*/ + u32 max_pages; +}; + +struct efa_admin_alloc_mr_resp { + /* Common Admin Queue completion descriptor */ + struct efa_admin_acq_common_desc acq_common_desc; + + /* + * L_Key, to be used in conjunction with local buffer references in + * SQ and RQ WQE, or with virtual RQ/CQ rings + */ + u32 l_key; + + /* + * R_Key, to be used in RDMA messages to refer to remotely accessed + * memory region + */ + u32 r_key; +}; + struct efa_admin_create_cq_cmd { struct efa_admin_aq_common_desc aq_common_desc; @@ -483,8 +522,8 @@ struct efa_admin_create_cq_cmd { */ u8 cq_caps_2; - /* completion queue depth in # of entries. must be power of 2 */ - u16 cq_depth; + /* Sub completion queue depth in # of entries. must be power of 2 */ + u16 sub_cq_depth; /* EQ number assigned to this cq */ u16 eqn; @@ -519,8 +558,8 @@ struct efa_admin_create_cq_resp { u16 cq_idx; - /* actual cq depth in number of entries */ - u16 cq_actual_depth; + /* actual sub cq depth in number of entries */ + u16 sub_cq_actual_depth; /* CQ doorbell address, as offset to PCIe DB BAR */ u32 db_offset; @@ -578,6 +617,8 @@ struct efa_admin_basic_stats { u64 rx_pkts; u64 rx_drops; + + u64 qkey_viol; }; struct efa_admin_messages_stats { @@ -677,6 +718,15 @@ struct efa_admin_feature_device_attr_desc { /* Unique global ID for an EFA device */ u64 guid; + + /* The device maximum link speed in Gbit/sec */ + u16 max_link_speed_gbps; + + /* MBZ */ + u16 reserved0; + + /* MBZ */ + u32 reserved1; }; struct efa_admin_feature_queue_attr_desc { @@ -1057,7 +1107,6 @@ struct efa_admin_host_info { /* create_eq_cmd */ #define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) -#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK BIT(6) #define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK BIT(0) /* host_info */ diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h index 83f20c38a840..35700c93e639 100644 --- a/drivers/infiniband/hw/efa/efa_admin_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef _EFA_ADMIN_H_ @@ -96,7 +96,7 @@ struct efa_admin_acq_entry { struct efa_admin_aenq_common_desc { u16 group; - u16 syndrom; + u16 syndrome; /* * 0 : phase diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 5a774925cdea..c6b89c45fdc9 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -31,6 +31,7 @@ int efa_com_create_qp(struct efa_com_dev *edev, create_qp_cmd.qp_alloc_size.recv_queue_depth = params->rq_depth; create_qp_cmd.uar = params->uarn; + create_qp_cmd.sl = params->sl; if (params->unsolicited_write_recv) EFA_SET(&create_qp_cmd.flags, EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV, 1); @@ -163,7 +164,7 @@ int efa_com_create_cq(struct efa_com_dev *edev, EFA_SET(&create_cmd.cq_caps_2, EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS, params->entry_size_in_bytes / 4); - create_cmd.cq_depth = params->cq_depth; + create_cmd.sub_cq_depth = params->sub_cq_depth; create_cmd.num_sub_cqs = params->num_sub_cqs; create_cmd.uar = params->uarn; if (params->interrupt_mode_enabled) { @@ -191,7 +192,7 @@ int efa_com_create_cq(struct efa_com_dev *edev, } result->cq_idx = cmd_completion.cq_idx; - result->actual_depth = params->cq_depth; + result->actual_depth = params->sub_cq_depth; result->db_off = cmd_completion.db_offset; result->db_valid = EFA_GET(&cmd_completion.flags, EFA_ADMIN_CREATE_CQ_RESP_DB_VALID); @@ -466,6 +467,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, result->max_rdma_size = resp.u.device_attr.max_rdma_size; result->device_caps = resp.u.device_attr.device_caps; result->guid = resp.u.device_attr.guid; + result->max_link_speed_gbps = resp.u.device_attr.max_link_speed_gbps; if (result->admin_api_version < 1) { ibdev_err_ratelimited( diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index 668d033f7477..5511355b700d 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h @@ -27,6 +27,7 @@ struct efa_com_create_qp_params { u16 pd; u16 uarn; u8 qp_type; + u8 sl; u8 unsolicited_write_recv : 1; }; @@ -71,7 +72,7 @@ struct efa_com_create_cq_params { /* cq physical base address in OS memory */ dma_addr_t dma_addr; /* completion queue depth in # of entries */ - u16 cq_depth; + u16 sub_cq_depth; u16 num_sub_cqs; u16 uarn; u16 eqn; @@ -141,6 +142,7 @@ struct efa_com_get_device_attr_result { u16 max_wr_rdma_sge; u16 max_tx_batch; u16 min_sq_depth; + u16 max_link_speed_gbps; u8 db_bar; }; diff --git a/drivers/infiniband/hw/efa/efa_io_defs.h b/drivers/infiniband/hw/efa/efa_io_defs.h index 2d8eb96eaa81..a4c9fd33da38 100644 --- a/drivers/infiniband/hw/efa/efa_io_defs.h +++ b/drivers/infiniband/hw/efa/efa_io_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef _EFA_IO_H_ @@ -10,6 +10,7 @@ #define EFA_IO_TX_DESC_NUM_RDMA_BUFS 1 #define EFA_IO_TX_DESC_INLINE_MAX_SIZE 32 #define EFA_IO_TX_DESC_IMM_DATA_SIZE 4 +#define EFA_IO_TX_DESC_INLINE_PBL_SIZE 1 enum efa_io_queue_type { /* send queue (of a QP) */ @@ -25,6 +26,10 @@ enum efa_io_send_op_type { EFA_IO_RDMA_READ = 1, /* RDMA write */ EFA_IO_RDMA_WRITE = 2, + /* Fast MR registration */ + EFA_IO_FAST_REG = 3, + /* Fast MR invalidation */ + EFA_IO_FAST_INV = 4, }; enum efa_io_comp_status { @@ -34,15 +39,15 @@ enum efa_io_comp_status { EFA_IO_COMP_STATUS_FLUSHED = 1, /* Internal QP error */ EFA_IO_COMP_STATUS_LOCAL_ERROR_QP_INTERNAL_ERROR = 2, - /* Bad operation type */ - EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_OP_TYPE = 3, + /* Unsupported operation */ + EFA_IO_COMP_STATUS_LOCAL_ERROR_UNSUPPORTED_OP = 3, /* Bad AH */ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_AH = 4, /* LKEY not registered or does not match IOVA */ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_LKEY = 5, /* Message too long */ EFA_IO_COMP_STATUS_LOCAL_ERROR_BAD_LENGTH = 6, - /* Destination ENI is down or does not run EFA */ + /* RKEY not registered or does not match remote IOVA */ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_ADDRESS = 7, /* Connection was reset by remote side */ EFA_IO_COMP_STATUS_REMOTE_ERROR_ABORT = 8, @@ -54,8 +59,17 @@ enum efa_io_comp_status { EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_LENGTH = 11, /* Unexpected status returned by responder */ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_STATUS = 12, - /* Unresponsive remote - detected locally */ + /* Unresponsive remote - was previously responsive */ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNRESP_REMOTE = 13, + /* No valid AH at remote side (required for RDMA operations) */ + EFA_IO_COMP_STATUS_REMOTE_ERROR_UNKNOWN_PEER = 14, + /* Unreachable remote - never received a response */ + EFA_IO_COMP_STATUS_LOCAL_ERROR_UNREACH_REMOTE = 15, +}; + +enum efa_io_frwr_pbl_mode { + EFA_IO_FRWR_INLINE_PBL = 0, + EFA_IO_FRWR_DIRECT_PBL = 1, }; struct efa_io_tx_meta_desc { @@ -95,13 +109,13 @@ struct efa_io_tx_meta_desc { /* * If inline_msg bit is set, length of inline message in bytes, - * otherwise length of SGL (number of buffers). + * otherwise length of SGL (number of buffers). */ u16 length; /* - * immediate data: if has_imm is set, then this field is included - * within Tx message and reported in remote Rx completion. + * immediate data: if has_imm is set, then this field is included within + * Tx message and reported in remote Rx completion. */ u32 immediate_data; @@ -158,6 +172,63 @@ struct efa_io_rdma_req { struct efa_io_tx_buf_desc local_mem[1]; }; +struct efa_io_fast_mr_reg_req { + /* Updated local key of the MR after lkey/rkey increment */ + u32 lkey; + + /* + * permissions + * 0 : local_write_enable - Local write permissions: + * must be set for RQ buffers and buffers posted for + * RDMA Read requests + * 1 : remote_write_enable - Remote write + * permissions: must be set to enable RDMA write to + * the region + * 2 : remote_read_enable - Remote read permissions: + * must be set to enable RDMA read from the region + * 7:3 : reserved2 - MBZ + */ + u8 permissions; + + /* + * control flags + * 4:0 : phys_page_size_shift - page size is (1 << + * phys_page_size_shift) + * 6:5 : pbl_mode - enum efa_io_frwr_pbl_mode + * 7 : reserved - MBZ + */ + u8 flags; + + /* MBZ */ + u8 reserved[2]; + + /* IO Virtual Address associated with this MR */ + u64 iova; + + /* Memory region length, in bytes */ + u64 mr_length; + + /* Physical Buffer List, each element is page-aligned. 
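The efa_io_fast_mr_reg_req layout above packs permissions into one byte (local write bit 0, remote write bit 1, remote read bit 2) and the page-size shift plus PBL mode into another (bits 4:0 and 6:5). A compact sketch of that packing, using local constants that mirror the EFA_IO_FAST_MR_REG_REQ_* masks defined further down in this hunk:

/* Sketch: building the FRWR permission and flags bytes. */
#include <stdint.h>
#include <stdio.h>

#define MR_LOCAL_WRITE        0x01u  /* BIT(0) */
#define MR_REMOTE_WRITE       0x02u  /* BIT(1) */
#define MR_REMOTE_READ        0x04u  /* BIT(2) */
#define MR_PAGE_SHIFT_MASK    0x1fu  /* GENMASK(4, 0) */
#define MR_PBL_MODE_SHIFT     5
#define MR_PBL_MODE_MASK      0x60u  /* GENMASK(6, 5) */

enum pbl_mode { PBL_INLINE = 0, PBL_DIRECT = 1 };

static uint8_t pack_permissions(int lw, int rw, int rr)
{
	return (lw ? MR_LOCAL_WRITE : 0) |
	       (rw ? MR_REMOTE_WRITE : 0) |
	       (rr ? MR_REMOTE_READ : 0);
}

static uint8_t pack_flags(unsigned int page_shift, enum pbl_mode mode)
{
	return (page_shift & MR_PAGE_SHIFT_MASK) |
	       (((uint8_t)mode << MR_PBL_MODE_SHIFT) & MR_PBL_MODE_MASK);
}

int main(void)
{
	/* e.g. local+remote write, 4 KiB pages (shift 12), inline PBL */
	printf("perm 0x%x flags 0x%x\n",
	       pack_permissions(1, 1, 0), pack_flags(12, PBL_INLINE));
	return 0;
}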
*/ + union { + /* + * Inline array of physical page addresses (optimization + * for short region activation). + */ + u64 inline_array[1]; + + /* points to PBL (Currently only direct) */ + u64 dma_addr; + } pbl; +}; + +struct efa_io_fast_mr_inv_req { + /* Local key of the MR to invalidate */ + u32 lkey; + + /* MBZ */ + u8 reserved[28]; +}; + /* * Tx WQE, composed of tx meta descriptors followed by either tx buffer * descriptors or inline data @@ -174,6 +245,12 @@ struct efa_io_tx_wqe { /* RDMA local and remote memory addresses */ struct efa_io_rdma_req rdma_req; + + /* Fast registration */ + struct efa_io_fast_mr_reg_req reg_mr_req; + + /* Fast invalidation */ + struct efa_io_fast_mr_inv_req inv_mr_req; } data; }; @@ -208,7 +285,7 @@ struct efa_io_rx_desc { struct efa_io_cdesc_common { /* * verbs-generated request ID, as provided in the completed tx or rx - * descriptor. + * descriptor. */ u16 req_id; @@ -221,7 +298,8 @@ struct efa_io_cdesc_common { * 3 : has_imm - indicates that immediate data is * present - for RX completions only * 6:4 : op_type - enum efa_io_send_op_type - * 7 : reserved31 - MBZ + * 7 : unsolicited - indicates that there is no + * matching request - for RDMA with imm. RX only */ u8 flags; @@ -291,6 +369,13 @@ struct efa_io_rx_cdesc_ex { /* tx_buf_desc */ #define EFA_IO_TX_BUF_DESC_LKEY_MASK GENMASK(23, 0) +/* fast_mr_reg_req */ +#define EFA_IO_FAST_MR_REG_REQ_LOCAL_WRITE_ENABLE_MASK BIT(0) +#define EFA_IO_FAST_MR_REG_REQ_REMOTE_WRITE_ENABLE_MASK BIT(1) +#define EFA_IO_FAST_MR_REG_REQ_REMOTE_READ_ENABLE_MASK BIT(2) +#define EFA_IO_FAST_MR_REG_REQ_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0) +#define EFA_IO_FAST_MR_REG_REQ_PBL_MODE_MASK GENMASK(6, 5) + /* rx_desc */ #define EFA_IO_RX_DESC_LKEY_MASK GENMASK(23, 0) #define EFA_IO_RX_DESC_FIRST_MASK BIT(30) @@ -301,5 +386,6 @@ struct efa_io_rx_cdesc_ex { #define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1) #define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3) #define EFA_IO_CDESC_COMMON_OP_TYPE_MASK GENMASK(6, 4) +#define EFA_IO_CDESC_COMMON_UNSOLICITED_MASK BIT(7) #endif /* _EFA_IO_H_ */ diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index cc13415ff7e7..a8645a40730f 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -85,6 +85,8 @@ static const struct rdma_stat_desc efa_port_stats_descs[] = { EFA_DEFINE_PORT_STATS(EFA_STATS_STR) }; +#define EFA_DEFAULT_LINK_SPEED_GBPS 100 + #define EFA_CHUNK_PAYLOAD_SHIFT 12 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT) #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8 @@ -277,10 +279,47 @@ int efa_query_device(struct ib_device *ibdev, return 0; } +static void efa_link_gbps_to_speed_and_width(u16 gbps, + enum ib_port_speed *speed, + enum ib_port_width *width) +{ + if (gbps >= 400) { + *width = IB_WIDTH_8X; + *speed = IB_SPEED_HDR; + } else if (gbps >= 200) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_HDR; + } else if (gbps >= 120) { + *width = IB_WIDTH_12X; + *speed = IB_SPEED_FDR10; + } else if (gbps >= 100) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_EDR; + } else if (gbps >= 60) { + *width = IB_WIDTH_12X; + *speed = IB_SPEED_DDR; + } else if (gbps >= 50) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_HDR; + } else if (gbps >= 40) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_FDR10; + } else if (gbps >= 30) { + *width = IB_WIDTH_12X; + *speed = IB_SPEED_SDR; + } else { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_EDR; + } +} + int efa_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) 
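The efa_link_gbps_to_speed_and_width() cascade above picks the first (speed, width) pair whose nominal aggregate rate fits, so the thresholds must be ordered fastest-first. As a sanity check, the table below multiplies rounded nominal per-lane rates by lane count; the exact IBTA signaling rates differ slightly (e.g. EDR is 25.78125 Gb/s per lane), so these figures are approximate.

/* Sketch: nominal aggregate rate = per-lane rate x lane count. */
#include <stdio.h>

static const struct { const char *name; double gbps_per_lane; } speeds[] = {
	{ "SDR", 2.5 }, { "DDR", 5 }, { "FDR10", 10 },
	{ "EDR", 25 }, { "HDR", 50 },
};

int main(void)
{
	static const int widths[] = { 1, 4, 8, 12 };
	unsigned int i, j;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		for (j = 0; j < sizeof(widths) / sizeof(widths[0]); j++)
			printf("%s x %dX ~= %.0f Gb/s\n", speeds[i].name,
			       widths[j], speeds[i].gbps_per_lane * widths[j]);
	return 0;
}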
{ struct efa_dev *dev = to_edev(ibdev); + enum ib_port_speed link_speed; + enum ib_port_width link_width; + u16 link_gbps; props->lmc = 1; @@ -288,8 +327,10 @@ int efa_query_port(struct ib_device *ibdev, u32 port, props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; - props->active_speed = IB_SPEED_EDR; - props->active_width = IB_WIDTH_4X; + link_gbps = dev->dev_attr.max_link_speed_gbps ?: EFA_DEFAULT_LINK_SPEED_GBPS; + efa_link_gbps_to_speed_and_width(link_gbps, &link_speed, &link_width); + props->active_speed = link_speed; + props->active_width = link_width; props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu); props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu); props->max_msg_sz = dev->dev_attr.mtu; @@ -676,7 +717,7 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, goto err_out; } - if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_90)) { + if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_98)) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; @@ -732,6 +773,8 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, create_qp_params.rq_base_addr = qp->rq_dma_addr; } + create_qp_params.sl = cmd.sl; + if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV) create_qp_params.unsolicited_write_recv = true; @@ -1167,7 +1210,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, } params.uarn = cq->ucontext->uarn; - params.cq_depth = entries; + params.sub_cq_depth = entries; params.dma_addr = cq->dma_addr; params.entry_size_in_bytes = cmd.cq_entry_size; params.num_sub_cqs = cmd.num_sub_cqs; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index c52e6b2c9914..a442eca498b8 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -13235,7 +13235,7 @@ int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) /* * Clear all interrupt sources on the chip. 
*/ -void clear_all_interrupts(struct hfi1_devdata *dd) +static void clear_all_interrupts(struct hfi1_devdata *dd) { int i; diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index d861aa8fc640..8841db16bde7 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -1404,7 +1404,6 @@ irqreturn_t receive_context_interrupt_napi(int irq, void *data); int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set); void init_qsfp_int(struct hfi1_devdata *dd); -void clear_all_interrupts(struct hfi1_devdata *dd); void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr); void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr); void reset_interrupts(struct hfi1_devdata *dd); diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 4ec66611a143..4106423a1b39 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -179,8 +179,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC, hr_cq->cqn); if (ret) - dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, - hr_cq->cqn); + dev_err_ratelimited(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", + ret, hr_cq->cqn); xa_erase_irq(&cq_table->array, hr_cq->cqn); diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c index e8febb40f645..b869cdc54118 100644 --- a/drivers/infiniband/hw/hns/hns_roce_debugfs.c +++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c @@ -5,6 +5,7 @@ #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/pci.h> #include "hns_roce_device.h" @@ -86,7 +87,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev) { struct hns_roce_dev_debugfs *dbgfs = &hr_dev->dbgfs; - dbgfs->root = debugfs_create_dir(dev_name(&hr_dev->ib_dev.dev), + dbgfs->root = debugfs_create_dir(pci_name(hr_dev->pci_dev), hns_roce_dbgfs_root); create_sw_stat_debugfs(hr_dev, dbgfs->root); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 0b1e21cb6d2d..560a1d9de408 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -489,12 +489,6 @@ struct hns_roce_bank { u32 next; /* Next ID to allocate. 
*/ }; -struct hns_roce_idx_table { - u32 *spare_idx; - u32 head; - u32 tail; -}; - struct hns_roce_qp_table { struct hns_roce_hem_table qp_table; struct hns_roce_hem_table irrl_table; @@ -503,7 +497,7 @@ struct hns_roce_qp_table { struct mutex scc_mutex; struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM]; struct mutex bank_mutex; - struct hns_roce_idx_table idx_table; + struct xarray dip_xa; }; struct hns_roce_cq_table { @@ -593,6 +587,7 @@ struct hns_roce_dev; enum { HNS_ROCE_FLUSH_FLAG = 0, + HNS_ROCE_STOP_FLUSH_FLAG = 1, }; struct hns_roce_work { @@ -656,6 +651,8 @@ struct hns_roce_qp { enum hns_roce_cong_type cong_type; u8 tc_mode; u8 priority; + spinlock_t flush_lock; + struct hns_roce_dip *dip; }; struct hns_roce_ib_iboe { @@ -982,8 +979,6 @@ struct hns_roce_dev { enum hns_roce_device_state state; struct list_head qp_list; /* list of all qps on this dev */ spinlock_t qp_list_lock; /* protect qp_list */ - struct list_head dip_list; /* list of all dest ips on this dev */ - spinlock_t dip_list_lock; /* protect dip_list */ struct list_head pgdir_list; struct mutex pgdir_mutex; @@ -1289,6 +1284,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp); void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type); +void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn); void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type); void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); int hns_roce_init(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index c7c167e2a045..f84521be3bea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -300,7 +300,7 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop, struct hns_roce_hem_index *index) { - struct ib_device *ibdev = &hr_dev->ib_dev; + struct device *dev = hr_dev->dev; unsigned long mhop_obj = obj; u32 l0_idx, l1_idx, l2_idx; u32 chunk_ba_num; @@ -331,14 +331,14 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev, index->buf = l0_idx; break; default: - ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n", - table->type, mhop->hop_num); + dev_err(dev, "table %u not support mhop.hop_num = %u!\n", + table->type, mhop->hop_num); return -EINVAL; } if (unlikely(index->buf >= table->num_hem)) { - ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n", - table->type, index->buf, table->num_hem); + dev_err(dev, "table %u exceed hem limt idx %llu, max %lu!\n", + table->type, index->buf, table->num_hem); return -EINVAL; } @@ -448,14 +448,14 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop, struct hns_roce_hem_index *index) { - struct ib_device *ibdev = &hr_dev->ib_dev; + struct device *dev = hr_dev->dev; u32 step_idx; int ret = 0; if (index->inited & HEM_INDEX_L0) { ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0); if (ret) { - ibdev_err(ibdev, "set HEM step 0 failed!\n"); + dev_err(dev, "set HEM step 0 failed!\n"); goto out; } } @@ -463,7 +463,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, if (index->inited & HEM_INDEX_L1) { ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1); if (ret) { - ibdev_err(ibdev, "set HEM step 1 failed!\n"); + dev_err(dev, "set HEM step 1 failed!\n"); goto out; } } @@ -475,7 +475,7 @@ static int 
set_mhop_hem(struct hns_roce_dev *hr_dev, step_idx = mhop->hop_num; ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); if (ret) - ibdev_err(ibdev, "set HEM step last failed!\n"); + dev_err(dev, "set HEM step last failed!\n"); } out: return ret; @@ -485,14 +485,14 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj) { - struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_hem_index index = {}; struct hns_roce_hem_mhop mhop = {}; + struct device *dev = hr_dev->dev; int ret; ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); if (ret) { - ibdev_err(ibdev, "calc hem config failed!\n"); + dev_err(dev, "calc hem config failed!\n"); return ret; } @@ -504,7 +504,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, ret = alloc_mhop_hem(hr_dev, table, &mhop, &index); if (ret) { - ibdev_err(ibdev, "alloc mhop hem failed!\n"); + dev_err(dev, "alloc mhop hem failed!\n"); goto out; } @@ -512,7 +512,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, if (table->type < HEM_TYPE_MTT) { ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index); if (ret) { - ibdev_err(ibdev, "set HEM address to HW failed!\n"); + dev_err(dev, "set HEM address to HW failed!\n"); goto err_alloc; } } @@ -575,7 +575,7 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop, struct hns_roce_hem_index *index) { - struct ib_device *ibdev = &hr_dev->ib_dev; + struct device *dev = hr_dev->dev; u32 hop_num = mhop->hop_num; u32 chunk_ba_num; u32 step_idx; @@ -605,21 +605,21 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev, ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx); if (ret) - ibdev_warn(ibdev, "failed to clear hop%u HEM, ret = %d.\n", - hop_num, ret); + dev_warn(dev, "failed to clear hop%u HEM, ret = %d.\n", + hop_num, ret); if (index->inited & HEM_INDEX_L1) { ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1); if (ret) - ibdev_warn(ibdev, "failed to clear HEM step 1, ret = %d.\n", - ret); + dev_warn(dev, "failed to clear HEM step 1, ret = %d.\n", + ret); } if (index->inited & HEM_INDEX_L0) { ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0); if (ret) - ibdev_warn(ibdev, "failed to clear HEM step 0, ret = %d.\n", - ret); + dev_warn(dev, "failed to clear HEM step 0, ret = %d.\n", + ret); } } } @@ -629,14 +629,14 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, unsigned long obj, int check_refcount) { - struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_hem_index index = {}; struct hns_roce_hem_mhop mhop = {}; + struct device *dev = hr_dev->dev; int ret; ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); if (ret) { - ibdev_err(ibdev, "calc hem config failed!\n"); + dev_err(dev, "calc hem config failed!\n"); return; } @@ -672,8 +672,8 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT); if (ret) - dev_warn(dev, "failed to clear HEM base address, ret = %d.\n", - ret); + dev_warn_ratelimited(dev, "failed to clear HEM base address, ret = %d.\n", + ret); hns_roce_free_hem(hr_dev, table->hem[i]); table->hem[i] = NULL; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 24e906b9d3ae..697b17cca02e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -373,19 +373,12 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, static int 
check_send_valid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { - struct ib_device *ibdev = &hr_dev->ib_dev; - if (unlikely(hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_INIT || - hr_qp->state == IB_QPS_RTR)) { - ibdev_err(ibdev, "failed to post WQE, QP state %u!\n", - hr_qp->state); + hr_qp->state == IB_QPS_RTR)) return -EINVAL; - } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) { - ibdev_err(ibdev, "failed to post WQE, dev state %d!\n", - hr_dev->state); + else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) return -EIO; - } return 0; } @@ -582,7 +575,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, if (WARN_ON(ret)) return ret; - hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO, (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE, @@ -2560,20 +2553,19 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev) free_link_table_buf(hr_dev, &priv->ext_llm); } -static void free_dip_list(struct hns_roce_dev *hr_dev) +static void free_dip_entry(struct hns_roce_dev *hr_dev) { struct hns_roce_dip *hr_dip; - struct hns_roce_dip *tmp; - unsigned long flags; + unsigned long idx; - spin_lock_irqsave(&hr_dev->dip_list_lock, flags); + xa_lock(&hr_dev->qp_table.dip_xa); - list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) { - list_del(&hr_dip->node); + xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) { + __xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx); kfree(hr_dip); } - spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); + xa_unlock(&hr_dev->qp_table.dip_xa); } static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev) @@ -2775,8 +2767,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT, IB_QPS_INIT, NULL); if (ret) { - ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n", - ret); + ibdev_err_ratelimited(ibdev, "failed to modify qp to init, ret = %d.\n", + ret); return ret; } @@ -2981,7 +2973,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) hns_roce_free_link_table(hr_dev); if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09) - free_dip_list(hr_dev); + free_dip_entry(hr_dev); } static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, @@ -3421,8 +3413,8 @@ static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp) ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr); if (ret) { - ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n", - ret); + ibdev_err_ratelimited(ibdev, "failed to post wqe for free mr, ret = %d.\n", + ret); return ret; } @@ -3461,9 +3453,9 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) ret = free_mr_post_send_lp_wqe(hr_qp); if (ret) { - ibdev_err(ibdev, - "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n", - hr_qp->qpn, ret); + ibdev_err_ratelimited(ibdev, + "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n", + hr_qp->qpn, ret); break; } @@ -3474,16 +3466,16 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) while (cqe_cnt) { npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc); if (npolled < 0) { - ibdev_err(ibdev, - "failed to poll cqe for free mr, remain %d cqe.\n", - cqe_cnt); + ibdev_err_ratelimited(ibdev, + "failed to poll cqe for free mr, remain %d cqe.\n", + cqe_cnt); goto out; } if (time_after(jiffies, end)) { - ibdev_err(ibdev, - "failed to poll cqe for free mr and timeout, remain %d cqe.\n", - cqe_cnt); + 
ibdev_err_ratelimited(ibdev, + "failed to poll cqe for free mr and timeout, remain %d cqe.\n", + cqe_cnt); goto out; } cqe_cnt -= npolled; @@ -4701,26 +4693,49 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask, return 0; } +static int alloc_dip_entry(struct xarray *dip_xa, u32 qpn) +{ + struct hns_roce_dip *hr_dip; + int ret; + + hr_dip = xa_load(dip_xa, qpn); + if (hr_dip) + return 0; + + hr_dip = kzalloc(sizeof(*hr_dip), GFP_KERNEL); + if (!hr_dip) + return -ENOMEM; + + ret = xa_err(xa_store(dip_xa, qpn, hr_dip, GFP_KERNEL)); + if (ret) + kfree(hr_dip); + + return ret; +} + static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, u32 *dip_idx) { const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx; - u32 *head = &hr_dev->qp_table.idx_table.head; - u32 *tail = &hr_dev->qp_table.idx_table.tail; + struct xarray *dip_xa = &hr_dev->qp_table.dip_xa; + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_dip *hr_dip; - unsigned long flags; + unsigned long idx; int ret = 0; - spin_lock_irqsave(&hr_dev->dip_list_lock, flags); + ret = alloc_dip_entry(dip_xa, ibqp->qp_num); + if (ret) + return ret; - spare_idx[*tail] = ibqp->qp_num; - *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1); + xa_lock(dip_xa); - list_for_each_entry(hr_dip, &hr_dev->dip_list, node) { - if (!memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) { + xa_for_each(dip_xa, idx, hr_dip) { + if (hr_dip->qp_cnt && + !memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) { *dip_idx = hr_dip->dip_idx; + hr_dip->qp_cnt++; + hr_qp->dip = hr_dip; goto out; } } @@ -4728,19 +4743,24 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, /* If no dgid is found, a new dip and a mapping between dgid and * dip_idx will be created. */ - hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC); - if (!hr_dip) { - ret = -ENOMEM; - goto out; + xa_for_each(dip_xa, idx, hr_dip) { + if (hr_dip->qp_cnt) + continue; + + *dip_idx = idx; + memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); + hr_dip->dip_idx = idx; + hr_dip->qp_cnt++; + hr_qp->dip = hr_dip; + break; } - memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); - hr_dip->dip_idx = *dip_idx = spare_idx[*head]; - *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1); - list_add_tail(&hr_dip->node, &hr_dev->dip_list); + /* This should never happen. 
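The reworked get_dip_ctx_idx() above does a two-pass scan over the DIP table: first try to share an entry whose dgid matches (bumping its QP count), otherwise claim any entry whose count is zero. A userspace analogue, with a plain array standing in for the dip_xa xarray and no locking (the driver holds xa_lock across both passes):

/* Sketch: share-or-claim lookup over refcounted slots. */
#include <string.h>
#include <errno.h>

#define SLOTS   8
#define GID_LEN 16

struct dip { unsigned char dgid[GID_LEN]; unsigned int qp_cnt; };

static int get_dip_idx(struct dip *tbl, const unsigned char *dgid,
		       unsigned int *idx_out)
{
	unsigned int i;

	for (i = 0; i < SLOTS; i++)        /* pass 1: share a live entry */
		if (tbl[i].qp_cnt && !memcmp(tbl[i].dgid, dgid, GID_LEN)) {
			tbl[i].qp_cnt++;
			*idx_out = i;
			return 0;
		}

	for (i = 0; i < SLOTS; i++)        /* pass 2: claim a free slot */
		if (!tbl[i].qp_cnt) {
			memcpy(tbl[i].dgid, dgid, GID_LEN);
			tbl[i].qp_cnt = 1;
			*idx_out = i;
			return 0;
		}

	return -ENOSPC;                    /* driver WARNs: should not happen */
}

int main(void)
{
	static struct dip tbl[SLOTS];
	unsigned char gid[GID_LEN] = { 1 };
	unsigned int idx;

	return get_dip_idx(tbl, gid, &idx);   /* claims slot 0 */
}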
*/ + if (WARN_ON_ONCE(!hr_qp->dip)) + ret = -ENOSPC; out: - spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); + xa_unlock(dip_xa); return ret; } @@ -5061,10 +5081,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); int ret = 0; - if (!check_qp_state(cur_state, new_state)) { - ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n"); + if (!check_qp_state(cur_state, new_state)) return -EINVAL; - } if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { memset(qpc_mask, 0, hr_dev->caps.qpc_sz); @@ -5325,7 +5343,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, /* SW pass context to HW */ ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp); if (ret) { - ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret); + ibdev_err_ratelimited(ibdev, "failed to modify QP, ret = %d.\n", ret); goto out; } @@ -5463,7 +5481,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context); if (ret) { - ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret); + ibdev_err_ratelimited(ibdev, + "failed to query QPC, ret = %d.\n", + ret); ret = -EINVAL; goto out; } @@ -5471,7 +5491,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, state = hr_reg_read(&context, QPC_QP_ST); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); if (tmp_qp_state == -1) { - ibdev_err(ibdev, "Illegal ib_qp_state\n"); + ibdev_err_ratelimited(ibdev, "Illegal ib_qp_state\n"); ret = -EINVAL; goto out; } @@ -5564,9 +5584,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET, udata); if (ret) - ibdev_err(ibdev, - "failed to modify QP to RST, ret = %d.\n", - ret); + ibdev_err_ratelimited(ibdev, + "failed to modify QP to RST, ret = %d.\n", + ret); } send_cq = hr_qp->ibqp.send_cq ? 
to_hr_cq(hr_qp->ibqp.send_cq) : NULL; @@ -5594,17 +5614,41 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, return ret; } +static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + struct hns_roce_dip *hr_dip = hr_qp->dip; + + xa_lock(&hr_dev->qp_table.dip_xa); + + hr_dip->qp_cnt--; + if (!hr_dip->qp_cnt) + memset(hr_dip->dgid, 0, GID_LEN_V2); + + xa_unlock(&hr_dev->qp_table.dip_xa); +} + int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + unsigned long flags; int ret; + /* Make sure flush_cqe() is completed */ + spin_lock_irqsave(&hr_qp->flush_lock, flags); + set_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag); + spin_unlock_irqrestore(&hr_qp->flush_lock, flags); + flush_work(&hr_qp->flush_work.work); + + if (hr_qp->cong_type == CONG_TYPE_DIP) + put_dip_ctx_idx(hr_dev, hr_qp); + ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); if (ret) - ibdev_err(&hr_dev->ib_dev, - "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n", - hr_qp->qpn, ret); + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n", + hr_qp->qpn, ret); hns_roce_qp_destroy(hr_dev, hr_qp, udata); @@ -5898,9 +5942,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) - ibdev_err(&hr_dev->ib_dev, - "failed to process cmd when modifying CQ, ret = %d.\n", - ret); + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to process cmd when modifying CQ, ret = %d.\n", + ret); err_out: if (ret) @@ -5924,9 +5968,9 @@ static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn, ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC, cqn); if (ret) { - ibdev_err(&hr_dev->ib_dev, - "failed to process cmd when querying CQ, ret = %d.\n", - ret); + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to process cmd when querying CQ, ret = %d.\n", + ret); goto err_mailbox; } @@ -5967,11 +6011,10 @@ err_mailbox: return ret; } -static void hns_roce_irq_work_handle(struct work_struct *work) +static void dump_aeqe_log(struct hns_roce_work *irq_work) { - struct hns_roce_work *irq_work = - container_of(work, struct hns_roce_work, work); - struct ib_device *ibdev = &irq_work->hr_dev->ib_dev; + struct hns_roce_dev *hr_dev = irq_work->hr_dev; + struct ib_device *ibdev = &hr_dev->ib_dev; switch (irq_work->event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: @@ -6015,6 +6058,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work) case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: ibdev_warn(ibdev, "DB overflow.\n"); break; + case HNS_ROCE_EVENT_TYPE_MB: + break; case HNS_ROCE_EVENT_TYPE_FLR: ibdev_warn(ibdev, "function level reset.\n"); break; @@ -6025,8 +6070,46 @@ static void hns_roce_irq_work_handle(struct work_struct *work) ibdev_err(ibdev, "invalid xrceth error.\n"); break; default: + ibdev_info(ibdev, "Undefined event %d.\n", + irq_work->event_type); break; } +} + +static void hns_roce_irq_work_handle(struct work_struct *work) +{ + struct hns_roce_work *irq_work = + container_of(work, struct hns_roce_work, work); + struct hns_roce_dev *hr_dev = irq_work->hr_dev; + int event_type = irq_work->event_type; + u32 queue_num = irq_work->queue_num; + + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_PATH_MIG: + case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: + case HNS_ROCE_EVENT_TYPE_COMM_EST: + case 
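The destroy path above closes a race with the flush worker: destroy_qp() sets HNS_ROCE_STOP_FLUSH_FLAG under flush_lock and then flush_work()s any handler already queued, while init_flush_work() re-checks the flag under the same lock before queueing. A minimal pthread analogue of that handshake, simplified in that flush_work()'s wait is reduced to clearing a flag:

/* Sketch: stop-flag plus lock prevents queueing after teardown begins. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stop_flush;
static bool work_queued;

static void try_queue_flush(void)        /* analogue of init_flush_work() */
{
	pthread_mutex_lock(&flush_lock);
	if (!stop_flush)
		work_queued = true;       /* driver: refcount_inc + queue_work */
	pthread_mutex_unlock(&flush_lock);
}

static void destroy_qp(void)             /* analogue of the destroy path */
{
	pthread_mutex_lock(&flush_lock);
	stop_flush = true;                /* HNS_ROCE_STOP_FLUSH_FLAG */
	pthread_mutex_unlock(&flush_lock);
	/* driver: flush_work() waits out an already-queued handler here */
	work_queued = false;
}

int main(void)
{
	try_queue_flush();  /* queued: stop flag not yet set */
	destroy_qp();       /* blocks new work, drains the old */
	try_queue_flush();  /* no-op: stop flag already set */
	return 0;
}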
HNS_ROCE_EVENT_TYPE_SQ_DRAINED: + case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: + case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: + case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: + case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: + case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH: + hns_roce_qp_event(hr_dev, queue_num, event_type); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: + case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: + hns_roce_srq_event(hr_dev, queue_num, event_type); + break; + case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: + case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: + hns_roce_cq_event(hr_dev, queue_num, event_type); + break; + default: + break; + } + + dump_aeqe_log(irq_work); kfree(irq_work); } @@ -6087,14 +6170,14 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { - struct device *dev = hr_dev->dev; struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq); irqreturn_t aeqe_found = IRQ_NONE; + int num_aeqes = 0; int event_type; u32 queue_num; int sub_type; - while (aeqe) { + while (aeqe && num_aeqes < HNS_AEQ_POLLING_BUDGET) { /* Make sure we read AEQ entry after we have checked the * ownership bit */ @@ -6105,25 +6188,12 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM); switch (event_type) { - case HNS_ROCE_EVENT_TYPE_PATH_MIG: - case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - case HNS_ROCE_EVENT_TYPE_COMM_EST: - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH: - hns_roce_qp_event(hr_dev, queue_num, event_type); - break; - case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - hns_roce_srq_event(hr_dev, queue_num, event_type); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - hns_roce_cq_event(hr_dev, queue_num, event_type); + hns_roce_flush_cqe(hr_dev, queue_num); break; case HNS_ROCE_EVENT_TYPE_MB: hns_roce_cmd_event(hr_dev, @@ -6131,12 +6201,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, aeqe->event.cmd.status, le64_to_cpu(aeqe->event.cmd.out_param)); break; - case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - case HNS_ROCE_EVENT_TYPE_FLR: - break; default: - dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n", - event_type, eq->eqn, eq->cons_index); break; } @@ -6150,6 +6215,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, hns_roce_v2_init_irq_work(hr_dev, eq, queue_num); aeqe = next_aeqe_sw_v2(eq); + ++num_aeqes; } update_eq_db(eq); @@ -6699,6 +6765,9 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) int ret; int i; + if (hr_dev->caps.aeqe_depth < HNS_AEQ_POLLING_BUDGET) + return -EINVAL; + other_num = hr_dev->caps.num_other_vectors; comp_num = hr_dev->caps.num_comp_vectors; aeq_num = hr_dev->caps.num_aeq_vectors; @@ -7017,6 +7086,7 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; } + static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) { struct hns_roce_dev *hr_dev; @@ -7035,6 +7105,9 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) hr_dev->active = false; 
hr_dev->dis_db = true; + + rdma_user_mmap_disassociate(&hr_dev->ib_dev); + hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index c65f68a14a26..cbdbc9edbce6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -85,6 +85,11 @@ #define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18) +/* budget must be smaller than aeqe_depth to guarantee that we update + * the ci before we polled all the entries in the EQ. + */ +#define HNS_AEQ_POLLING_BUDGET 64 + enum { HNS_ROCE_CMD_FLAG_IN = BIT(0), HNS_ROCE_CMD_FLAG_OUT = BIT(1), @@ -919,6 +924,7 @@ struct hns_roce_v2_rc_send_wqe { #define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7) #define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8) #define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9) +#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10) #define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11) #define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12) #define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15) @@ -1342,7 +1348,7 @@ struct hns_roce_v2_priv { struct hns_roce_dip { u8 dgid[GID_LEN_V2]; u32 dip_idx; - struct list_head node; /* all dips are on a list */ + u32 qp_cnt; }; struct fmea_ram_ecc { diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4cb0af733587..ae24c81c9812 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -466,6 +466,11 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) pgprot_t prot; int ret; + if (hr_dev->dis_db) { + atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]); + return -EPERM; + } + rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff); if (!rdma_entry) { atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]); @@ -1130,8 +1135,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) INIT_LIST_HEAD(&hr_dev->qp_list); spin_lock_init(&hr_dev->qp_list_lock); - INIT_LIST_HEAD(&hr_dev->dip_list); - spin_lock_init(&hr_dev->dip_list_lock); ret = hns_roce_register_device(hr_dev); if (ret) diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 846da8c78b8b..bf30b3a65a9b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -138,8 +138,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1)); if (ret) - ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n", - ret); + ibdev_warn_ratelimited(ibdev, "failed to destroy mpt, ret = %d.\n", + ret); } free_mr_pbl(hr_dev, mr); @@ -435,15 +435,16 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) } int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, - unsigned int *sg_offset) + unsigned int *sg_offset_p) { + unsigned int sg_offset = sg_offset_p ? 
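The HNS_AEQ_POLLING_BUDGET change bounds how many AEQ entries one interrupt may drain, so the consumer index is published before the queue can wrap (hence the new check that the budget stays below aeqe_depth). A toy model of that bounded drain; leftover entries simply re-raise the interrupt:

/* Sketch: budgeted event-queue drain. */
#include <stdio.h>

#define HNS_AEQ_POLLING_BUDGET 64

static int pending = 100;          /* pretend 100 AEQEs are queued */

static int next_aeqe(void)
{
	if (pending <= 0)
		return 0;
	pending--;
	return 1;
}

static int drain_aeq(void)
{
	int num_aeqes = 0;

	while (num_aeqes < HNS_AEQ_POLLING_BUDGET && next_aeqe())
		num_aeqes++;       /* handle one event, advance local ci */
	/* update_eq_db() would publish the consumer index here */
	return num_aeqes;
}

int main(void)
{
	printf("first pass handled %d\n", drain_aeq());   /* 64 */
	printf("second pass handled %d\n", drain_aeq());  /* 36 */
	return 0;
}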
*sg_offset_p : 0; struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mtr *mtr = &mr->pbl_mtr; int ret, sg_num = 0; - if (!IS_ALIGNED(*sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) || + if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) || ibmr->page_size < HNS_HW_PAGE_SIZE || ibmr->page_size > HNS_HW_MAX_PAGE_SIZE) return sg_num; @@ -454,7 +455,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, if (!mr->page_list) return sg_num; - sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); + sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page); if (sg_num < 1) { ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 6b03ba671ff8..9e2e76c59406 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -39,6 +39,25 @@ #include "hns_roce_device.h" #include "hns_roce_hem.h" +static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, + u32 qpn) +{ + struct device *dev = hr_dev->dev; + struct hns_roce_qp *qp; + unsigned long flags; + + xa_lock_irqsave(&hr_dev->qp_table_xa, flags); + qp = __hns_roce_qp_lookup(hr_dev, qpn); + if (qp) + refcount_inc(&qp->refcount); + xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags); + + if (!qp) + dev_warn(dev, "async event for bogus QP %08x\n", qpn); + + return qp; +} + static void flush_work_handle(struct work_struct *work) { struct hns_roce_work *flush_work = container_of(work, @@ -71,11 +90,18 @@ static void flush_work_handle(struct work_struct *work) void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_work *flush_work = &hr_qp->flush_work; + unsigned long flags; + + spin_lock_irqsave(&hr_qp->flush_lock, flags); + /* Exit directly after destroy_qp() */ + if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) { + spin_unlock_irqrestore(&hr_qp->flush_lock, flags); + return; + } - flush_work->hr_dev = hr_dev; - INIT_WORK(&flush_work->work, flush_work_handle); refcount_inc(&hr_qp->refcount); queue_work(hr_dev->irq_workq, &flush_work->work); + spin_unlock_irqrestore(&hr_qp->flush_lock, flags); } void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp) @@ -95,31 +121,28 @@ void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp) void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) { - struct device *dev = hr_dev->dev; struct hns_roce_qp *qp; - xa_lock(&hr_dev->qp_table_xa); - qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (qp) - refcount_inc(&qp->refcount); - xa_unlock(&hr_dev->qp_table_xa); - - if (!qp) { - dev_warn(dev, "async event for bogus QP %08x\n", qpn); + qp = hns_roce_qp_lookup(hr_dev, qpn); + if (!qp) return; - } - if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || - event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) { - qp->state = IB_QPS_ERR; + qp->event(qp, (enum hns_roce_event)event_type); - flush_cqe(hr_dev, qp); - } + if (refcount_dec_and_test(&qp->refcount)) + complete(&qp->free); +} - qp->event(qp, (enum hns_roce_event)event_type); +void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn) +{ + 
struct hns_roce_qp *qp; + + qp = hns_roce_qp_lookup(hr_dev, qpn); + if (!qp) + return; + + qp->state = IB_QPS_ERR; + flush_cqe(hr_dev, qp); if (refcount_dec_and_test(&qp->refcount)) complete(&qp->free); @@ -1124,6 +1147,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_udata *udata, struct hns_roce_qp *hr_qp) { + struct hns_roce_work *flush_work = &hr_qp->flush_work; struct hns_roce_ib_create_qp_resp resp = {}; struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_ib_create_qp ucmd = {}; @@ -1132,9 +1156,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, mutex_init(&hr_qp->mutex); spin_lock_init(&hr_qp->sq.lock); spin_lock_init(&hr_qp->rq.lock); + spin_lock_init(&hr_qp->flush_lock); hr_qp->state = IB_QPS_RESET; hr_qp->flush_flag = 0; + flush_work->hr_dev = hr_dev; + INIT_WORK(&flush_work->work, flush_work_handle); if (init_attr->create_flags) return -EOPNOTSUPP; @@ -1546,14 +1573,10 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) unsigned int reserved_from_bot; unsigned int i; - qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps, - sizeof(u32), GFP_KERNEL); - if (!qp_table->idx_table.spare_idx) - return -ENOMEM; - mutex_init(&qp_table->scc_mutex); mutex_init(&qp_table->bank_mutex); xa_init(&hr_dev->qp_table_xa); + xa_init(&qp_table->dip_xa); reserved_from_bot = hr_dev->caps.reserved_qps; @@ -1578,7 +1601,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) ida_destroy(&hr_dev->qp_table.bank[i].ida); + xa_destroy(&hr_dev->qp_table.dip_xa); mutex_destroy(&hr_dev->qp_table.bank_mutex); mutex_destroy(&hr_dev->qp_table.scc_mutex); - kfree(hr_dev->qp_table.idx_table.spare_idx); } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index c9b8233f4b05..70c06ef65603 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -151,8 +151,8 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ, srq->srqn); if (ret) - dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", - ret, srq->srqn); + dev_err_ratelimited(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", + ret, srq->srqn); xa_erase_irq(&srq_table->xa, srq->srqn); diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 69999d8d24f3..4186884c66e1 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -27,6 +27,19 @@ enum devx_obj_flags { DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0, DEVX_OBJ_FLAGS_DCT = 1 << 1, DEVX_OBJ_FLAGS_CQ = 1 << 2, + DEVX_OBJ_FLAGS_HW_FREED = 1 << 3, +}; + +#define MAX_ASYNC_CMDS 8 + +struct mlx5_async_cmd { + struct ib_uobject *uobject; + void *in; + int in_size; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + int err; + struct mlx5_async_work cb_work; + struct completion comp; }; struct devx_async_data { @@ -1405,7 +1418,9 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, */ mlx5r_deref_wait_odp_mkey(&obj->mkey); - if (obj->flags & DEVX_OBJ_FLAGS_DCT) + if (obj->flags & DEVX_OBJ_FLAGS_HW_FREED) + ret = 0; + else if (obj->flags & DEVX_OBJ_FLAGS_DCT) ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); else if (obj->flags & DEVX_OBJ_FLAGS_CQ) ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); @@ -2595,6 +2610,82 @@ void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev) } } +static void devx_async_destroy_cb(int 
status, struct mlx5_async_work *context) +{ + struct mlx5_async_cmd *devx_out = container_of(context, + struct mlx5_async_cmd, cb_work); + struct devx_obj *obj = devx_out->uobject->object; + + if (!status) + obj->flags |= DEVX_OBJ_FLAGS_HW_FREED; + + complete(&devx_out->comp); +} + +static void devx_async_destroy(struct mlx5_ib_dev *dev, + struct mlx5_async_cmd *cmd) +{ + init_completion(&cmd->comp); + cmd->err = mlx5_cmd_exec_cb(&dev->async_ctx, cmd->in, cmd->in_size, + &cmd->out, sizeof(cmd->out), + devx_async_destroy_cb, &cmd->cb_work); +} + +static void devx_wait_async_destroy(struct mlx5_async_cmd *cmd) +{ + if (!cmd->err) + wait_for_completion(&cmd->comp); + atomic_set(&cmd->uobject->usecnt, 0); +} + +void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile) +{ + struct mlx5_async_cmd async_cmd[MAX_ASYNC_CMDS]; + struct ib_ucontext *ucontext = ufile->ucontext; + struct ib_device *device = ucontext->device; + struct mlx5_ib_dev *dev = to_mdev(device); + struct ib_uobject *uobject; + struct devx_obj *obj; + int head = 0; + int tail = 0; + + list_for_each_entry(uobject, &ufile->uobjects, list) { + WARN_ON(uverbs_try_lock_object(uobject, UVERBS_LOOKUP_WRITE)); + + /* + * Currently we only support QP destruction, if other objects + * are to be destroyed need to add type synchronization to the + * cleanup algorithm and handle pre/post FW cleanup for the + * new types if needed. + */ + if (uobj_get_object_id(uobject) != MLX5_IB_OBJECT_DEVX_OBJ || + (get_dec_obj_type(uobject->object, MLX5_EVENT_TYPE_MAX) != + MLX5_OBJ_TYPE_QP)) { + atomic_set(&uobject->usecnt, 0); + continue; + } + + obj = uobject->object; + + async_cmd[tail % MAX_ASYNC_CMDS].in = obj->dinbox; + async_cmd[tail % MAX_ASYNC_CMDS].in_size = obj->dinlen; + async_cmd[tail % MAX_ASYNC_CMDS].uobject = uobject; + + devx_async_destroy(dev, &async_cmd[tail % MAX_ASYNC_CMDS]); + tail++; + + if (tail - head == MAX_ASYNC_CMDS) { + devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]); + head++; + } + } + + while (head != tail) { + devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]); + head++; + } +} + static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h index ee2213275fd6..1344bf4c9d21 100644 --- a/drivers/infiniband/hw/mlx5/devx.h +++ b/drivers/infiniband/hw/mlx5/devx.h @@ -28,6 +28,7 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user); void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid); int mlx5_ib_devx_init(struct mlx5_ib_dev *dev); void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev); +void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile); #else static inline int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user) { @@ -41,5 +42,8 @@ static inline int mlx5_ib_devx_init(struct mlx5_ib_dev *dev) static inline void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev) { } +static inline void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile) +{ +} #endif #endif /* _MLX5_IB_DEVX_H */ diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 1b6c5e37d169..2453ae4384a7 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -278,7 +278,13 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num, goto done; } - err = query_ib_ppcnt(mdev, mdev_port_num, 0, out_cnt, sz, 0); + if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) + err = query_ib_ppcnt(mdev, mdev_port_num, port_num, + out_cnt, sz, 0); + 
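mlx5_ib_ufile_hw_cleanup() above pipelines destroy commands with a head/tail sliding window: at most MAX_ASYNC_CMDS are in flight, and when the window fills it waits for the oldest before issuing the next. A userspace sketch of that window, with issue()/wait_done() standing in for devx_async_destroy()/devx_wait_async_destroy():

/* Sketch: bounded in-flight window over a slot ring. */
#include <stdio.h>

#define MAX_ASYNC_CMDS 8

static void issue(int slot, int obj) { printf("issue obj %d in slot %d\n", obj, slot); }
static void wait_done(int slot)      { printf("wait slot %d\n", slot); }

int main(void)
{
	int head = 0, tail = 0, obj;

	for (obj = 0; obj < 20; obj++) {              /* 20 objects to destroy */
		issue(tail % MAX_ASYNC_CMDS, obj);
		tail++;
		if (tail - head == MAX_ASYNC_CMDS)    /* window full */
			wait_done(head++ % MAX_ASYNC_CMDS);
	}
	while (head != tail)                          /* drain the remainder */
		wait_done(head++ % MAX_ASYNC_CMDS);
	return 0;
}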
else + err = query_ib_ppcnt(mdev, mdev_port_num, 0, + out_cnt, sz, 0); + if (!err) pma_cnt_assign(pma_cnt, out_cnt); } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4999239c8f41..bc7930d0c564 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1182,6 +1182,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE; resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT; + + if (MLX5_CAP_GEN_2(mdev, dp_ordering_force) && + (MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc) || + MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc) || + MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc) || + MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud) || + MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc))) + resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP; } if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) { @@ -2997,7 +3005,6 @@ unlock: static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) { struct mlx5_ib_resources *devr = &dev->devr; - int port; int ret; if (!MLX5_CAP_GEN(dev->mdev, xrc)) @@ -3013,10 +3020,6 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) return ret; } - for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) - INIT_WORK(&devr->ports[port].pkey_change_work, - pkey_change_handler); - mutex_init(&devr->cq_lock); mutex_init(&devr->srq_lock); @@ -3026,16 +3029,6 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev) { struct mlx5_ib_resources *devr = &dev->devr; - int port; - - /* - * Make sure no change P_Key work items are still executing. - * - * At this stage, the mlx5_ib_event should be unregistered - * and it ensures that no new works are added. - */ - for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) - cancel_work_sync(&devr->ports[port].pkey_change_work); /* After s0/s1 init, they are not unset during the device lifetime. 
*/ if (devr->s1) { @@ -3211,12 +3204,14 @@ static int lag_event(struct notifier_block *nb, unsigned long event, void *data) struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev, lag_events); struct mlx5_core_dev *mdev = dev->mdev; + struct ib_device *ibdev = &dev->ib_dev; + struct net_device *old_ndev = NULL; struct mlx5_ib_port *port; struct net_device *ndev; - int i, err; - int portnum; + u32 portnum = 0; + int ret = 0; + int i; - portnum = 0; switch (event) { case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE: ndev = data; @@ -3232,19 +3227,24 @@ static int lag_event(struct notifier_block *nb, unsigned long event, void *data) } } } - err = ib_device_set_netdev(&dev->ib_dev, ndev, - portnum + 1); - dev_put(ndev); - if (err) - return err; - /* Rescan gids after new netdev assignment */ - rdma_roce_rescan_device(&dev->ib_dev); + old_ndev = ib_device_get_netdev(ibdev, portnum + 1); + ret = ib_device_set_netdev(ibdev, ndev, portnum + 1); + if (ret) + goto out; + + if (old_ndev) + roce_del_all_netdev_gids(ibdev, portnum + 1, + old_ndev); + rdma_roce_rescan_port(ibdev, portnum + 1); } break; default: return NOTIFY_DONE; } - return NOTIFY_OK; + +out: + dev_put(old_ndev); + return notifier_from_errno(ret); } static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev) @@ -4134,6 +4134,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .req_notify_cq = mlx5_ib_arm_cq, .rereg_user_mr = mlx5_ib_rereg_user_mr, .resize_cq = mlx5_ib_resize_cq, + .ufile_hw_cleanup = mlx5_ib_ufile_hw_cleanup, INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs), @@ -4464,6 +4465,13 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev) static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) { + struct mlx5_ib_resources *devr = &dev->devr; + int port; + + for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) + INIT_WORK(&devr->ports[port].pkey_change_work, + pkey_change_handler); + dev->mdev_events.notifier_call = mlx5_ib_event; mlx5_notifier_register(dev->mdev, &dev->mdev_events); @@ -4474,8 +4482,14 @@ static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) { + struct mlx5_ib_resources *devr = &dev->devr; + int port; + mlx5r_macsec_event_unregister(dev); mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); + + for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) + cancel_work_sync(&devr->ports[port].pkey_change_work); } void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev, @@ -4565,9 +4579,6 @@ static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, mlx5_ib_dev_res_init, mlx5_ib_dev_res_cleanup), - STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, - mlx5_ib_stage_dev_notifier_init, - mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_ODP, mlx5_ib_odp_init_one, mlx5_ib_odp_cleanup_one), @@ -4592,6 +4603,9 @@ static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, + mlx5_ib_stage_dev_notifier_init, + mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), @@ -4628,9 +4642,6 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, mlx5_ib_dev_res_init, mlx5_ib_dev_res_cleanup), - STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, - 
mlx5_ib_stage_dev_notifier_init, - mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_counters_init, mlx5_ib_counters_cleanup), @@ -4652,6 +4663,9 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, + mlx5_ib_stage_dev_notifier_init, + mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 23fd72f7f63d..a01b592aa716 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -521,6 +521,7 @@ struct mlx5_ib_qp { struct mlx5_bf bf; u8 has_rq:1; u8 is_rss:1; + u8 is_ooo_rq:1; /* only for user space QPs. For kernel * we have it from the bf object @@ -972,7 +973,6 @@ enum mlx5_ib_stages { MLX5_IB_STAGE_QP, MLX5_IB_STAGE_SRQ, MLX5_IB_STAGE_DEVICE_RESOURCES, - MLX5_IB_STAGE_DEVICE_NOTIFIER, MLX5_IB_STAGE_ODP, MLX5_IB_STAGE_COUNTERS, MLX5_IB_STAGE_CONG_DEBUGFS, @@ -981,6 +981,7 @@ enum mlx5_ib_stages { MLX5_IB_STAGE_PRE_IB_REG_UMR, MLX5_IB_STAGE_WHITELIST_UID, MLX5_IB_STAGE_IB_REG, + MLX5_IB_STAGE_DEVICE_NOTIFIER, MLX5_IB_STAGE_POST_IB_REG_UMR, MLX5_IB_STAGE_DELAY_DROP, MLX5_IB_STAGE_RESTRACK, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 10ce3b44f645..a43eba9d3572 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1960,7 +1960,7 @@ static int atomic_size_to_mode(int size_mask) } static int get_atomic_mode(struct mlx5_ib_dev *dev, - enum ib_qp_type qp_type) + struct mlx5_ib_qp *qp) { u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic); @@ -1970,7 +1970,7 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev, if (!atomic) return -EOPNOTSUPP; - if (qp_type == MLX5_IB_QPT_DCT) + if (qp->type == MLX5_IB_QPT_DCT) atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); else atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); @@ -1984,6 +1984,10 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev, atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD)) atomic_mode = MLX5_ATOMIC_MODE_IB_COMP; + /* OOO DP QPs do not support larger than 8-Bytes atomic operations */ + if (atomic_mode > MLX5_ATOMIC_MODE_8B && qp->is_ooo_rq) + atomic_mode = MLX5_ATOMIC_MODE_8B; + return atomic_mode; } @@ -2839,6 +2843,29 @@ static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd, return 0; } +static bool get_dp_ooo_cap(struct mlx5_core_dev *mdev, enum ib_qp_type qp_type) +{ + if (!MLX5_CAP_GEN_2(mdev, dp_ordering_force)) + return false; + + switch (qp_type) { + case IB_QPT_RC: + return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc); + case IB_QPT_XRC_INI: + case IB_QPT_XRC_TGT: + return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc); + case IB_QPT_UC: + return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc); + case IB_QPT_UD: + return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud); + case MLX5_IB_QPT_DCI: + case MLX5_IB_QPT_DCT: + return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc); + default: + return false; + } +} + static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, bool cond, struct mlx5_ib_qp *qp) { @@ -3365,7 +3392,7 @@ static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp, if (access_flags & IB_ACCESS_REMOTE_ATOMIC) { int atomic_mode; - atomic_mode = get_atomic_mode(dev, qp->type); + 
atomic_mode = get_atomic_mode(dev, qp); if (atomic_mode < 0) return -EOPNOTSUPP; @@ -4316,6 +4343,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) MLX5_SET(qpc, qpc, deth_sqpn, 1); + if (qp->is_ooo_rq && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { + MLX5_SET(qpc, qpc, dp_ordering_1, 1); + MLX5_SET(qpc, qpc, dp_ordering_force, 1); + } + mlx5_cur = to_mlx5_state(cur_state); mlx5_new = to_mlx5_state(new_state); @@ -4531,7 +4563,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) { int atomic_mode; - atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT); + atomic_mode = get_atomic_mode(dev, qp); if (atomic_mode < 0) return -EOPNOTSUPP; @@ -4573,6 +4605,10 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7); + if (qp->is_ooo_rq) { + MLX5_SET(dctc, dctc, dp_ordering_1, 1); + MLX5_SET(dctc, dctc, dp_ordering_force, 1); + } err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, MLX5_ST_SZ_BYTES(create_dct_in), out, @@ -4676,11 +4712,16 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, min(udata->inlen, sizeof(ucmd)))) return -EFAULT; - if (ucmd.comp_mask || + if (ucmd.comp_mask & ~MLX5_IB_MODIFY_QP_OOO_DP || memchr_inv(&ucmd.burst_info.reserved, 0, sizeof(ucmd.burst_info.reserved))) return -EOPNOTSUPP; + if (ucmd.comp_mask & MLX5_IB_MODIFY_QP_OOO_DP) { + if (!get_dp_ooo_cap(dev->mdev, qp->type)) + return -EOPNOTSUPP; + qp->is_ooo_rq = 1; + } } if (qp->type == IB_QPT_GSI) diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index d2f7b5195c19..91d329e90308 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -775,6 +775,7 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) * Yield the processor */ spin_lock_irqsave(&qp->state_lock, flags); + attr->cur_qp_state = qp_state(qp); if (qp->attr.sq_draining) { spin_unlock_irqrestore(&qp->state_lock, flags); cond_resched(); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 479c07e6e4ed..87a02f0deb00 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -663,10 +663,12 @@ int rxe_requester(struct rxe_qp *qp) if (unlikely(qp_state(qp) == IB_QPS_ERR)) { wqe = __req_next_wqe(qp); spin_unlock_irqrestore(&qp->state_lock, flags); - if (wqe) + if (wqe) { + wqe->status = IB_WC_WR_FLUSH_ERR; goto err; - else + } else { goto exit; + } } if (unlikely(qp_state(qp) == IB_QPS_RESET)) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7da94fb8d7fa..4feb7170535c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -128,16 +128,13 @@ static void ipoib_get_ethtool_stats(struct net_device *dev, static void ipoib_get_strings(struct net_device __always_unused *dev, u32 stringset, u8 *data) { - u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) { - memcpy(p, ipoib_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) + ethtool_puts(&data, + ipoib_gstrings_stats[i].stat_string); break; default: break; diff 
--git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4e31bb0b6466..3b463db8ce39 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -49,6 +49,7 @@ #include <linux/jhash.h> #include <net/arp.h> #include <net/addrconf.h> +#include <net/pkt_sched.h> #include <linux/inetdevice.h> #include <rdma/ib_cache.h> @@ -2145,7 +2146,7 @@ void ipoib_setup_common(struct net_device *dev) dev->hard_header_len = IPOIB_HARD_LEN; dev->addr_len = INFINIBAND_ALEN; dev->type = ARPHRD_INFINIBAND; - dev->tx_queue_len = ipoib_sendq_size * 2; + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; dev->features = (NETIF_F_VLAN_CHALLENGED | NETIF_F_HIGHDMA); netif_keep_dst(dev); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c index 29b3d8fce3f5..316959940d2f 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c @@ -164,9 +164,7 @@ static void vnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) return; for (i = 0; i < VNIC_STATS_LEN; i++) - memcpy(data + i * ETH_GSTRING_LEN, - vnic_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + ethtool_puts(&data, vnic_gstrings_stats[i].stat_string); } /* ethtool ops */ diff --git a/drivers/input/input.c b/drivers/input/input.c index c51858f1cdc5..7f0477e04ad2 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -605,6 +605,9 @@ int input_open_device(struct input_handle *handle) handle->open++; + if (handle->handler->passive_observer) + goto out; + if (dev->users++ || dev->inhibited) { /* * Device is already opened and/or inhibited, @@ -668,11 +671,13 @@ void input_close_device(struct input_handle *handle) __input_release_device(handle); - if (!--dev->users && !dev->inhibited) { - if (dev->poller) - input_dev_poller_stop(dev->poller); - if (dev->close) - dev->close(dev); + if (!handle->handler->passive_observer) { + if (!--dev->users && !dev->inhibited) { + if (dev->poller) + input_dev_poller_stop(dev->poller); + if (dev->close) + dev->close(dev); + } } if (!--handle->open) { diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c index 6373d7aa739a..a9f1946cf0d6 100644 --- a/drivers/input/joystick/db9.c +++ b/drivers/input/joystick/db9.c @@ -505,24 +505,22 @@ static int db9_open(struct input_dev *dev) { struct db9 *db9 = input_get_drvdata(dev); struct parport *port = db9->pd->port; - int err; - err = mutex_lock_interruptible(&db9->mutex); - if (err) - return err; - - if (!db9->used++) { - parport_claim(db9->pd); - parport_write_data(port, 0xff); - if (db9_modes[db9->mode].reverse) { - parport_data_reverse(port); - parport_write_control(port, DB9_NORMAL); + scoped_guard(mutex_intr, &db9->mutex) { + if (!db9->used++) { + parport_claim(db9->pd); + parport_write_data(port, 0xff); + if (db9_modes[db9->mode].reverse) { + parport_data_reverse(port); + parport_write_control(port, DB9_NORMAL); + } + mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME); } - mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME); + + return 0; } - mutex_unlock(&db9->mutex); - return 0; + return -EINTR; } static void db9_close(struct input_dev *dev) @@ -530,14 +528,14 @@ static void db9_close(struct input_dev *dev) struct db9 *db9 = input_get_drvdata(dev); struct parport *port = db9->pd->port; - mutex_lock(&db9->mutex); + guard(mutex)(&db9->mutex); + if (!--db9->used) { del_timer_sync(&db9->timer); parport_write_control(port, 0x00); 
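/*
 * Editor's note (illustrative, not part of the patch): guard(mutex)
 * from <linux/cleanup.h> locks the mutex immediately and unlocks it
 * automatically when the function returns, which is why the explicit
 * mutex_unlock() calls disappear from db9_close(). The matching open()
 * path uses scoped_guard(mutex_intr, ...): the guarded block only runs
 * if mutex_lock_interruptible() succeeds, and falling out of the scope
 * without returning is reported to the caller as -EINTR.
 */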
parport_data_forward(port); parport_release(db9->pd); } - mutex_unlock(&db9->mutex); } static void db9_attach(struct parport *pp) diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c index 2b553e2d838f..b53cafd7a5ee 100644 --- a/drivers/input/joystick/gamecon.c +++ b/drivers/input/joystick/gamecon.c @@ -765,33 +765,31 @@ static void gc_timer(struct timer_list *t) static int gc_open(struct input_dev *dev) { struct gc *gc = input_get_drvdata(dev); - int err; - err = mutex_lock_interruptible(&gc->mutex); - if (err) - return err; + scoped_guard(mutex_intr, &gc->mutex) { + if (!gc->used++) { + parport_claim(gc->pd); + parport_write_control(gc->pd->port, 0x04); + mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME); + } - if (!gc->used++) { - parport_claim(gc->pd); - parport_write_control(gc->pd->port, 0x04); - mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME); + return 0; } - mutex_unlock(&gc->mutex); - return 0; + return -EINTR; } static void gc_close(struct input_dev *dev) { struct gc *gc = input_get_drvdata(dev); - mutex_lock(&gc->mutex); + guard(mutex)(&gc->mutex); + if (!--gc->used) { del_timer_sync(&gc->timer); parport_write_control(gc->pd->port, 0x00); parport_release(gc->pd); } - mutex_unlock(&gc->mutex); } static int gc_setup_pad(struct gc *gc, int idx, int pad_type) diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c index 95c0348843e6..8c78cbe553c8 100644 --- a/drivers/input/joystick/iforce/iforce-ff.c +++ b/drivers/input/joystick/iforce/iforce-ff.c @@ -21,14 +21,13 @@ static int make_magnitude_modifier(struct iforce* iforce, unsigned char data[3]; if (!no_alloc) { - mutex_lock(&iforce->mem_mutex); - if (allocate_resource(&(iforce->device_memory), mod_chunk, 2, - iforce->device_memory.start, iforce->device_memory.end, 2L, - NULL, NULL)) { - mutex_unlock(&iforce->mem_mutex); + guard(mutex)(&iforce->mem_mutex); + + if (allocate_resource(&iforce->device_memory, mod_chunk, 2, + iforce->device_memory.start, + iforce->device_memory.end, + 2L, NULL, NULL)) return -ENOSPC; - } - mutex_unlock(&iforce->mem_mutex); } data[0] = LO(mod_chunk->start); @@ -54,14 +53,13 @@ static int make_period_modifier(struct iforce* iforce, period = TIME_SCALE(period); if (!no_alloc) { - mutex_lock(&iforce->mem_mutex); - if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0c, - iforce->device_memory.start, iforce->device_memory.end, 2L, - NULL, NULL)) { - mutex_unlock(&iforce->mem_mutex); + guard(mutex)(&iforce->mem_mutex); + + if (allocate_resource(&iforce->device_memory, mod_chunk, 0x0c, + iforce->device_memory.start, + iforce->device_memory.end, + 2L, NULL, NULL)) return -ENOSPC; - } - mutex_unlock(&iforce->mem_mutex); } data[0] = LO(mod_chunk->start); @@ -94,14 +92,13 @@ static int make_envelope_modifier(struct iforce* iforce, fade_duration = TIME_SCALE(fade_duration); if (!no_alloc) { - mutex_lock(&iforce->mem_mutex); + guard(mutex)(&iforce->mem_mutex); + if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0e, - iforce->device_memory.start, iforce->device_memory.end, 2L, - NULL, NULL)) { - mutex_unlock(&iforce->mem_mutex); + iforce->device_memory.start, + iforce->device_memory.end, + 2L, NULL, NULL)) return -ENOSPC; - } - mutex_unlock(&iforce->mem_mutex); } data[0] = LO(mod_chunk->start); @@ -131,14 +128,13 @@ static int make_condition_modifier(struct iforce* iforce, unsigned char data[10]; if (!no_alloc) { - mutex_lock(&iforce->mem_mutex); + guard(mutex)(&iforce->mem_mutex); + if 
(allocate_resource(&(iforce->device_memory), mod_chunk, 8, - iforce->device_memory.start, iforce->device_memory.end, 2L, - NULL, NULL)) { - mutex_unlock(&iforce->mem_mutex); + iforce->device_memory.start, + iforce->device_memory.end, + 2L, NULL, NULL)) return -ENOSPC; - } - mutex_unlock(&iforce->mem_mutex); } data[0] = LO(mod_chunk->start); diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c index 08c889a72f6c..74181d5123cd 100644 --- a/drivers/input/joystick/iforce/iforce-packets.c +++ b/drivers/input/joystick/iforce/iforce-packets.c @@ -31,49 +31,42 @@ int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data) int c; int empty; int head, tail; - unsigned long flags; /* * Update head and tail of xmit buffer */ - spin_lock_irqsave(&iforce->xmit_lock, flags); - - head = iforce->xmit.head; - tail = iforce->xmit.tail; - - - if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { - dev_warn(&iforce->dev->dev, - "not enough space in xmit buffer to send new packet\n"); - spin_unlock_irqrestore(&iforce->xmit_lock, flags); - return -1; - } + scoped_guard(spinlock_irqsave, &iforce->xmit_lock) { + head = iforce->xmit.head; + tail = iforce->xmit.tail; + + if (CIRC_SPACE(head, tail, XMIT_SIZE) < n + 2) { + dev_warn(&iforce->dev->dev, + "not enough space in xmit buffer to send new packet\n"); + return -1; + } - empty = head == tail; - XMIT_INC(iforce->xmit.head, n+2); + empty = head == tail; + XMIT_INC(iforce->xmit.head, n + 2); /* * Store packet in xmit buffer */ - iforce->xmit.buf[head] = HI(cmd); - XMIT_INC(head, 1); - iforce->xmit.buf[head] = LO(cmd); - XMIT_INC(head, 1); - - c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); - if (n < c) c=n; - - memcpy(&iforce->xmit.buf[head], - data, - c); - if (n != c) { - memcpy(&iforce->xmit.buf[0], - data + c, - n - c); + iforce->xmit.buf[head] = HI(cmd); + XMIT_INC(head, 1); + iforce->xmit.buf[head] = LO(cmd); + XMIT_INC(head, 1); + + c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); + if (n < c) + c = n; + + memcpy(&iforce->xmit.buf[head], data, c); + if (n != c) + memcpy(&iforce->xmit.buf[0], data + c, n - c); + + XMIT_INC(head, n); } - XMIT_INC(head, n); - spin_unlock_irqrestore(&iforce->xmit_lock, flags); /* * If necessary, start the transmission */ diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c index 2380546d7978..75b85c46dfa4 100644 --- a/drivers/input/joystick/iforce/iforce-serio.c +++ b/drivers/input/joystick/iforce/iforce-serio.c @@ -28,45 +28,39 @@ static void iforce_serio_xmit(struct iforce *iforce) iforce); unsigned char cs; int i; - unsigned long flags; if (test_and_set_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)) { set_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags); return; } - spin_lock_irqsave(&iforce->xmit_lock, flags); + guard(spinlock_irqsave)(&iforce->xmit_lock); -again: - if (iforce->xmit.head == iforce->xmit.tail) { - iforce_clear_xmit_and_wake(iforce); - spin_unlock_irqrestore(&iforce->xmit_lock, flags); - return; - } + do { + if (iforce->xmit.head == iforce->xmit.tail) + break; - cs = 0x2b; + cs = 0x2b; - serio_write(iforce_serio->serio, 0x2b); + serio_write(iforce_serio->serio, 0x2b); - serio_write(iforce_serio->serio, iforce->xmit.buf[iforce->xmit.tail]); - cs ^= iforce->xmit.buf[iforce->xmit.tail]; - XMIT_INC(iforce->xmit.tail, 1); - - for (i=iforce->xmit.buf[iforce->xmit.tail]; i >= 0; --i) { serio_write(iforce_serio->serio, iforce->xmit.buf[iforce->xmit.tail]); cs ^= iforce->xmit.buf[iforce->xmit.tail]; 
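/*
 * Editor's note on the surrounding rework (assumes the hunks apply in
 * order): the old "goto again" retry in iforce_serio_xmit() becomes a
 * do { ... } while (test_and_clear_bit(IFORCE_XMIT_AGAIN, ...)) loop,
 * and guard(spinlock_irqsave)(&iforce->xmit_lock) now releases the
 * lock on every exit path, including the early "ring empty" break.
 */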
XMIT_INC(iforce->xmit.tail, 1); - } - serio_write(iforce_serio->serio, cs); + for (i = iforce->xmit.buf[iforce->xmit.tail]; i >= 0; --i) { + serio_write(iforce_serio->serio, + iforce->xmit.buf[iforce->xmit.tail]); + cs ^= iforce->xmit.buf[iforce->xmit.tail]; + XMIT_INC(iforce->xmit.tail, 1); + } - if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags)) - goto again; + serio_write(iforce_serio->serio, cs); - iforce_clear_xmit_and_wake(iforce); + } while (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags)); - spin_unlock_irqrestore(&iforce->xmit_lock, flags); + iforce_clear_xmit_and_wake(iforce); } static int iforce_serio_get_id(struct iforce *iforce, u8 id, diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index cba92bd590a8..1f00f76b0174 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -25,13 +25,11 @@ static void __iforce_usb_xmit(struct iforce *iforce) struct iforce_usb *iforce_usb = container_of(iforce, struct iforce_usb, iforce); int n, c; - unsigned long flags; - spin_lock_irqsave(&iforce->xmit_lock, flags); + guard(spinlock_irqsave)(&iforce->xmit_lock); if (iforce->xmit.head == iforce->xmit.tail) { iforce_clear_xmit_and_wake(iforce); - spin_unlock_irqrestore(&iforce->xmit_lock, flags); return; } @@ -45,7 +43,8 @@ static void __iforce_usb_xmit(struct iforce *iforce) /* Copy rest of data then */ c = CIRC_CNT_TO_END(iforce->xmit.head, iforce->xmit.tail, XMIT_SIZE); - if (n < c) c=n; + if (n < c) + c = n; memcpy(iforce_usb->out->transfer_buffer + 1, &iforce->xmit.buf[iforce->xmit.tail], @@ -53,11 +52,12 @@ static void __iforce_usb_xmit(struct iforce *iforce) if (n != c) { memcpy(iforce_usb->out->transfer_buffer + 1 + c, &iforce->xmit.buf[0], - n-c); + n - c); } XMIT_INC(iforce->xmit.tail, n); - if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) { + n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC); + if (n) { dev_warn(&iforce_usb->intf->dev, "usb_submit_urb failed %d\n", n); iforce_clear_xmit_and_wake(iforce); @@ -66,7 +66,6 @@ static void __iforce_usb_xmit(struct iforce *iforce) /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended. * As long as the urb completion handler is not called, the transmiting * is considered to be running */ - spin_unlock_irqrestore(&iforce->xmit_lock, flags); } static void iforce_usb_xmit(struct iforce *iforce) diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c index b0986d2195d6..c344dbc0c493 100644 --- a/drivers/input/joystick/n64joy.c +++ b/drivers/input/joystick/n64joy.c @@ -191,35 +191,32 @@ static void n64joy_poll(struct timer_list *t) static int n64joy_open(struct input_dev *dev) { struct n64joy_priv *priv = input_get_drvdata(dev); - int err; - - err = mutex_lock_interruptible(&priv->n64joy_mutex); - if (err) - return err; - - if (!priv->n64joy_opened) { - /* - * We could use the vblank irq, but it's not important if - * the poll point slightly changes. - */ - timer_setup(&priv->timer, n64joy_poll, 0); - mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16)); - } - priv->n64joy_opened++; + scoped_guard(mutex_intr, &priv->n64joy_mutex) { + if (!priv->n64joy_opened) { + /* + * We could use the vblank irq, but it's not important + * if the poll point slightly changes. 
+ */ + timer_setup(&priv->timer, n64joy_poll, 0); + mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16)); + } - mutex_unlock(&priv->n64joy_mutex); - return err; + priv->n64joy_opened++; + return 0; + } + + return -EINTR; } static void n64joy_close(struct input_dev *dev) { struct n64joy_priv *priv = input_get_drvdata(dev); - mutex_lock(&priv->n64joy_mutex); + guard(mutex)(&priv->n64joy_mutex); + if (!--priv->n64joy_opened) del_timer_sync(&priv->timer); - mutex_unlock(&priv->n64joy_mutex); } static const u64 __initconst scandata[] ____cacheline_aligned = { diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index 0a78dda3e0ea..db696ba61a3b 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -103,33 +103,31 @@ static void tgfx_timer(struct timer_list *t) static int tgfx_open(struct input_dev *dev) { struct tgfx *tgfx = input_get_drvdata(dev); - int err; - err = mutex_lock_interruptible(&tgfx->sem); - if (err) - return err; + scoped_guard(mutex_intr, &tgfx->sem) { + if (!tgfx->used++) { + parport_claim(tgfx->pd); + parport_write_control(tgfx->pd->port, 0x04); + mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME); + } - if (!tgfx->used++) { - parport_claim(tgfx->pd); - parport_write_control(tgfx->pd->port, 0x04); - mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME); + return 0; } - mutex_unlock(&tgfx->sem); - return 0; + return -EINTR; } static void tgfx_close(struct input_dev *dev) { struct tgfx *tgfx = input_get_drvdata(dev); - mutex_lock(&tgfx->sem); + guard(mutex)(&tgfx->sem); + if (!--tgfx->used) { del_timer_sync(&tgfx->timer); parport_write_control(tgfx->pd->port, 0x00); parport_release(tgfx->pd); } - mutex_unlock(&tgfx->sem); } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 22ea58bf76cb..ff9bc87f2f70 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1292,9 +1292,8 @@ static void xpad_irq_out(struct urb *urb) struct device *dev = &xpad->intf->dev; int status = urb->status; int error; - unsigned long flags; - spin_lock_irqsave(&xpad->odata_lock, flags); + guard(spinlock_irqsave)(&xpad->odata_lock); switch (status) { case 0: @@ -1328,8 +1327,6 @@ static void xpad_irq_out(struct urb *urb) xpad->irq_out_active = false; } } - - spin_unlock_irqrestore(&xpad->odata_lock, flags); } static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad, @@ -1394,10 +1391,8 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad) { struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_CMD_IDX]; - unsigned long flags; - int retval; - spin_lock_irqsave(&xpad->odata_lock, flags); + guard(spinlock_irqsave)(&xpad->odata_lock); packet->data[0] = 0x08; packet->data[1] = 0x00; @@ -1416,17 +1411,12 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad) /* Reset the sequence so we send out presence first */ xpad->last_out_packet = -1; - retval = xpad_try_sending_next_out_packet(xpad); - - spin_unlock_irqrestore(&xpad->odata_lock, flags); - - return retval; + return xpad_try_sending_next_out_packet(xpad); } static int xpad_start_xbox_one(struct usb_xpad *xpad) { - unsigned long flags; - int retval; + int error; if (usb_ifnum_to_if(xpad->udev, GIP_WIRED_INTF_AUDIO)) { /* @@ -1435,15 +1425,15 @@ static int xpad_start_xbox_one(struct usb_xpad *xpad) * Controller for Series X|S (0x20d6:0x200e) to report the * guide button. 
*/ - retval = usb_set_interface(xpad->udev, - GIP_WIRED_INTF_AUDIO, 0); - if (retval) + error = usb_set_interface(xpad->udev, + GIP_WIRED_INTF_AUDIO, 0); + if (error) dev_warn(&xpad->dev->dev, "unable to disable audio interface: %d\n", - retval); + error); } - spin_lock_irqsave(&xpad->odata_lock, flags); + guard(spinlock_irqsave)(&xpad->odata_lock); /* * Begin the init sequence by attempting to send a packet. @@ -1451,16 +1441,11 @@ static int xpad_start_xbox_one(struct usb_xpad *xpad) * sending any packets from the output ring. */ xpad->init_seq = 0; - retval = xpad_try_sending_next_out_packet(xpad); - - spin_unlock_irqrestore(&xpad->odata_lock, flags); - - return retval; + return xpad_try_sending_next_out_packet(xpad); } static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num) { - unsigned long flags; struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_CMD_IDX]; static const u8 mode_report_ack[] = { @@ -1468,7 +1453,7 @@ static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num) 0x00, GIP_CMD_VIRTUAL_KEY, GIP_OPT_INTERNAL, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 }; - spin_lock_irqsave(&xpad->odata_lock, flags); + guard(spinlock_irqsave)(&xpad->odata_lock); packet->len = sizeof(mode_report_ack); memcpy(packet->data, mode_report_ack, packet->len); @@ -1478,8 +1463,6 @@ static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num) /* Reset the sequence so we send out the ack now */ xpad->last_out_packet = -1; xpad_try_sending_next_out_packet(xpad); - - spin_unlock_irqrestore(&xpad->odata_lock, flags); } #ifdef CONFIG_JOYSTICK_XPAD_FF @@ -1489,8 +1472,6 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_FF_IDX]; __u16 strong; __u16 weak; - int retval; - unsigned long flags; if (effect->type != FF_RUMBLE) return 0; @@ -1498,7 +1479,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect strong = effect->u.rumble.strong_magnitude; weak = effect->u.rumble.weak_magnitude; - spin_lock_irqsave(&xpad->odata_lock, flags); + guard(spinlock_irqsave)(&xpad->odata_lock); switch (xpad->xtype) { case XTYPE_XBOX: @@ -1564,15 +1545,10 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect dev_dbg(&xpad->dev->dev, "%s - rumble command sent to unsupported xpad type: %d\n", __func__, xpad->xtype); - retval = -EINVAL; - goto out; + return -EINVAL; } - retval = xpad_try_sending_next_out_packet(xpad); - -out: - spin_unlock_irqrestore(&xpad->odata_lock, flags); - return retval; + return xpad_try_sending_next_out_packet(xpad); } static int xpad_init_ff(struct usb_xpad *xpad) @@ -1625,11 +1601,10 @@ static void xpad_send_led_command(struct usb_xpad *xpad, int command) { struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_LED_IDX]; - unsigned long flags; command %= 16; - spin_lock_irqsave(&xpad->odata_lock, flags); + guard(spinlock_irqsave)(&xpad->odata_lock); switch (xpad->xtype) { case XTYPE_XBOX360: @@ -1659,8 +1634,6 @@ static void xpad_send_led_command(struct usb_xpad *xpad, int command) } xpad_try_sending_next_out_packet(xpad); - - spin_unlock_irqrestore(&xpad->odata_lock, flags); } /* @@ -1785,11 +1758,10 @@ static void xpad_stop_input(struct usb_xpad *xpad) static void xpad360w_poweroff_controller(struct usb_xpad *xpad) { - unsigned long flags; struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_CMD_IDX]; - spin_lock_irqsave(&xpad->odata_lock, flags); + 
guard(spinlock_irqsave)(&xpad->odata_lock); packet->data[0] = 0x00; packet->data[1] = 0x00; @@ -1809,8 +1781,6 @@ static void xpad360w_poweroff_controller(struct usb_xpad *xpad) /* Reset the sequence so we send out poweroff now */ xpad->last_out_packet = -1; xpad_try_sending_next_out_packet(xpad); - - spin_unlock_irqrestore(&xpad->odata_lock, flags); } static int xpad360w_start_input(struct usb_xpad *xpad) @@ -2234,10 +2204,10 @@ static int xpad_suspend(struct usb_interface *intf, pm_message_t message) if (auto_poweroff && xpad->pad_present) xpad360w_poweroff_controller(xpad); } else { - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); + if (input_device_enabled(input)) xpad_stop_input(xpad); - mutex_unlock(&input->mutex); } xpad_stop_output(xpad); @@ -2249,26 +2219,25 @@ static int xpad_resume(struct usb_interface *intf) { struct usb_xpad *xpad = usb_get_intfdata(intf); struct input_dev *input = xpad->dev; - int retval = 0; - if (xpad->xtype == XTYPE_XBOX360W) { - retval = xpad360w_start_input(xpad); - } else { - mutex_lock(&input->mutex); - if (input_device_enabled(input)) { - retval = xpad_start_input(xpad); - } else if (xpad->xtype == XTYPE_XBOXONE) { - /* - * Even if there are no users, we'll send Xbox One pads - * the startup sequence so they don't sit there and - * blink until somebody opens the input device again. - */ - retval = xpad_start_xbox_one(xpad); - } - mutex_unlock(&input->mutex); + if (xpad->xtype == XTYPE_XBOX360W) + return xpad360w_start_input(xpad); + + guard(mutex)(&input->mutex); + + if (input_device_enabled(input)) + return xpad_start_input(xpad); + + if (xpad->xtype == XTYPE_XBOXONE) { + /* + * Even if there are no users, we'll send Xbox One pads + * the startup sequence so they don't sit there and + * blink until somebody opens the input device again. 
+ */ + return xpad_start_xbox_one(xpad); } - return retval; + return 0; } static struct usb_driver xpad_driver = { diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c index 10c248f0c1fc..980d739e45b8 100644 --- a/drivers/input/keyboard/adp5520-keys.c +++ b/drivers/input/keyboard/adp5520-keys.c @@ -181,7 +181,7 @@ static struct platform_driver adp5520_keys_driver = { .name = "adp5520-keys", }, .probe = adp5520_keys_probe, - .remove_new = adp5520_keys_remove, + .remove = adp5520_keys_remove, }; module_platform_driver(adp5520_keys_driver); diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 922d3ab998f3..81d0876ee358 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c @@ -411,7 +411,7 @@ static void adp5589_gpio_set_value(struct gpio_chip *chip, unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); if (val) kpad->dat_out[bank] |= bit; @@ -420,8 +420,6 @@ static void adp5589_gpio_set_value(struct gpio_chip *chip, adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + bank, kpad->dat_out[bank]); - - mutex_unlock(&kpad->gpio_lock); } static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off) @@ -429,18 +427,13 @@ static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off) struct adp5589_kpad *kpad = gpiochip_get_data(chip); unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - int ret; - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); kpad->dir[bank] &= ~bit; - ret = adp5589_write(kpad->client, - kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, - kpad->dir[bank]); - - mutex_unlock(&kpad->gpio_lock); - - return ret; + return adp5589_write(kpad->client, + kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, + kpad->dir[bank]); } static int adp5589_gpio_direction_output(struct gpio_chip *chip, @@ -449,9 +442,9 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip, struct adp5589_kpad *kpad = gpiochip_get_data(chip); unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); - int ret; + int error; - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); kpad->dir[bank] |= bit; @@ -460,15 +453,19 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip, else kpad->dat_out[bank] &= ~bit; - ret = adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) - + bank, kpad->dat_out[bank]); - ret |= adp5589_write(kpad->client, - kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, - kpad->dir[bank]); + error = adp5589_write(kpad->client, + kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + bank, + kpad->dat_out[bank]); + if (error) + return error; - mutex_unlock(&kpad->gpio_lock); + error = adp5589_write(kpad->client, + kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, + kpad->dir[bank]); + if (error) + return error; - return ret; + return 0; } static int adp5589_build_gpiomap(struct adp5589_kpad *kpad, diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index 2c993fa8306a..b5ff71cd5a70 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -717,9 +717,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi); static void applespi_msg_complete(struct applespi_data *applespi, bool 
is_write_msg, bool is_read_compl) { - unsigned long flags; - - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); if (is_read_compl) applespi->read_active = false; @@ -733,8 +731,6 @@ static void applespi_msg_complete(struct applespi_data *applespi, applespi->cmd_msg_queued = 0; applespi_send_cmd_msg(applespi); } - - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } static void applespi_async_write_complete(void *context) @@ -888,33 +884,22 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi) static void applespi_init(struct applespi_data *applespi, bool is_resume) { - unsigned long flags; - - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); if (is_resume) applespi->want_mt_init_cmd = true; else applespi->want_tp_info_cmd = true; applespi_send_cmd_msg(applespi); - - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } static int applespi_set_capsl_led(struct applespi_data *applespi, bool capslock_on) { - unsigned long flags; - int sts; - - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); applespi->want_cl_led_on = capslock_on; - sts = applespi_send_cmd_msg(applespi); - - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); - - return sts; + return applespi_send_cmd_msg(applespi); } static void applespi_set_bl_level(struct led_classdev *led_cdev, @@ -922,9 +907,8 @@ static void applespi_set_bl_level(struct led_classdev *led_cdev, { struct applespi_data *applespi = container_of(led_cdev, struct applespi_data, backlight_info); - unsigned long flags; - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); if (value == 0) { applespi->want_bl_level = value; @@ -940,8 +924,6 @@ static void applespi_set_bl_level(struct led_classdev *led_cdev, } applespi_send_cmd_msg(applespi); - - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } static int applespi_event(struct input_dev *dev, unsigned int type, @@ -1427,9 +1409,7 @@ static void applespi_got_data(struct applespi_data *applespi) /* process packet header */ if (!applespi_verify_crc(applespi, applespi->rx_buffer, APPLESPI_PACKET_SIZE)) { - unsigned long flags; - - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); if (applespi->drain) { applespi->read_active = false; @@ -1438,8 +1418,6 @@ static void applespi_got_data(struct applespi_data *applespi) wake_up_all(&applespi->drain_complete); } - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); - return; } @@ -1572,11 +1550,10 @@ static u32 applespi_notify(acpi_handle gpe_device, u32 gpe, void *context) { struct applespi_data *applespi = context; int sts; - unsigned long flags; trace_applespi_irq_received(ET_RD_IRQ, PT_READ); - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); if (!applespi->suspended) { sts = applespi_async(applespi, &applespi->rd_m, @@ -1589,8 +1566,6 @@ static u32 applespi_notify(acpi_handle gpe_device, u32 gpe, void *context) applespi->read_active = true; } - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); - return ACPI_INTERRUPT_HANDLED; } @@ -1818,29 +1793,21 @@ static int applespi_probe(struct spi_device *spi) static void applespi_drain_writes(struct applespi_data *applespi) { - unsigned long flags; - - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); 
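/*
 * Editor's note (not part of the patch): wait_event_lock_irq() drops
 * cmd_msg_lock while it sleeps and re-acquires it before re-checking
 * the condition, so it composes correctly with the guard above; the
 * guard only guarantees the lock is held on entry and released once
 * the function returns.
 */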
applespi->drain = true; wait_event_lock_irq(applespi->drain_complete, !applespi->write_active, applespi->cmd_msg_lock); - - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } static void applespi_drain_reads(struct applespi_data *applespi) { - unsigned long flags; - - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); + guard(spinlock_irqsave)(&applespi->cmd_msg_lock); wait_event_lock_irq(applespi->drain_complete, !applespi->read_active, applespi->cmd_msg_lock); applespi->suspended = true; - - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } static void applespi_remove(struct spi_device *spi) @@ -1907,21 +1874,18 @@ static int applespi_resume(struct device *dev) struct spi_device *spi = to_spi_device(dev); struct applespi_data *applespi = spi_get_drvdata(spi); acpi_status acpi_sts; - unsigned long flags; /* ensure our flags and state reflect a newly resumed device */ - spin_lock_irqsave(&applespi->cmd_msg_lock, flags); - - applespi->drain = false; - applespi->have_cl_led_on = false; - applespi->have_bl_level = 0; - applespi->cmd_msg_queued = 0; - applespi->read_active = false; - applespi->write_active = false; - - applespi->suspended = false; + scoped_guard(spinlock_irqsave, &applespi->cmd_msg_lock) { + applespi->drain = false; + applespi->have_cl_led_on = false; + applespi->have_bl_level = 0; + applespi->cmd_msg_queued = 0; + applespi->read_active = false; + applespi->write_active = false; - spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); + applespi->suspended = false; + } /* switch on the SPI interface */ applespi_enable_spi(applespi); diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 5855d4fc6e6a..ec94fcfa4cde 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -713,9 +713,9 @@ static int atkbd_event(struct input_dev *dev, static inline void atkbd_enable(struct atkbd *atkbd) { - serio_pause_rx(atkbd->ps2dev.serio); + guard(serio_pause_rx)(atkbd->ps2dev.serio); + atkbd->enabled = true; - serio_continue_rx(atkbd->ps2dev.serio); } /* @@ -725,9 +725,9 @@ static inline void atkbd_enable(struct atkbd *atkbd) static inline void atkbd_disable(struct atkbd *atkbd) { - serio_pause_rx(atkbd->ps2dev.serio); + guard(serio_pause_rx)(atkbd->ps2dev.serio); + atkbd->enabled = false; - serio_continue_rx(atkbd->ps2dev.serio); } static int atkbd_activate(struct atkbd *atkbd) diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index b21ef9d6ff9d..0c17cbaa3d27 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c @@ -416,7 +416,7 @@ static int cap11xx_led_set(struct led_classdev *cdev, static int cap11xx_init_leds(struct device *dev, struct cap11xx_priv *priv, int num_leds) { - struct device_node *node = dev->of_node, *child; + struct device_node *node = dev->of_node; struct cap11xx_led *led; int cnt = of_get_child_count(node); int error; @@ -445,7 +445,7 @@ static int cap11xx_init_leds(struct device *dev, if (error) return error; - for_each_child_of_node(node, child) { + for_each_child_of_node_scoped(node, child) { u32 reg; led->cdev.name = @@ -458,19 +458,15 @@ static int cap11xx_init_leds(struct device *dev, led->cdev.brightness = LED_OFF; error = of_property_read_u32(child, "reg", ®); - if (error != 0 || reg >= num_leds) { - of_node_put(child); + if (error != 0 || reg >= num_leds) return -EINVAL; - } led->reg = reg; led->priv = priv; error = devm_led_classdev_register(dev, &led->cdev); - if (error) { - of_node_put(child); + if (error) return error; - } 
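/*
 * Editor's note: for_each_child_of_node_scoped() owns the reference to
 * "child" and drops it automatically when the loop variable goes out
 * of scope, so the explicit of_node_put(child) calls on the error
 * returns above are no longer needed (mtk-pmic-keys below is converted
 * the same way).
 */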
priv->num_leds++; led++; diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 4c81b20ff6af..c1e53d87c8a7 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -770,7 +770,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(cros_ec_keyb_pm_ops, NULL, cros_ec_keyb_resume); static struct platform_driver cros_ec_keyb_driver = { .probe = cros_ec_keyb_probe, - .remove_new = cros_ec_keyb_remove, + .remove = cros_ec_keyb_remove, .driver = { .name = "cros-ec-keyb", .dev_groups = cros_ec_keyb_groups, diff --git a/drivers/input/keyboard/cypress-sf.c b/drivers/input/keyboard/cypress-sf.c index eb1d0720784d..335b72efc5aa 100644 --- a/drivers/input/keyboard/cypress-sf.c +++ b/drivers/input/keyboard/cypress-sf.c @@ -208,7 +208,7 @@ static int cypress_sf_resume(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(cypress_sf_pm_ops, cypress_sf_suspend, cypress_sf_resume); -static struct i2c_device_id cypress_sf_id_table[] = { +static const struct i2c_device_id cypress_sf_id_table[] = { { CYPRESS_SF_DEV_NAME }, { } }; diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index dcbc50304a5a..817c23438f6e 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -168,15 +168,13 @@ static int ep93xx_keypad_suspend(struct device *dev) struct ep93xx_keypad *keypad = platform_get_drvdata(pdev); struct input_dev *input_dev = keypad->input_dev; - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (keypad->enabled) { clk_disable(keypad->clk); keypad->enabled = false; } - mutex_unlock(&input_dev->mutex); - return 0; } @@ -186,7 +184,7 @@ static int ep93xx_keypad_resume(struct device *dev) struct ep93xx_keypad *keypad = platform_get_drvdata(pdev); struct input_dev *input_dev = keypad->input_dev; - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) { if (!keypad->enabled) { @@ -196,8 +194,6 @@ static int ep93xx_keypad_resume(struct device *dev) } } - mutex_unlock(&input_dev->mutex); - return 0; } @@ -289,7 +285,7 @@ static struct platform_driver ep93xx_keypad_driver = { .of_match_table = ep93xx_keypad_of_ids, }, .probe = ep93xx_keypad_probe, - .remove_new = ep93xx_keypad_remove, + .remove = ep93xx_keypad_remove, }; module_platform_driver(ep93xx_keypad_driver); diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c index c1a4d5055de6..c8d8d0ea35b0 100644 --- a/drivers/input/keyboard/hilkbd.c +++ b/drivers/input/keyboard/hilkbd.c @@ -180,9 +180,8 @@ static irqreturn_t hil_interrupt(int irq, void *handle) /* send a command to the HIL */ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len) { - unsigned long flags; + guard(spinlock_irqsave)(&hil_dev.lock); - spin_lock_irqsave(&hil_dev.lock, flags); while (hil_busy()) /* wait */; hil_command(cmd); @@ -191,7 +190,6 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len) /* wait */; hil_write_data(*(data++)); } - spin_unlock_irqrestore(&hil_dev.lock, flags); } diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c index e15a93619e82..b92268ddfd84 100644 --- a/drivers/input/keyboard/imx_keypad.c +++ b/drivers/input/keyboard/imx_keypad.c @@ -521,13 +521,11 @@ static int __maybe_unused imx_kbd_noirq_suspend(struct device *dev) struct input_dev *input_dev = kbd->input_dev; unsigned short reg_val = readw(kbd->mmio_base + KPSR); - /* imx kbd can wake up system 
even clock is disabled */ - mutex_lock(&input_dev->mutex); - - if (input_device_enabled(input_dev)) - clk_disable_unprepare(kbd->clk); - - mutex_unlock(&input_dev->mutex); + scoped_guard(mutex, &input_dev->mutex) { + /* imx kbd can wake up system even clock is disabled */ + if (input_device_enabled(input_dev)) + clk_disable_unprepare(kbd->clk); + } if (device_may_wakeup(&pdev->dev)) { if (reg_val & KBD_STAT_KPKD) @@ -547,23 +545,20 @@ static int __maybe_unused imx_kbd_noirq_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct imx_keypad *kbd = platform_get_drvdata(pdev); struct input_dev *input_dev = kbd->input_dev; - int ret = 0; + int error; if (device_may_wakeup(&pdev->dev)) disable_irq_wake(kbd->irq); - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) { - ret = clk_prepare_enable(kbd->clk); - if (ret) - goto err_clk; + error = clk_prepare_enable(kbd->clk); + if (error) + return error; } -err_clk: - mutex_unlock(&input_dev->mutex); - - return ret; + return 0; } static const struct dev_pm_ops imx_kbd_pm_ops = { diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c index 1d71dd79ffd2..58631bf7ce55 100644 --- a/drivers/input/keyboard/ipaq-micro-keys.c +++ b/drivers/input/keyboard/ipaq-micro-keys.c @@ -54,18 +54,18 @@ static void micro_key_receive(void *data, int len, unsigned char *msg) static void micro_key_start(struct ipaq_micro_keys *keys) { - spin_lock(&keys->micro->lock); + guard(spinlock)(&keys->micro->lock); + keys->micro->key = micro_key_receive; keys->micro->key_data = keys; - spin_unlock(&keys->micro->lock); } static void micro_key_stop(struct ipaq_micro_keys *keys) { - spin_lock(&keys->micro->lock); + guard(spinlock)(&keys->micro->lock); + keys->micro->key = NULL; keys->micro->key_data = NULL; - spin_unlock(&keys->micro->lock); } static int micro_key_open(struct input_dev *input) @@ -141,13 +141,11 @@ static int micro_key_resume(struct device *dev) struct ipaq_micro_keys *keys = dev_get_drvdata(dev); struct input_dev *input = keys->input; - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); if (input_device_enabled(input)) micro_key_start(keys); - mutex_unlock(&input->mutex); - return 0; } diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c index 1315b0f0862f..b086c7b28d5c 100644 --- a/drivers/input/keyboard/iqs62x-keys.c +++ b/drivers/input/keyboard/iqs62x-keys.c @@ -323,7 +323,7 @@ static struct platform_driver iqs62x_keys_platform_driver = { .name = "iqs62x-keys", }, .probe = iqs62x_keys_probe, - .remove_new = iqs62x_keys_remove, + .remove = iqs62x_keys_remove, }; module_platform_driver(iqs62x_keys_platform_driver); diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index cf67ba13477a..e26bf2956344 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -350,11 +350,11 @@ static int lm8323_configure(struct lm8323_chip *lm) static void pwm_done(struct lm8323_pwm *pwm) { - mutex_lock(&pwm->lock); + guard(mutex)(&pwm->lock); + pwm->running = false; if (pwm->desired_brightness != pwm->brightness) schedule_work(&pwm->work); - mutex_unlock(&pwm->lock); } /* @@ -367,7 +367,7 @@ static irqreturn_t lm8323_irq(int irq, void *_lm) u8 ints; int i; - mutex_lock(&lm->lock); + guard(mutex)(&lm->lock); while ((lm8323_read(lm, LM8323_CMD_READ_INT, &ints, 1) == 1) && ints) { if (likely(ints & INT_KEYPAD)) @@ -394,8 +394,6 @@ static irqreturn_t lm8323_irq(int 
irq, void *_lm) } } - mutex_unlock(&lm->lock); - return IRQ_HANDLED; } @@ -445,7 +443,7 @@ static void lm8323_pwm_work(struct work_struct *work) u16 pwm_cmds[3]; int num_cmds = 0; - mutex_lock(&pwm->lock); + guard(mutex)(&pwm->lock); /* * Do nothing if we're already at the requested level, @@ -454,7 +452,7 @@ static void lm8323_pwm_work(struct work_struct *work) * finishes. */ if (pwm->running || pwm->desired_brightness == pwm->brightness) - goto out; + return; kill = (pwm->desired_brightness == 0); up = (pwm->desired_brightness > pwm->brightness); @@ -489,9 +487,6 @@ static void lm8323_pwm_work(struct work_struct *work) lm8323_write_pwm(pwm, kill, num_cmds, pwm_cmds); pwm->brightness = pwm->desired_brightness; - - out: - mutex_unlock(&pwm->lock); } static void lm8323_pwm_set_brightness(struct led_classdev *led_cdev, @@ -500,9 +495,9 @@ static void lm8323_pwm_set_brightness(struct led_classdev *led_cdev, struct lm8323_pwm *pwm = cdev_to_pwm(led_cdev); struct lm8323_chip *lm = pwm->chip; - mutex_lock(&pwm->lock); - pwm->desired_brightness = brightness; - mutex_unlock(&pwm->lock); + scoped_guard(mutex, &pwm->lock) { + pwm->desired_brightness = brightness; + } if (in_interrupt()) { schedule_work(&pwm->work); @@ -510,12 +505,12 @@ static void lm8323_pwm_set_brightness(struct led_classdev *led_cdev, /* * Schedule PWM work as usual unless we are going into suspend */ - mutex_lock(&lm->lock); - if (likely(!lm->pm_suspend)) - schedule_work(&pwm->work); - else - lm8323_pwm_work(&pwm->work); - mutex_unlock(&lm->lock); + scoped_guard(mutex, &lm->lock) { + if (likely(!lm->pm_suspend)) + schedule_work(&pwm->work); + else + lm8323_pwm_work(&pwm->work); + } } } @@ -608,9 +603,9 @@ static ssize_t lm8323_set_disable(struct device *dev, if (ret) return ret; - mutex_lock(&lm->lock); + guard(mutex)(&lm->lock); + lm->kp_enabled = !i; - mutex_unlock(&lm->lock); return count; } @@ -758,9 +753,9 @@ static int lm8323_suspend(struct device *dev) irq_set_irq_wake(client->irq, 0); disable_irq(client->irq); - mutex_lock(&lm->lock); - lm->pm_suspend = true; - mutex_unlock(&lm->lock); + scoped_guard(mutex, &lm->lock) { + lm->pm_suspend = true; + } for (i = 0; i < 3; i++) if (lm->pwm[i].enabled) @@ -775,9 +770,9 @@ static int lm8323_resume(struct device *dev) struct lm8323_chip *lm = i2c_get_clientdata(client); int i; - mutex_lock(&lm->lock); - lm->pm_suspend = false; - mutex_unlock(&lm->lock); + scoped_guard(mutex, &lm->lock) { + lm->pm_suspend = false; + } for (i = 0; i < 3; i++) if (lm->pwm[i].enabled) diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c index 4b0f8323c492..c501a93a4417 100644 --- a/drivers/input/keyboard/locomokbd.c +++ b/drivers/input/keyboard/locomokbd.c @@ -112,11 +112,10 @@ static inline void locomokbd_reset_col(unsigned long membase, int col) static void locomokbd_scankeyboard(struct locomokbd *locomokbd) { unsigned int row, col, rowd; - unsigned long flags; unsigned int num_pressed; unsigned long membase = locomokbd->base; - spin_lock_irqsave(&locomokbd->lock, flags); + guard(spinlock_irqsave)(&locomokbd->lock); locomokbd_charge_all(membase); @@ -167,8 +166,6 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd) mod_timer(&locomokbd->timer, jiffies + SCAN_INTERVAL); else locomokbd->count_cancel = 0; - - spin_unlock_irqrestore(&locomokbd->lock, flags); } /* diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c index 423035be86fb..2392e7ec3b19 100644 --- a/drivers/input/keyboard/lpc32xx-keys.c +++ 
b/drivers/input/keyboard/lpc32xx-keys.c @@ -262,7 +262,7 @@ static int lpc32xx_kscan_suspend(struct device *dev) struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); struct input_dev *input = kscandat->input; - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); if (input_device_enabled(input)) { /* Clear IRQ and disable clock */ @@ -270,7 +270,6 @@ static int lpc32xx_kscan_suspend(struct device *dev) clk_disable_unprepare(kscandat->clk); } - mutex_unlock(&input->mutex); return 0; } @@ -279,19 +278,20 @@ static int lpc32xx_kscan_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); struct input_dev *input = kscandat->input; - int retval = 0; + int error; - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); if (input_device_enabled(input)) { /* Enable clock and clear IRQ */ - retval = clk_prepare_enable(kscandat->clk); - if (retval == 0) - writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); + error = clk_prepare_enable(kscandat->clk); + if (error) + return error; + + writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); } - mutex_unlock(&input->mutex); - return retval; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(lpc32xx_kscan_pm_ops, lpc32xx_kscan_suspend, diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c index 91a1d2958109..1a8f1fa53fbb 100644 --- a/drivers/input/keyboard/maple_keyb.c +++ b/drivers/input/keyboard/maple_keyb.c @@ -132,14 +132,11 @@ static void dc_kbd_callback(struct mapleq *mq) * We should always get the lock because the only * time it may be locked is if the driver is in the cleanup phase. */ - if (likely(mutex_trylock(&maple_keyb_mutex))) { - + scoped_guard(mutex_try, &maple_keyb_mutex) { if (buf[1] == mapledev->function) { memcpy(kbd->new, buf + 2, 8); dc_scan_kbd(kbd); } - - mutex_unlock(&maple_keyb_mutex); } } @@ -211,14 +208,12 @@ static int remove_maple_kbd(struct device *dev) struct maple_device *mdev = to_maple_dev(dev); struct dc_kbd *kbd = maple_get_drvdata(mdev); - mutex_lock(&maple_keyb_mutex); + guard(mutex)(&maple_keyb_mutex); input_unregister_device(kbd->dev); kfree(kbd); maple_set_drvdata(mdev, NULL); - - mutex_unlock(&maple_keyb_mutex); return 0; } diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 3c38bae576ed..2a3b3bfc2878 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -17,7 +17,6 @@ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/gpio.h> -#include <linux/gpio/consumer.h> #include <linux/input/matrix_keypad.h> #include <linux/slab.h> #include <linux/of.h> @@ -158,18 +157,17 @@ static void matrix_keypad_scan(struct work_struct *work) activate_all_cols(keypad, true); /* Enable IRQs again */ - spin_lock_irq(&keypad->lock); - keypad->scan_pending = false; - enable_row_irqs(keypad); - spin_unlock_irq(&keypad->lock); + scoped_guard(spinlock_irq, &keypad->lock) { + keypad->scan_pending = false; + enable_row_irqs(keypad); + } } static irqreturn_t matrix_keypad_interrupt(int irq, void *id) { struct matrix_keypad *keypad = id; - unsigned long flags; - spin_lock_irqsave(&keypad->lock, flags); + guard(spinlock_irqsave)(&keypad->lock); /* * See if another IRQ beaten us to it and scheduled the @@ -185,7 +183,6 @@ static irqreturn_t matrix_keypad_interrupt(int irq, void *id) msecs_to_jiffies(keypad->debounce_ms)); out: - spin_unlock_irqrestore(&keypad->lock, flags); return IRQ_HANDLED; } @@ -209,9 
+206,9 @@ static void matrix_keypad_stop(struct input_dev *dev) { struct matrix_keypad *keypad = input_get_drvdata(dev); - spin_lock_irq(&keypad->lock); - keypad->stopped = true; - spin_unlock_irq(&keypad->lock); + scoped_guard(spinlock_irq, &keypad->lock) { + keypad->stopped = true; + } flush_delayed_work(&keypad->work); /* diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index 21827d2497fa..bd1a944ded46 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -82,42 +82,6 @@ static const struct mpr121_init_register init_reg_table[] = { { AUTO_CONFIG_CTRL_ADDR, 0x0b }, }; -static void mpr121_vdd_supply_disable(void *data) -{ - struct regulator *vdd_supply = data; - - regulator_disable(vdd_supply); -} - -static struct regulator *mpr121_vdd_supply_init(struct device *dev) -{ - struct regulator *vdd_supply; - int err; - - vdd_supply = devm_regulator_get(dev, "vdd"); - if (IS_ERR(vdd_supply)) { - dev_err(dev, "failed to get vdd regulator: %ld\n", - PTR_ERR(vdd_supply)); - return vdd_supply; - } - - err = regulator_enable(vdd_supply); - if (err) { - dev_err(dev, "failed to enable vdd regulator: %d\n", err); - return ERR_PTR(err); - } - - err = devm_add_action_or_reset(dev, mpr121_vdd_supply_disable, - vdd_supply); - if (err) { - dev_err(dev, "failed to add disable regulator action: %d\n", - err); - return ERR_PTR(err); - } - - return vdd_supply; -} - static void mpr_touchkey_report(struct input_dev *dev) { struct mpr121_touchkey *mpr121 = input_get_drvdata(dev); @@ -233,7 +197,6 @@ err_i2c_write: static int mpr_touchkey_probe(struct i2c_client *client) { struct device *dev = &client->dev; - struct regulator *vdd_supply; int vdd_uv; struct mpr121_touchkey *mpr121; struct input_dev *input_dev; @@ -241,11 +204,9 @@ static int mpr_touchkey_probe(struct i2c_client *client) int error; int i; - vdd_supply = mpr121_vdd_supply_init(dev); - if (IS_ERR(vdd_supply)) - return PTR_ERR(vdd_supply); - - vdd_uv = regulator_get_voltage(vdd_supply); + vdd_uv = devm_regulator_get_enable_read_voltage(dev, "vdd"); + if (vdd_uv < 0) + return dev_err_probe(dev, vdd_uv, "failed to get vdd voltage\n"); mpr121 = devm_kzalloc(dev, sizeof(*mpr121), GFP_KERNEL); if (!mpr121) diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c index 4364c3401ff1..5ad6be914160 100644 --- a/drivers/input/keyboard/mtk-pmic-keys.c +++ b/drivers/input/keyboard/mtk-pmic-keys.c @@ -307,7 +307,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev) int error, index = 0; unsigned int keycount; struct mt6397_chip *pmic_chip = dev_get_drvdata(pdev->dev.parent); - struct device_node *node = pdev->dev.of_node, *child; + struct device_node *node = pdev->dev.of_node; static const char *const irqnames[] = { "powerkey", "homekey" }; static const char *const irqnames_r[] = { "powerkey_r", "homekey_r" }; struct mtk_pmic_keys *keys; @@ -343,24 +343,20 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev) return -EINVAL; } - for_each_child_of_node(node, child) { + for_each_child_of_node_scoped(node, child) { keys->keys[index].regs = &mtk_pmic_regs->keys_regs[index]; keys->keys[index].irq = platform_get_irq_byname(pdev, irqnames[index]); - if (keys->keys[index].irq < 0) { - of_node_put(child); + if (keys->keys[index].irq < 0) return keys->keys[index].irq; - } if (of_device_is_compatible(node, "mediatek,mt6358-keys")) { keys->keys[index].irq_r = platform_get_irq_byname(pdev, irqnames_r[index]); - if 
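The mpr121 change above collapses a ~35-line helper (get the regulator, enable it, register a devm disable action, read the voltage) into a single call. A sketch of the resulting probe fragment; everything beyond what the hunk itself shows is hypothetical:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int foo_get_vdd_uv(struct device *dev)
{
        int vdd_uv;

        /*
         * Gets the "vdd" supply, enables it, arranges for it to be
         * disabled automatically on driver detach, and returns the
         * voltage in microvolts (or a negative errno).
         */
        vdd_uv = devm_regulator_get_enable_read_voltage(dev, "vdd");
        if (vdd_uv < 0)
                return dev_err_probe(dev, vdd_uv,
                                     "failed to get vdd voltage\n");

        return vdd_uv;
}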
(keys->keys[index].irq_r < 0) { - of_node_put(child); + if (keys->keys[index].irq_r < 0) return keys->keys[index].irq_r; - } } error = of_property_read_u32(child, @@ -369,7 +365,6 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev) dev_err(keys->dev, "failed to read key:%d linux,keycode property: %d\n", index, error); - of_node_put(child); return error; } @@ -377,10 +372,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev) keys->keys[index].wakeup = true; error = mtk_pmic_key_setup(keys, &keys->keys[index]); - if (error) { - of_node_put(child); + if (error) return error; - } index++; } diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c index 57587541110b..9e13f3f70a81 100644 --- a/drivers/input/keyboard/omap-keypad.c +++ b/drivers/input/keyboard/omap-keypad.c @@ -156,15 +156,15 @@ static ssize_t omap_kp_enable_store(struct device *dev, struct device_attribute if ((state != 1) && (state != 0)) return -EINVAL; - mutex_lock(&kp_enable_mutex); - if (state != kp_enable) { - if (state) - enable_irq(omap_kp->irq); - else - disable_irq(omap_kp->irq); - kp_enable = state; + scoped_guard(mutex, &kp_enable_mutex) { + if (state != kp_enable) { + if (state) + enable_irq(omap_kp->irq); + else + disable_irq(omap_kp->irq); + kp_enable = state; + } } - mutex_unlock(&kp_enable_mutex); return strnlen(buf, count); } @@ -290,7 +290,7 @@ static void omap_kp_remove(struct platform_device *pdev) static struct platform_driver omap_kp_driver = { .probe = omap_kp_probe, - .remove_new = omap_kp_remove, + .remove = omap_kp_remove, .driver = { .name = "omap-keypad", .dev_groups = omap_kp_groups, diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index 040b340995d8..bffe89c0717a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -144,7 +144,7 @@ static void omap4_keypad_scan_keys(struct omap4_keypad *keypad_data, u64 keys) { u64 changed; - mutex_lock(&keypad_data->lock); + guard(mutex)(&keypad_data->lock); changed = keys ^ keypad_data->keys; @@ -158,8 +158,6 @@ static void omap4_keypad_scan_keys(struct omap4_keypad *keypad_data, u64 keys) omap4_keypad_report_keys(keypad_data, changed & keys, true); keypad_data->keys = keys; - - mutex_unlock(&keypad_data->lock); } /* Interrupt handlers */ @@ -487,7 +485,7 @@ MODULE_DEVICE_TABLE(of, omap_keypad_dt_match); static struct platform_driver omap4_keypad_driver = { .probe = omap4_keypad_probe, - .remove_new = omap4_keypad_remove, + .remove = omap4_keypad_remove, .driver = { .name = "omap4-keypad", .of_match_table = omap_keypad_dt_match, diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c index 26a005f9ace3..35d1aa2a22a5 100644 --- a/drivers/input/keyboard/pmic8xxx-keypad.c +++ b/drivers/input/keyboard/pmic8xxx-keypad.c @@ -630,12 +630,10 @@ static int pmic8xxx_kp_suspend(struct device *dev) if (device_may_wakeup(dev)) { enable_irq_wake(kp->key_sense_irq); } else { - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) pmic8xxx_kp_disable(kp); - - mutex_unlock(&input_dev->mutex); } return 0; @@ -650,12 +648,10 @@ static int pmic8xxx_kp_resume(struct device *dev) if (device_may_wakeup(dev)) { disable_irq_wake(kp->key_sense_irq); } else { - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) pmic8xxx_kp_enable(kp); - - mutex_unlock(&input_dev->mutex); } return 0; diff --git 
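The mtk-pmic-keys hunk is one of several in this series (sun4i-lradc-keys below is another) that switch to for_each_child_of_node_scoped(). The scoped iterator owns the child reference and drops it when the loop variable goes out of scope, so the repeated of_node_put(child) calls on each error path can simply be deleted. A minimal sketch with a hypothetical binding:

#include <linux/errno.h>
#include <linux/of.h>

static int foo_parse_keys(struct device_node *node)
{
        /* "child" is declared by the macro and put automatically */
        for_each_child_of_node_scoped(node, child) {
                u32 code;

                if (of_property_read_u32(child, "linux,keycode", &code))
                        return -EINVAL; /* no of_node_put() needed here */

                /* register the key for "code" ... */
        }

        return 0;
}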
a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index 3724363d140e..38ec619aa359 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -682,7 +682,7 @@ static int pxa27x_keypad_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); struct input_dev *input_dev = keypad->input_dev; - int ret = 0; + int error; /* * If the keypad is used as wake up source, the clock is not turned @@ -691,19 +691,19 @@ static int pxa27x_keypad_resume(struct device *dev) if (device_may_wakeup(&pdev->dev)) { disable_irq_wake(keypad->irq); } else { - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) { /* Enable unit clock */ - ret = clk_prepare_enable(keypad->clk); - if (!ret) - pxa27x_keypad_config(keypad); - } + error = clk_prepare_enable(keypad->clk); + if (error) + return error; - mutex_unlock(&input_dev->mutex); + pxa27x_keypad_config(keypad); + } } - return ret; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(pxa27x_keypad_pm_ops, diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c index e212eff7687c..9f1049aa3048 100644 --- a/drivers/input/keyboard/samsung-keypad.c +++ b/drivers/input/keyboard/samsung-keypad.c @@ -587,7 +587,7 @@ MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids); static struct platform_driver samsung_keypad_driver = { .probe = samsung_keypad_probe, - .remove_new = samsung_keypad_remove, + .remove = samsung_keypad_remove, .driver = { .name = "samsung-keypad", .of_match_table = of_match_ptr(samsung_keypad_dt_match), diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 4ea4fd25c5d2..159f41eedd41 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -319,7 +319,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops, static struct platform_driver sh_keysc_device_driver = { .probe = sh_keysc_probe, - .remove_new = sh_keysc_remove, + .remove = sh_keysc_remove, .driver = { .name = "sh_keysc", .pm = pm_sleep_ptr(&sh_keysc_dev_pm_ops), diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c index 1df4feb8ba01..2fae337562a2 100644 --- a/drivers/input/keyboard/spear-keyboard.c +++ b/drivers/input/keyboard/spear-keyboard.c @@ -20,7 +20,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> -#include <linux/pm_wakeup.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/platform_data/keyboard-spear.h> @@ -274,7 +273,7 @@ static int spear_kbd_suspend(struct device *dev) struct input_dev *input_dev = kbd->input; unsigned int rate = 0, mode_ctl_reg, val; - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); /* explicitly enable clock as we may program device */ clk_enable(kbd->clk); @@ -315,8 +314,6 @@ static int spear_kbd_suspend(struct device *dev) /* restore previous clk state */ clk_disable(kbd->clk); - mutex_unlock(&input_dev->mutex); - return 0; } @@ -326,7 +323,7 @@ static int spear_kbd_resume(struct device *dev) struct spear_kbd *kbd = platform_get_drvdata(pdev); struct input_dev *input_dev = kbd->input; - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (device_may_wakeup(&pdev->dev)) { if (kbd->irq_wake_enabled) { @@ -342,8 +339,6 @@ static int spear_kbd_resume(struct device *dev) if (input_device_enabled(input_dev)) 
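The .remove_new -> .remove renames scattered through this diff (omap-keypad above, samsung-keypad and sh_keysc here, and many more below) are mechanical: .remove_new was the transitional name used while struct platform_driver's remove callback was converted from int to void return, and with that tree-wide conversion complete the callback is renamed back to plain .remove. The resulting shape, names hypothetical:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        return 0;
}

static void foo_remove(struct platform_device *pdev)
{
        /* void return: remove callbacks can no longer fail */
}

static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name = "foo",
        },
};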
writel_relaxed(kbd->mode_ctl_reg, kbd->io_base + MODE_CTL_REG); - mutex_unlock(&input_dev->mutex); - return 0; } diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index 0d27324af809..e53ef4c670e4 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c @@ -216,14 +216,13 @@ static int keyscan_suspend(struct device *dev) struct st_keyscan *keypad = platform_get_drvdata(pdev); struct input_dev *input = keypad->input_dev; - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); if (device_may_wakeup(dev)) enable_irq_wake(keypad->irq); else if (input_device_enabled(input)) keyscan_stop(keypad); - mutex_unlock(&input->mutex); return 0; } @@ -232,17 +231,19 @@ static int keyscan_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct st_keyscan *keypad = platform_get_drvdata(pdev); struct input_dev *input = keypad->input_dev; - int retval = 0; + int error; - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); - if (device_may_wakeup(dev)) + if (device_may_wakeup(dev)) { disable_irq_wake(keypad->irq); - else if (input_device_enabled(input)) - retval = keyscan_start(keypad); + } else if (input_device_enabled(input)) { + error = keyscan_start(keypad); + if (error) + return error; + } - mutex_unlock(&input->mutex); - return retval; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c index ef2f44027894..0acded4fb9c9 100644 --- a/drivers/input/keyboard/stmpe-keypad.c +++ b/drivers/input/keyboard/stmpe-keypad.c @@ -414,7 +414,7 @@ static void stmpe_keypad_remove(struct platform_device *pdev) static struct platform_driver stmpe_keypad_driver = { .driver.name = "stmpe-keypad", .probe = stmpe_keypad_probe, - .remove_new = stmpe_keypad_remove, + .remove = stmpe_keypad_remove, }; module_platform_driver(stmpe_keypad_driver); diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c index f304cab0ebdb..5730f08f82d7 100644 --- a/drivers/input/keyboard/sun4i-lradc-keys.c +++ b/drivers/input/keyboard/sun4i-lradc-keys.c @@ -24,7 +24,6 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_wakeirq.h> -#include <linux/pm_wakeup.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -202,7 +201,7 @@ static void sun4i_lradc_close(struct input_dev *dev) static int sun4i_lradc_load_dt_keymap(struct device *dev, struct sun4i_lradc_data *lradc) { - struct device_node *np, *pp; + struct device_node *np; int i; int error; @@ -223,28 +222,25 @@ static int sun4i_lradc_load_dt_keymap(struct device *dev, return -ENOMEM; i = 0; - for_each_child_of_node(np, pp) { + for_each_child_of_node_scoped(np, pp) { struct sun4i_lradc_keymap *map = &lradc->chan0_map[i]; u32 channel; error = of_property_read_u32(pp, "channel", &channel); if (error || channel != 0) { dev_err(dev, "%pOFn: Inval channel prop\n", pp); - of_node_put(pp); return -EINVAL; } error = of_property_read_u32(pp, "voltage", &map->voltage); if (error) { dev_err(dev, "%pOFn: Inval voltage prop\n", pp); - of_node_put(pp); return -EINVAL; } error = of_property_read_u32(pp, "linux,code", &map->keycode); if (error) { dev_err(dev, "%pOFn: Inval linux,code prop\n", pp); - of_node_put(pp); return -EINVAL; } diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index 72fb46029710..3299e1919b37 100644 --- a/drivers/input/keyboard/sunkbd.c 
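Several resume paths in this range (lpc32xx-keys, pxa27x_keypad, st-keyscan) get the same two-part treatment: the accumulated "retval" variable is renamed to "error" per input-subsystem convention, and with guard(mutex) holding the lock the failure case returns directly instead of falling through to a common unlock. A sketch of the shape these callbacks converge on, with hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/input.h>
#include <linux/mutex.h>

struct foo {
        struct input_dev *input;
        struct clk *clk;
};

static int foo_resume(struct device *dev)
{
        struct foo *f = dev_get_drvdata(dev);
        int error;

        guard(mutex)(&f->input->mutex);

        if (input_device_enabled(f->input)) {
                error = clk_prepare_enable(f->clk);
                if (error)
                        return error;   /* mutex released by the guard */

                foo_hw_reinit(f);       /* hypothetical hardware re-init */
        }

        return 0;
}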
+++ b/drivers/input/keyboard/sunkbd.c @@ -241,9 +241,8 @@ static void sunkbd_reinit(struct work_struct *work) static void sunkbd_enable(struct sunkbd *sunkbd, bool enable) { - serio_pause_rx(sunkbd->serio); - sunkbd->enabled = enable; - serio_continue_rx(sunkbd->serio); + scoped_guard(serio_pause_rx, sunkbd->serio) + sunkbd->enabled = enable; if (!enable) { wake_up_interruptible(&sunkbd->wait); diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c index 6477a41c4bac..9159b5fec129 100644 --- a/drivers/input/misc/88pm80x_onkey.c +++ b/drivers/input/misc/88pm80x_onkey.c @@ -141,7 +141,7 @@ static struct platform_driver pm80x_onkey_driver = { .pm = &pm80x_onkey_pm_ops, }, .probe = pm80x_onkey_probe, - .remove_new = pm80x_onkey_remove, + .remove = pm80x_onkey_remove, }; module_platform_driver(pm80x_onkey_driver); diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c index 1acd8429c56c..d106f37df6bc 100644 --- a/drivers/input/misc/ad714x.c +++ b/drivers/input/misc/ad714x.c @@ -941,7 +941,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data) struct ad714x_chip *ad714x = data; int i; - mutex_lock(&ad714x->mutex); + guard(mutex)(&ad714x->mutex); ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); @@ -954,8 +954,6 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data) for (i = 0; i < ad714x->hw->touchpad_num; i++) ad714x_touchpad_state_machine(ad714x, i); - mutex_unlock(&ad714x->mutex); - return IRQ_HANDLED; } @@ -1169,13 +1167,11 @@ static int ad714x_suspend(struct device *dev) dev_dbg(ad714x->dev, "%s enter\n", __func__); - mutex_lock(&ad714x->mutex); + guard(mutex)(&ad714x->mutex); data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; ad714x->write(ad714x, AD714X_PWR_CTRL, data); - mutex_unlock(&ad714x->mutex); - return 0; } @@ -1184,7 +1180,7 @@ static int ad714x_resume(struct device *dev) struct ad714x_chip *ad714x = dev_get_drvdata(dev); dev_dbg(ad714x->dev, "%s enter\n", __func__); - mutex_lock(&ad714x->mutex); + guard(mutex)(&ad714x->mutex); /* resume to non-shutdown mode */ @@ -1197,8 +1193,6 @@ static int ad714x_resume(struct device *dev) ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); - mutex_unlock(&ad714x->mutex); - return 0; } diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 795f69edb4b2..e84649af801d 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -244,29 +244,21 @@ static int ati_remote2_open(struct input_dev *idev) if (r) { dev_err(&ar2->intf[0]->dev, "%s(): usb_autopm_get_interface() = %d\n", __func__, r); - goto fail1; + return r; } - mutex_lock(&ati_remote2_mutex); + scoped_guard(mutex, &ati_remote2_mutex) { + if (!(ar2->flags & ATI_REMOTE2_SUSPENDED)) { + r = ati_remote2_submit_urbs(ar2); + if (r) + break; + } - if (!(ar2->flags & ATI_REMOTE2_SUSPENDED)) { - r = ati_remote2_submit_urbs(ar2); - if (r) - goto fail2; + ar2->flags |= ATI_REMOTE2_OPENED; } - ar2->flags |= ATI_REMOTE2_OPENED; - - mutex_unlock(&ati_remote2_mutex); - usb_autopm_put_interface(ar2->intf[0]); - return 0; - - fail2: - mutex_unlock(&ati_remote2_mutex); - usb_autopm_put_interface(ar2->intf[0]); - fail1: return r; } @@ -276,14 +268,12 @@ static void ati_remote2_close(struct input_dev *idev) dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); - mutex_lock(&ati_remote2_mutex); + guard(mutex)(&ati_remote2_mutex); if (!(ar2->flags & ATI_REMOTE2_SUSPENDED)) ati_remote2_kill_urbs(ar2); ar2->flags &= ~ATI_REMOTE2_OPENED; - - 
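ati_remote2_open() above shows a less common idiom: because scoped_guard() expands to a for-loop, "break" exits the guarded block early while still releasing the mutex, which lets the success and failure paths share the usb_autopm_put_interface() tail. A reduced sketch with hypothetical foo_* helpers and flags:

static int foo_open(struct foo *f)
{
        int r = 0;

        scoped_guard(mutex, &f->lock) {
                if (!(f->flags & FOO_SUSPENDED)) {
                        r = foo_submit_urbs(f);
                        if (r)
                                break;  /* leaves the scope, drops the lock */
                }
                f->flags |= FOO_OPENED;
        }

        foo_autopm_put(f);      /* common tail for success and failure */
        return r;
}

The sunkbd hunk at the top of this range uses the same scoped form for serio_pause_rx()/serio_continue_rx(), which pair up as a guard class in the same way.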
mutex_unlock(&ati_remote2_mutex); } static void ati_remote2_input_mouse(struct ati_remote2 *ar2) @@ -713,16 +703,14 @@ static ssize_t ati_remote2_store_channel_mask(struct device *dev, return r; } - mutex_lock(&ati_remote2_mutex); - - if (mask != ar2->channel_mask) { - r = ati_remote2_setup(ar2, mask); - if (!r) - ar2->channel_mask = mask; + scoped_guard(mutex, &ati_remote2_mutex) { + if (mask != ar2->channel_mask) { + r = ati_remote2_setup(ar2, mask); + if (!r) + ar2->channel_mask = mask; + } } - mutex_unlock(&ati_remote2_mutex); - usb_autopm_put_interface(ar2->intf[0]); return r ? r : count; @@ -892,15 +880,13 @@ static int ati_remote2_suspend(struct usb_interface *interface, dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); - mutex_lock(&ati_remote2_mutex); + guard(mutex)(&ati_remote2_mutex); if (ar2->flags & ATI_REMOTE2_OPENED) ati_remote2_kill_urbs(ar2); ar2->flags |= ATI_REMOTE2_SUSPENDED; - mutex_unlock(&ati_remote2_mutex); - return 0; } @@ -917,7 +903,7 @@ static int ati_remote2_resume(struct usb_interface *interface) dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); - mutex_lock(&ati_remote2_mutex); + guard(mutex)(&ati_remote2_mutex); if (ar2->flags & ATI_REMOTE2_OPENED) r = ati_remote2_submit_urbs(ar2); @@ -925,8 +911,6 @@ static int ati_remote2_resume(struct usb_interface *interface) if (!r) ar2->flags &= ~ATI_REMOTE2_SUSPENDED; - mutex_unlock(&ati_remote2_mutex); - return r; } @@ -943,11 +927,11 @@ static int ati_remote2_reset_resume(struct usb_interface *interface) dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); - mutex_lock(&ati_remote2_mutex); + guard(mutex)(&ati_remote2_mutex); r = ati_remote2_setup(ar2, ar2->channel_mask); if (r) - goto out; + return r; if (ar2->flags & ATI_REMOTE2_OPENED) r = ati_remote2_submit_urbs(ar2); @@ -955,9 +939,6 @@ static int ati_remote2_reset_resume(struct usb_interface *interface) if (!r) ar2->flags &= ~ATI_REMOTE2_SUSPENDED; - out: - mutex_unlock(&ati_remote2_mutex); - return r; } diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index 728325a2d574..0cfe5d4a573c 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -355,6 +355,35 @@ static void cm109_submit_buzz_toggle(struct cm109_dev *dev) __func__, error); } +static void cm109_submit_ctl(struct cm109_dev *dev) +{ + int error; + + guard(spinlock_irqsave)(&dev->ctl_submit_lock); + + dev->irq_urb_pending = 0; + + if (unlikely(dev->shutdown)) + return; + + if (dev->buzzer_state) + dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; + else + dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; + + dev->ctl_data->byte[HID_OR1] = dev->keybit; + dev->ctl_data->byte[HID_OR2] = dev->keybit; + + dev->buzzer_pending = 0; + dev->ctl_urb_pending = 1; + + error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC); + if (error) + dev_err(&dev->intf->dev, + "%s: usb_submit_urb (urb_ctl) failed %d\n", + __func__, error); +} + /* * IRQ handler */ @@ -362,8 +391,6 @@ static void cm109_urb_irq_callback(struct urb *urb) { struct cm109_dev *dev = urb->context; const int status = urb->status; - int error; - unsigned long flags; dev_dbg(&dev->intf->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n", dev->irq_data->byte[0], @@ -401,32 +428,7 @@ static void cm109_urb_irq_callback(struct urb *urb) } out: - - spin_lock_irqsave(&dev->ctl_submit_lock, flags); - - dev->irq_urb_pending = 0; - - if (likely(!dev->shutdown)) { - - if (dev->buzzer_state) - dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; - else - dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; - - dev->ctl_data->byte[HID_OR1] = dev->keybit; - 
dev->ctl_data->byte[HID_OR2] = dev->keybit; - - dev->buzzer_pending = 0; - dev->ctl_urb_pending = 1; - - error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC); - if (error) - dev_err(&dev->intf->dev, - "%s: usb_submit_urb (urb_ctl) failed %d\n", - __func__, error); - } - - spin_unlock_irqrestore(&dev->ctl_submit_lock, flags); + cm109_submit_ctl(dev); } static void cm109_urb_ctl_callback(struct urb *urb) @@ -434,7 +436,6 @@ static void cm109_urb_ctl_callback(struct urb *urb) struct cm109_dev *dev = urb->context; const int status = urb->status; int error; - unsigned long flags; dev_dbg(&dev->intf->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n", dev->ctl_data->byte[0], @@ -449,35 +450,31 @@ static void cm109_urb_ctl_callback(struct urb *urb) __func__, status); } - spin_lock_irqsave(&dev->ctl_submit_lock, flags); + guard(spinlock_irqsave)(&dev->ctl_submit_lock); dev->ctl_urb_pending = 0; - if (likely(!dev->shutdown)) { - - if (dev->buzzer_pending || status) { - dev->buzzer_pending = 0; - dev->ctl_urb_pending = 1; - cm109_submit_buzz_toggle(dev); - } else if (likely(!dev->irq_urb_pending)) { - /* ask for key data */ - dev->irq_urb_pending = 1; - error = usb_submit_urb(dev->urb_irq, GFP_ATOMIC); - if (error) - dev_err(&dev->intf->dev, - "%s: usb_submit_urb (urb_irq) failed %d\n", - __func__, error); - } - } + if (unlikely(dev->shutdown)) + return; - spin_unlock_irqrestore(&dev->ctl_submit_lock, flags); + if (dev->buzzer_pending || status) { + dev->buzzer_pending = 0; + dev->ctl_urb_pending = 1; + cm109_submit_buzz_toggle(dev); + } else if (likely(!dev->irq_urb_pending)) { + /* ask for key data */ + dev->irq_urb_pending = 1; + error = usb_submit_urb(dev->urb_irq, GFP_ATOMIC); + if (error) + dev_err(&dev->intf->dev, + "%s: usb_submit_urb (urb_irq) failed %d\n", + __func__, error); + } } static void cm109_toggle_buzzer_async(struct cm109_dev *dev) { - unsigned long flags; - - spin_lock_irqsave(&dev->ctl_submit_lock, flags); + guard(spinlock_irqsave)(&dev->ctl_submit_lock); if (dev->ctl_urb_pending) { /* URB completion will resubmit */ @@ -486,8 +483,6 @@ static void cm109_toggle_buzzer_async(struct cm109_dev *dev) dev->ctl_urb_pending = 1; cm109_submit_buzz_toggle(dev); } - - spin_unlock_irqrestore(&dev->ctl_submit_lock, flags); } static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on) @@ -556,32 +551,30 @@ static int cm109_input_open(struct input_dev *idev) return error; } - mutex_lock(&dev->pm_mutex); - - dev->buzzer_state = 0; - dev->key_code = -1; /* no keys pressed */ - dev->keybit = 0xf; + scoped_guard(mutex, &dev->pm_mutex) { + dev->buzzer_state = 0; + dev->key_code = -1; /* no keys pressed */ + dev->keybit = 0xf; - /* issue INIT */ - dev->ctl_data->byte[HID_OR0] = HID_OR_GPO_BUZ_SPDIF; - dev->ctl_data->byte[HID_OR1] = dev->keybit; - dev->ctl_data->byte[HID_OR2] = dev->keybit; - dev->ctl_data->byte[HID_OR3] = 0x00; + /* issue INIT */ + dev->ctl_data->byte[HID_OR0] = HID_OR_GPO_BUZ_SPDIF; + dev->ctl_data->byte[HID_OR1] = dev->keybit; + dev->ctl_data->byte[HID_OR2] = dev->keybit; + dev->ctl_data->byte[HID_OR3] = 0x00; - dev->ctl_urb_pending = 1; - error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); - if (error) { - dev->ctl_urb_pending = 0; - dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", - __func__, error); - } else { - dev->open = 1; + dev->ctl_urb_pending = 1; + error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); + if (!error) { + dev->open = 1; + return 0; + } } - mutex_unlock(&dev->pm_mutex); + dev->ctl_urb_pending = 0; + usb_autopm_put_interface(dev->intf); 
- if (error) - usb_autopm_put_interface(dev->intf); + dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", + __func__, error); return error; } @@ -590,17 +583,15 @@ static void cm109_input_close(struct input_dev *idev) { struct cm109_dev *dev = input_get_drvdata(idev); - mutex_lock(&dev->pm_mutex); - - /* - * Once we are here event delivery is stopped so we - * don't need to worry about someone starting buzzer - * again - */ - cm109_stop_traffic(dev); - dev->open = 0; - - mutex_unlock(&dev->pm_mutex); + scoped_guard(mutex, &dev->pm_mutex) { + /* + * Once we are here event delivery is stopped so we + * don't need to worry about someone starting buzzer + * again + */ + cm109_stop_traffic(dev); + dev->open = 0; + } usb_autopm_put_interface(dev->intf); } @@ -823,9 +814,9 @@ static int cm109_usb_suspend(struct usb_interface *intf, pm_message_t message) dev_info(&intf->dev, "cm109: usb_suspend (event=%d)\n", message.event); - mutex_lock(&dev->pm_mutex); + guard(mutex)(&dev->pm_mutex); + cm109_stop_traffic(dev); - mutex_unlock(&dev->pm_mutex); return 0; } @@ -836,9 +827,9 @@ static int cm109_usb_resume(struct usb_interface *intf) dev_info(&intf->dev, "cm109: usb_resume\n"); - mutex_lock(&dev->pm_mutex); + guard(mutex)(&dev->pm_mutex); + cm109_restore_state(dev); - mutex_unlock(&dev->pm_mutex); return 0; } diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c index 0c68e924a1cc..cfc12332bee1 100644 --- a/drivers/input/misc/cma3000_d0x.c +++ b/drivers/input/misc/cma3000_d0x.c @@ -217,15 +217,13 @@ static int cma3000_open(struct input_dev *input_dev) { struct cma3000_accl_data *data = input_get_drvdata(input_dev); - mutex_lock(&data->mutex); + guard(mutex)(&data->mutex); if (!data->suspended) cma3000_poweron(data); data->opened = true; - mutex_unlock(&data->mutex); - return 0; } @@ -233,40 +231,34 @@ static void cma3000_close(struct input_dev *input_dev) { struct cma3000_accl_data *data = input_get_drvdata(input_dev); - mutex_lock(&data->mutex); + guard(mutex)(&data->mutex); if (!data->suspended) cma3000_poweroff(data); data->opened = false; - - mutex_unlock(&data->mutex); } void cma3000_suspend(struct cma3000_accl_data *data) { - mutex_lock(&data->mutex); + guard(mutex)(&data->mutex); if (!data->suspended && data->opened) cma3000_poweroff(data); data->suspended = true; - - mutex_unlock(&data->mutex); } EXPORT_SYMBOL(cma3000_suspend); void cma3000_resume(struct cma3000_accl_data *data) { - mutex_lock(&data->mutex); + guard(mutex)(&data->mutex); if (data->suspended && data->opened) cma3000_poweron(data); data->suspended = false; - - mutex_unlock(&data->mutex); } EXPORT_SYMBOL(cma3000_resume); diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c index 03bdb7c26ec0..dce3b0ec8cf3 100644 --- a/drivers/input/misc/cs40l50-vibra.c +++ b/drivers/input/misc/cs40l50-vibra.c @@ -334,11 +334,12 @@ static int cs40l50_add(struct input_dev *dev, struct ff_effect *effect, work_data.custom_len = effect->u.periodic.custom_len; work_data.vib = vib; work_data.effect = effect; - INIT_WORK(&work_data.work, cs40l50_add_worker); + INIT_WORK_ONSTACK(&work_data.work, cs40l50_add_worker); /* Push to the workqueue to serialize with playbacks */ queue_work(vib->vib_wq, &work_data.work); flush_work(&work_data.work); + destroy_work_on_stack(&work_data.work); kfree(work_data.custom_data); @@ -467,11 +468,12 @@ static int cs40l50_erase(struct input_dev *dev, int effect_id) work_data.vib = vib; work_data.effect = &dev->ff->effects[effect_id]; - 
INIT_WORK(&work_data.work, cs40l50_erase_worker); + INIT_WORK_ONSTACK(&work_data.work, cs40l50_erase_worker); /* Push to workqueue to serialize with playbacks */ queue_work(vib->vib_wq, &work_data.work); flush_work(&work_data.work); + destroy_work_on_stack(&work_data.work); return work_data.error; } diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c index 1629b7ea4cbd..e4a605c6af15 100644 --- a/drivers/input/misc/da7280.c +++ b/drivers/input/misc/da7280.c @@ -1263,39 +1263,37 @@ static int da7280_suspend(struct device *dev) { struct da7280_haptic *haptics = dev_get_drvdata(dev); - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); /* * Make sure no new requests will be submitted while device is * suspended. */ - spin_lock_irq(&haptics->input_dev->event_lock); - haptics->suspended = true; - spin_unlock_irq(&haptics->input_dev->event_lock); + scoped_guard(spinlock_irq, &haptics->input_dev->event_lock) { + haptics->suspended = true; + } da7280_haptic_stop(haptics); - mutex_unlock(&haptics->input_dev->mutex); - return 0; } static int da7280_resume(struct device *dev) { struct da7280_haptic *haptics = dev_get_drvdata(dev); - int retval; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); - retval = da7280_haptic_start(haptics); - if (!retval) { - spin_lock_irq(&haptics->input_dev->event_lock); + error = da7280_haptic_start(haptics); + if (error) + return error; + + scoped_guard(spinlock_irq, &haptics->input_dev->event_lock) { haptics->suspended = false; - spin_unlock_irq(&haptics->input_dev->event_lock); } - mutex_unlock(&haptics->input_dev->mutex); - return retval; + return 0; } #ifdef CONFIG_OF diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c index 7a1122e1efb9..cc23625019e3 100644 --- a/drivers/input/misc/da9052_onkey.c +++ b/drivers/input/misc/da9052_onkey.c @@ -140,8 +140,8 @@ static void da9052_onkey_remove(struct platform_device *pdev) static struct platform_driver da9052_onkey_driver = { .probe = da9052_onkey_probe, - .remove_new = da9052_onkey_remove, - .driver = { + .remove = da9052_onkey_remove, + .driver = { .name = "da9052-onkey", }, }; diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c index 871812f1b398..3e4fc2f01011 100644 --- a/drivers/input/misc/da9055_onkey.c +++ b/drivers/input/misc/da9055_onkey.c @@ -145,8 +145,8 @@ static void da9055_onkey_remove(struct platform_device *pdev) static struct platform_driver da9055_onkey_driver = { .probe = da9055_onkey_probe, - .remove_new = da9055_onkey_remove, - .driver = { + .remove = da9055_onkey_remove, + .driver = { .name = "da9055-onkey", }, }; diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c index 61b503835aa6..96cd6a078c8a 100644 --- a/drivers/input/misc/drv260x.c +++ b/drivers/input/misc/drv260x.c @@ -537,64 +537,62 @@ static int drv260x_probe(struct i2c_client *client) static int drv260x_suspend(struct device *dev) { struct drv260x_data *haptics = dev_get_drvdata(dev); - int ret = 0; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); if (input_device_enabled(haptics->input_dev)) { - ret = regmap_update_bits(haptics->regmap, - DRV260X_MODE, - DRV260X_STANDBY_MASK, - DRV260X_STANDBY); - if (ret) { + error = regmap_update_bits(haptics->regmap, + DRV260X_MODE, + DRV260X_STANDBY_MASK, + DRV260X_STANDBY); + if (error) { dev_err(dev, "Failed to set standby mode\n"); - goto out; + return error; } 
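The cs40l50-vibra hunks fix a debugobjects issue rather than a locking one: a work_struct that lives on the stack must be initialized with INIT_WORK_ONSTACK() and torn down with destroy_work_on_stack(), and the flush_work() in between guarantees the handler has finished before the stack frame disappears. A self-contained sketch of the synchronous on-stack pattern:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct foo_work {
        struct work_struct work;
        int error;
};

static void foo_worker(struct work_struct *work)
{
        struct foo_work *fw = container_of(work, struct foo_work, work);

        fw->error = 0;          /* do the serialized operation here */
}

static int foo_run_sync(struct workqueue_struct *wq)
{
        struct foo_work fw = { .error = -EIO };

        INIT_WORK_ONSTACK(&fw.work, foo_worker);
        queue_work(wq, &fw.work);       /* serialize with other queued work */
        flush_work(&fw.work);           /* handler is done after this */
        destroy_work_on_stack(&fw.work);

        return fw.error;
}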
gpiod_set_value(haptics->enable_gpio, 0); - ret = regulator_disable(haptics->regulator); - if (ret) { + error = regulator_disable(haptics->regulator); + if (error) { dev_err(dev, "Failed to disable regulator\n"); regmap_update_bits(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY_MASK, 0); + return error; } } -out: - mutex_unlock(&haptics->input_dev->mutex); - return ret; + + return 0; } static int drv260x_resume(struct device *dev) { struct drv260x_data *haptics = dev_get_drvdata(dev); - int ret = 0; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); if (input_device_enabled(haptics->input_dev)) { - ret = regulator_enable(haptics->regulator); - if (ret) { + error = regulator_enable(haptics->regulator); + if (error) { dev_err(dev, "Failed to enable regulator\n"); - goto out; + return error; } - ret = regmap_update_bits(haptics->regmap, - DRV260X_MODE, - DRV260X_STANDBY_MASK, 0); - if (ret) { + error = regmap_update_bits(haptics->regmap, + DRV260X_MODE, + DRV260X_STANDBY_MASK, 0); + if (error) { dev_err(dev, "Failed to unset standby mode\n"); regulator_disable(haptics->regulator); - goto out; + return error; } gpiod_set_value(haptics->enable_gpio, 1); } -out: - mutex_unlock(&haptics->input_dev->mutex); - return ret; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume); diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c index f98e4d765307..46842cb8ddd7 100644 --- a/drivers/input/misc/drv2665.c +++ b/drivers/input/misc/drv2665.c @@ -15,7 +15,7 @@ #include <linux/delay.h> #include <linux/regulator/consumer.h> -/* Contol registers */ +/* Control registers */ #define DRV2665_STATUS 0x00 #define DRV2665_CTRL_1 0x01 #define DRV2665_CTRL_2 0x02 @@ -225,59 +225,57 @@ static int drv2665_probe(struct i2c_client *client) static int drv2665_suspend(struct device *dev) { struct drv2665_data *haptics = dev_get_drvdata(dev); - int ret = 0; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); if (input_device_enabled(haptics->input_dev)) { - ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2, - DRV2665_STANDBY, DRV2665_STANDBY); - if (ret) { + error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2, + DRV2665_STANDBY, DRV2665_STANDBY); + if (error) { dev_err(dev, "Failed to set standby mode\n"); regulator_disable(haptics->regulator); - goto out; + return error; } - ret = regulator_disable(haptics->regulator); - if (ret) { + error = regulator_disable(haptics->regulator); + if (error) { dev_err(dev, "Failed to disable regulator\n"); regmap_update_bits(haptics->regmap, DRV2665_CTRL_2, DRV2665_STANDBY, 0); + return error; } } -out: - mutex_unlock(&haptics->input_dev->mutex); - return ret; + + return 0; } static int drv2665_resume(struct device *dev) { struct drv2665_data *haptics = dev_get_drvdata(dev); - int ret = 0; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); if (input_device_enabled(haptics->input_dev)) { - ret = regulator_enable(haptics->regulator); - if (ret) { + error = regulator_enable(haptics->regulator); + if (error) { dev_err(dev, "Failed to enable regulator\n"); - goto out; + return error; } - ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2, - DRV2665_STANDBY, 0); - if (ret) { + error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2, + DRV2665_STANDBY, 0); + if (error) { dev_err(dev, "Failed to unset standby mode\n"); regulator_disable(haptics->regulator); - goto out; + 
return error; } } -out: - mutex_unlock(&haptics->input_dev->mutex); - return ret; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(drv2665_pm_ops, drv2665_suspend, drv2665_resume); diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c index ad49845374b9..f952a24ec595 100644 --- a/drivers/input/misc/drv2667.c +++ b/drivers/input/misc/drv2667.c @@ -16,7 +16,7 @@ #include <linux/delay.h> #include <linux/regulator/consumer.h> -/* Contol registers */ +/* Control registers */ #define DRV2667_STATUS 0x00 #define DRV2667_CTRL_1 0x01 #define DRV2667_CTRL_2 0x02 @@ -402,59 +402,57 @@ static int drv2667_probe(struct i2c_client *client) static int drv2667_suspend(struct device *dev) { struct drv2667_data *haptics = dev_get_drvdata(dev); - int ret = 0; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); if (input_device_enabled(haptics->input_dev)) { - ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, - DRV2667_STANDBY, DRV2667_STANDBY); - if (ret) { + error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, + DRV2667_STANDBY, DRV2667_STANDBY); + if (error) { dev_err(dev, "Failed to set standby mode\n"); regulator_disable(haptics->regulator); - goto out; + return error; } - ret = regulator_disable(haptics->regulator); - if (ret) { + error = regulator_disable(haptics->regulator); + if (error) { dev_err(dev, "Failed to disable regulator\n"); regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, DRV2667_STANDBY, 0); + return error; } } -out: - mutex_unlock(&haptics->input_dev->mutex); - return ret; + + return 0; } static int drv2667_resume(struct device *dev) { struct drv2667_data *haptics = dev_get_drvdata(dev); - int ret = 0; + int error; - mutex_lock(&haptics->input_dev->mutex); + guard(mutex)(&haptics->input_dev->mutex); if (input_device_enabled(haptics->input_dev)) { - ret = regulator_enable(haptics->regulator); - if (ret) { + error = regulator_enable(haptics->regulator); + if (error) { dev_err(dev, "Failed to enable regulator\n"); - goto out; + return error; } - ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, - DRV2667_STANDBY, 0); - if (ret) { + error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, + DRV2667_STANDBY, 0); + if (error) { dev_err(dev, "Failed to unset standby mode\n"); regulator_disable(haptics->regulator); - goto out; + return error; } } -out: - mutex_unlock(&haptics->input_dev->mutex); - return ret; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume); diff --git a/drivers/input/misc/ibm-panel.c b/drivers/input/misc/ibm-panel.c index 867ac7aa10d2..aa48f62d7ea0 100644 --- a/drivers/input/misc/ibm-panel.c +++ b/drivers/input/misc/ibm-panel.c @@ -77,12 +77,11 @@ static void ibm_panel_process_command(struct ibm_panel *panel) static int ibm_panel_i2c_slave_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { - unsigned long flags; struct ibm_panel *panel = i2c_get_clientdata(client); dev_dbg(&panel->input->dev, "event: %u data: %02x\n", event, *val); - spin_lock_irqsave(&panel->lock, flags); + guard(spinlock_irqsave)(&panel->lock); switch (event) { case I2C_SLAVE_STOP: @@ -114,8 +113,6 @@ static int ibm_panel_i2c_slave_cb(struct i2c_client *client, break; } - spin_unlock_irqrestore(&panel->lock, flags); - return 0; } diff --git a/drivers/input/misc/ideapad_slidebar.c b/drivers/input/misc/ideapad_slidebar.c index fa4e7f67d713..f6e5fc807b4d 100644 --- a/drivers/input/misc/ideapad_slidebar.c +++ b/drivers/input/misc/ideapad_slidebar.c @@ -23,7 
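The drv260x/drv2665/drv2667 suspend hunks above also make the rollback explicit: when the second of two hardware steps fails, the first is undone before the error escapes, so the device is never left half-suspended. Reduced to its shape (struct foo and the FOO_* register names are hypothetical):

#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#define FOO_CTRL                0x02
#define FOO_STANDBY_MASK        BIT(6)
#define FOO_STANDBY             BIT(6)

struct foo {
        struct regmap *regmap;
        struct regulator *regulator;
};

static int foo_hw_suspend(struct foo *f)
{
        int error;

        error = regmap_update_bits(f->regmap, FOO_CTRL,
                                   FOO_STANDBY_MASK, FOO_STANDBY);
        if (error)
                return error;

        error = regulator_disable(f->regulator);
        if (error) {
                /* undo standby so the device stays in a consistent state */
                regmap_update_bits(f->regmap, FOO_CTRL,
                                   FOO_STANDBY_MASK, 0);
                return error;
        }

        return 0;
}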
+23,7 @@ * * The value is in byte range, however, I only figured out * how bits 0b10011001 work. Some other bits, probably, - * are meaningfull too. + * are meaningful too. * * Possible states: * @@ -95,41 +95,29 @@ static struct platform_device *slidebar_platform_dev; static u8 slidebar_pos_get(void) { - u8 res; - unsigned long flags; + guard(spinlock_irqsave)(&io_lock); - spin_lock_irqsave(&io_lock, flags); outb(0xf4, 0xff29); outb(0xbf, 0xff2a); - res = inb(0xff2b); - spin_unlock_irqrestore(&io_lock, flags); - - return res; + return inb(0xff2b); } static u8 slidebar_mode_get(void) { - u8 res; - unsigned long flags; + guard(spinlock_irqsave)(&io_lock); - spin_lock_irqsave(&io_lock, flags); outb(0xf7, 0xff29); outb(0x8b, 0xff2a); - res = inb(0xff2b); - spin_unlock_irqrestore(&io_lock, flags); - - return res; + return inb(0xff2b); } static void slidebar_mode_set(u8 mode) { - unsigned long flags; + guard(spinlock_irqsave)(&io_lock); - spin_lock_irqsave(&io_lock, flags); outb(0xf7, 0xff29); outb(0x8b, 0xff2a); outb(mode, 0xff2b); - spin_unlock_irqrestore(&io_lock, flags); } static bool slidebar_i8042_filter(unsigned char data, unsigned char str, @@ -267,7 +255,7 @@ static struct platform_driver slidebar_drv = { .driver = { .name = "ideapad_slidebar", }, - .remove_new = ideapad_remove, + .remove = ideapad_remove, }; static int __init ideapad_dmi_check(const struct dmi_system_id *id) diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 4215f9b9c2b0..d9ee14b1f451 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -739,7 +739,7 @@ static int ims_pcu_switch_to_bootloader(struct ims_pcu *pcu) { int error; - /* Execute jump to the bootoloader */ + /* Execute jump to the bootloader */ error = ims_pcu_execute_command(pcu, JUMP_TO_BTLDR, NULL, 0); if (error) { dev_err(pcu->dev, diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c index 843f8a3f3410..1851848e2cd3 100644 --- a/drivers/input/misc/iqs269a.c +++ b/drivers/input/misc/iqs269a.c @@ -365,7 +365,7 @@ static int iqs269_ati_mode_set(struct iqs269_private *iqs269, if (mode > IQS269_CHx_ENG_A_ATI_MODE_MAX) return -EINVAL; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); engine_a = be16_to_cpu(ch_reg[ch_num].engine_a); @@ -375,8 +375,6 @@ static int iqs269_ati_mode_set(struct iqs269_private *iqs269, ch_reg[ch_num].engine_a = cpu_to_be16(engine_a); iqs269->ati_current = false; - mutex_unlock(&iqs269->lock); - return 0; } @@ -389,9 +387,9 @@ static int iqs269_ati_mode_get(struct iqs269_private *iqs269, if (ch_num >= IQS269_NUM_CH) return -EINVAL; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); + engine_a = be16_to_cpu(ch_reg[ch_num].engine_a); - mutex_unlock(&iqs269->lock); engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK; *mode = (engine_a >> IQS269_CHx_ENG_A_ATI_MODE_SHIFT); @@ -429,7 +427,7 @@ static int iqs269_ati_base_set(struct iqs269_private *iqs269, return -EINVAL; } - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); @@ -439,8 +437,6 @@ static int iqs269_ati_base_set(struct iqs269_private *iqs269, ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); iqs269->ati_current = false; - mutex_unlock(&iqs269->lock); - return 0; } @@ -453,9 +449,9 @@ static int iqs269_ati_base_get(struct iqs269_private *iqs269, if (ch_num >= IQS269_NUM_CH) return -EINVAL; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); - mutex_unlock(&iqs269->lock); switch 
(engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) { case IQS269_CHx_ENG_B_ATI_BASE_75: @@ -491,7 +487,7 @@ static int iqs269_ati_target_set(struct iqs269_private *iqs269, if (target > IQS269_CHx_ENG_B_ATI_TARGET_MAX) return -EINVAL; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); @@ -501,8 +497,6 @@ static int iqs269_ati_target_set(struct iqs269_private *iqs269, ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); iqs269->ati_current = false; - mutex_unlock(&iqs269->lock); - return 0; } @@ -515,10 +509,9 @@ static int iqs269_ati_target_get(struct iqs269_private *iqs269, if (ch_num >= IQS269_NUM_CH) return -EINVAL; - mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); - mutex_unlock(&iqs269->lock); + guard(mutex)(&iqs269->lock); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32; return 0; @@ -557,7 +550,6 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, const struct fwnode_handle *ch_node) { struct i2c_client *client = iqs269->client; - struct fwnode_handle *ev_node; struct iqs269_ch_reg *ch_reg; u16 engine_a, engine_b; unsigned int reg, val; @@ -734,8 +726,9 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, } for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) { - ev_node = fwnode_get_named_child_node(ch_node, - iqs269_events[i].name); + struct fwnode_handle *ev_node __free(fwnode_handle) = + fwnode_get_named_child_node(ch_node, + iqs269_events[i].name); if (!ev_node) continue; @@ -744,7 +737,6 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, dev_err(&client->dev, "Invalid channel %u threshold: %u\n", reg, val); - fwnode_handle_put(ev_node); return -EINVAL; } @@ -758,7 +750,6 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, dev_err(&client->dev, "Invalid channel %u hysteresis: %u\n", reg, val); - fwnode_handle_put(ev_node); return -EINVAL; } @@ -774,7 +765,6 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, } error = fwnode_property_read_u32(ev_node, "linux,code", &val); - fwnode_handle_put(ev_node); if (error == -EINVAL) { continue; } else if (error) { @@ -1199,7 +1189,7 @@ static int iqs269_dev_init(struct iqs269_private *iqs269) { int error; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); /* * Early revisions of silicon require the following workaround in order @@ -1210,19 +1200,19 @@ static int iqs269_dev_init(struct iqs269_private *iqs269) error = regmap_multi_reg_write(iqs269->regmap, iqs269_tws_init, ARRAY_SIZE(iqs269_tws_init)); if (error) - goto err_mutex; + return error; } error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI, IQS269_HALL_UI_ENABLE, iqs269->hall_enable ? 
~0 : 0); if (error) - goto err_mutex; + return error; error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, &iqs269->sys_reg, sizeof(iqs269->sys_reg)); if (error) - goto err_mutex; + return error; /* * The following delay gives the device time to deassert its RDY output @@ -1232,10 +1222,7 @@ static int iqs269_dev_init(struct iqs269_private *iqs269) iqs269->ati_current = true; -err_mutex: - mutex_unlock(&iqs269->lock); - - return error; + return 0; } static int iqs269_input_init(struct iqs269_private *iqs269) @@ -1580,13 +1567,11 @@ static ssize_t hall_enable_store(struct device *dev, if (error) return error; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); iqs269->hall_enable = val; iqs269->ati_current = false; - mutex_unlock(&iqs269->lock); - return count; } @@ -1643,13 +1628,11 @@ static ssize_t rx_enable_store(struct device *dev, if (val > 0xFF) return -EINVAL; - mutex_lock(&iqs269->lock); + guard(mutex)(&iqs269->lock); ch_reg[iqs269->ch_num].rx_enable = val; iqs269->ati_current = false; - mutex_unlock(&iqs269->lock); - return count; } diff --git a/drivers/input/misc/iqs626a.c b/drivers/input/misc/iqs626a.c index 0dab54d3a060..7a6e6927f331 100644 --- a/drivers/input/misc/iqs626a.c +++ b/drivers/input/misc/iqs626a.c @@ -462,7 +462,6 @@ iqs626_parse_events(struct iqs626_private *iqs626, { struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg; struct i2c_client *client = iqs626->client; - struct fwnode_handle *ev_node; const char *ev_name; u8 *thresh, *hyst; unsigned int val; @@ -501,6 +500,7 @@ iqs626_parse_events(struct iqs626_private *iqs626, if (!iqs626_channels[ch_id].events[i]) continue; + struct fwnode_handle *ev_node __free(fwnode_handle) = NULL; if (ch_id == IQS626_CH_TP_2 || ch_id == IQS626_CH_TP_3) { /* * Trackpad touch events are simply described under the @@ -530,7 +530,6 @@ iqs626_parse_events(struct iqs626_private *iqs626, dev_err(&client->dev, "Invalid input type: %u\n", val); - fwnode_handle_put(ev_node); return -EINVAL; } @@ -545,7 +544,6 @@ iqs626_parse_events(struct iqs626_private *iqs626, dev_err(&client->dev, "Invalid %s channel hysteresis: %u\n", fwnode_get_name(ch_node), val); - fwnode_handle_put(ev_node); return -EINVAL; } @@ -566,7 +564,6 @@ iqs626_parse_events(struct iqs626_private *iqs626, dev_err(&client->dev, "Invalid %s channel threshold: %u\n", fwnode_get_name(ch_node), val); - fwnode_handle_put(ev_node); return -EINVAL; } @@ -575,8 +572,6 @@ iqs626_parse_events(struct iqs626_private *iqs626, else *(thresh + iqs626_events[i].th_offs) = val; } - - fwnode_handle_put(ev_node); } return 0; @@ -774,12 +769,12 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626, for (i = 0; i < iqs626_channels[ch_id].num_ch; i++) { u8 *ati_base = &sys_reg->tp_grp_reg.ch_reg_tp[i].ati_base; u8 *thresh = &sys_reg->tp_grp_reg.ch_reg_tp[i].thresh; - struct fwnode_handle *tc_node; char tc_name[10]; snprintf(tc_name, sizeof(tc_name), "channel-%d", i); - tc_node = fwnode_get_named_child_node(ch_node, tc_name); + struct fwnode_handle *tc_node __free(fwnode_handle) = + fwnode_get_named_child_node(ch_node, tc_name); if (!tc_node) continue; @@ -790,7 +785,6 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626, dev_err(&client->dev, "Invalid %s %s ATI base: %u\n", fwnode_get_name(ch_node), tc_name, val); - fwnode_handle_put(tc_node); return -EINVAL; } @@ -803,14 +797,11 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626, dev_err(&client->dev, "Invalid %s %s threshold: %u\n", fwnode_get_name(ch_node), tc_name, val); - 
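The iqs269a and iqs626a hunks (and iqs7222 below) rely on the __free() attribute from <linux/cleanup.h>: the declaration attaches fwnode_handle_put() as a destructor that runs when the pointer goes out of scope, which is why every explicit put on the error paths can go. Note the iqs626 variant initializes the pointer to NULL first when the assignment cannot happen at the declaration site, so the destructor always sees a defined value. A minimal sketch with a hypothetical event property:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/property.h>

static int foo_parse_event(struct fwnode_handle *chan, const char *name,
                           u32 *code)
{
        /* reference dropped automatically on all exit paths */
        struct fwnode_handle *ev __free(fwnode_handle) =
                fwnode_get_named_child_node(chan, name);
        if (!ev)
                return 0;       /* the event node is optional */

        if (fwnode_property_read_u32(ev, "linux,code", code))
                return -EINVAL; /* no fwnode_handle_put() needed here */

        return 0;
}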
fwnode_handle_put(tc_node); return -EINVAL; } *thresh = val; } - - fwnode_handle_put(tc_node); } if (!fwnode_property_present(ch_node, "linux,keycodes")) @@ -1233,7 +1224,6 @@ static int iqs626_parse_prop(struct iqs626_private *iqs626) { struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg; struct i2c_client *client = iqs626->client; - struct fwnode_handle *ch_node; unsigned int val; int error, i; u16 general; @@ -1375,13 +1365,13 @@ static int iqs626_parse_prop(struct iqs626_private *iqs626) sys_reg->active = 0; for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) { - ch_node = device_get_named_child_node(&client->dev, - iqs626_channels[i].name); + struct fwnode_handle *ch_node __free(fwnode_handle) = + device_get_named_child_node(&client->dev, + iqs626_channels[i].name); if (!ch_node) continue; error = iqs626_parse_channel(iqs626, ch_node, i); - fwnode_handle_put(ch_node); if (error) return error; diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c index be80a31de9f8..22022d11470d 100644 --- a/drivers/input/misc/iqs7222.c +++ b/drivers/input/misc/iqs7222.c @@ -2385,9 +2385,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, for (i = 0; i < ARRAY_SIZE(iqs7222_kp_events); i++) { const char *event_name = iqs7222_kp_events[i].name; u16 event_enable = iqs7222_kp_events[i].enable; - struct fwnode_handle *event_node; - event_node = fwnode_get_named_child_node(chan_node, event_name); + struct fwnode_handle *event_node __free(fwnode_handle) = + fwnode_get_named_child_node(chan_node, event_name); if (!event_node) continue; @@ -2408,7 +2408,6 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, dev_err(&client->dev, "Invalid %s press timeout: %u\n", fwnode_get_name(event_node), val); - fwnode_handle_put(event_node); return -EINVAL; } @@ -2418,7 +2417,6 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, dev_err(&client->dev, "Failed to read %s press timeout: %d\n", fwnode_get_name(event_node), error); - fwnode_handle_put(event_node); return error; } @@ -2429,7 +2427,6 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, dev_desc->touch_link - (i ? 
0 : 2), &iqs7222->kp_type[chan_index][i], &iqs7222->kp_code[chan_index][i]); - fwnode_handle_put(event_node); if (error) return error; @@ -2604,10 +2601,10 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) { const char *event_name = iqs7222_sl_events[i].name; - struct fwnode_handle *event_node; enum iqs7222_reg_key_id reg_key; - event_node = fwnode_get_named_child_node(sldr_node, event_name); + struct fwnode_handle *event_node __free(fwnode_handle) = + fwnode_get_named_child_node(sldr_node, event_name); if (!event_node) continue; @@ -2639,7 +2636,6 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, : sldr_setup[4 + reg_offset], NULL, &iqs7222->sl_code[sldr_index][i]); - fwnode_handle_put(event_node); if (error) return error; @@ -2742,9 +2738,9 @@ static int iqs7222_parse_tpad(struct iqs7222_private *iqs7222, for (i = 0; i < ARRAY_SIZE(iqs7222_tp_events); i++) { const char *event_name = iqs7222_tp_events[i].name; - struct fwnode_handle *event_node; - event_node = fwnode_get_named_child_node(tpad_node, event_name); + struct fwnode_handle *event_node __free(fwnode_handle) = + fwnode_get_named_child_node(tpad_node, event_name); if (!event_node) continue; @@ -2760,7 +2756,6 @@ static int iqs7222_parse_tpad(struct iqs7222_private *iqs7222, iqs7222_tp_events[i].link, 1566, NULL, &iqs7222->tp_code[i]); - fwnode_handle_put(event_node); if (error) return error; @@ -2818,9 +2813,9 @@ static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222, int reg_grp_index) { struct i2c_client *client = iqs7222->client; - struct fwnode_handle *reg_grp_node; int error; + struct fwnode_handle *reg_grp_node __free(fwnode_handle) = NULL; if (iqs7222_reg_grp_names[reg_grp]) { char reg_grp_name[16]; @@ -2838,14 +2833,17 @@ static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222, error = iqs7222_parse_props(iqs7222, reg_grp_node, reg_grp_index, reg_grp, IQS7222_REG_KEY_NONE); + if (error) + return error; - if (!error && iqs7222_parse_extra[reg_grp]) + if (iqs7222_parse_extra[reg_grp]) { error = iqs7222_parse_extra[reg_grp](iqs7222, reg_grp_node, reg_grp_index); + if (error) + return error; + } - fwnode_handle_put(reg_grp_node); - - return error; + return 0; } static int iqs7222_parse_all(struct iqs7222_private *iqs7222) diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c index 837682cb2a7d..eb9788ea5215 100644 --- a/drivers/input/misc/kxtj9.c +++ b/drivers/input/misc/kxtj9.c @@ -25,7 +25,7 @@ /* CONTROL REGISTER 1 BITS */ #define PC1_OFF 0x7F #define PC1_ON (1 << 7) -/* Data ready funtion enable bit: set during probe if using irq mode */ +/* Data ready function enable bit: set during probe if using irq mode */ #define DRDYE (1 << 5) /* DATA CONTROL REGISTER BITS */ #define ODR12_5F 0 @@ -314,9 +314,8 @@ static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr, return error; /* Lock the device to prevent races with open/close (and itself) */ - mutex_lock(&input_dev->mutex); - - disable_irq(client->irq); + guard(mutex)(&input_dev->mutex); + guard(disable_irq)(&client->irq); /* * Set current interval to the greater of the minimum interval or @@ -326,9 +325,6 @@ static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr, kxtj9_update_odr(tj9, tj9->last_poll_interval); - enable_irq(client->irq); - mutex_unlock(&input_dev->mutex); - return count; } @@ -504,12 +500,11 @@ static int kxtj9_suspend(struct device *dev) struct kxtj9_data *tj9 = i2c_get_clientdata(client); 
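kxtj9_set_poll() above stacks two guards: the input mutex and guard(disable_irq), which calls disable_irq() on entry and enable_irq() when the scope ends; note that this class takes a pointer to the IRQ number. A reduced sketch, with a hypothetical reconfiguration step:

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>

struct foo {
        struct input_dev *input;
};

static void foo_set_rate(struct foo *f, int irq)
{
        guard(mutex)(&f->input->mutex);
        guard(disable_irq)(&irq);       /* enable_irq(irq) on scope exit */

        foo_update_odr(f);              /* hypothetical: reprogram the device */
}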
struct input_dev *input_dev = tj9->input_dev; - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) kxtj9_disable(tj9); - mutex_unlock(&input_dev->mutex); return 0; } @@ -519,12 +514,11 @@ static int kxtj9_resume(struct device *dev) struct kxtj9_data *tj9 = i2c_get_clientdata(client); struct input_dev *input_dev = tj9->input_dev; - mutex_lock(&input_dev->mutex); + guard(mutex)(&input_dev->mutex); if (input_device_enabled(input_dev)) kxtj9_enable(tj9); - mutex_unlock(&input_dev->mutex); return 0; } diff --git a/drivers/input/misc/m68kspkr.c b/drivers/input/misc/m68kspkr.c index 3fe0a85c45e0..0542334df624 100644 --- a/drivers/input/misc/m68kspkr.c +++ b/drivers/input/misc/m68kspkr.c @@ -95,7 +95,7 @@ static struct platform_driver m68kspkr_platform_driver = { .name = "m68kspkr", }, .probe = m68kspkr_probe, - .remove_new = m68kspkr_remove, + .remove = m68kspkr_remove, .shutdown = m68kspkr_shutdown, }; diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c index 11cac4b7dddc..f97f341ee0bb 100644 --- a/drivers/input/misc/max8997_haptic.c +++ b/drivers/input/misc/max8997_haptic.c @@ -153,19 +153,19 @@ static void max8997_haptic_enable(struct max8997_haptic *chip) { int error; - mutex_lock(&chip->mutex); + guard(mutex)(&chip->mutex); error = max8997_haptic_set_duty_cycle(chip); if (error) { dev_err(chip->dev, "set_pwm_cycle failed, error: %d\n", error); - goto out; + return; } if (!chip->enabled) { error = regulator_enable(chip->regulator); if (error) { dev_err(chip->dev, "Failed to enable regulator\n"); - goto out; + return; } max8997_haptic_configure(chip); if (chip->mode == MAX8997_EXTERNAL_MODE) { @@ -173,19 +173,16 @@ static void max8997_haptic_enable(struct max8997_haptic *chip) if (error) { dev_err(chip->dev, "Failed to enable PWM\n"); regulator_disable(chip->regulator); - goto out; + return; } } chip->enabled = true; } - -out: - mutex_unlock(&chip->mutex); } static void max8997_haptic_disable(struct max8997_haptic *chip) { - mutex_lock(&chip->mutex); + guard(mutex)(&chip->mutex); if (chip->enabled) { chip->enabled = false; @@ -194,8 +191,6 @@ static void max8997_haptic_disable(struct max8997_haptic *chip) pwm_disable(chip->pwm); regulator_disable(chip->regulator); } - - mutex_unlock(&chip->mutex); } static void max8997_haptic_play_effect_work(struct work_struct *work) @@ -389,7 +384,7 @@ static struct platform_driver max8997_haptic_driver = { .pm = pm_sleep_ptr(&max8997_haptic_pm_ops), }, .probe = max8997_haptic_probe, - .remove_new = max8997_haptic_remove, + .remove = max8997_haptic_remove, .id_table = max8997_haptic_id, }; module_platform_driver(max8997_haptic_driver); diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c index 1c8c939638f6..1c7faa9b7afe 100644 --- a/drivers/input/misc/mc13783-pwrbutton.c +++ b/drivers/input/misc/mc13783-pwrbutton.c @@ -253,7 +253,7 @@ static void mc13783_pwrbutton_remove(struct platform_device *pdev) static struct platform_driver mc13783_pwrbutton_driver = { .probe = mc13783_pwrbutton_probe, - .remove_new = mc13783_pwrbutton_remove, + .remove = mc13783_pwrbutton_remove, .driver = { .name = "mc13783-pwrbutton", }, diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c index 06d5972e8e84..39fc451c56e9 100644 --- a/drivers/input/misc/palmas-pwrbutton.c +++ b/drivers/input/misc/palmas-pwrbutton.c @@ -310,7 +310,7 @@ MODULE_DEVICE_TABLE(of, of_palmas_pwr_match); static struct platform_driver 
palmas_pwron_driver = { .probe = palmas_pwron_probe, - .remove_new = palmas_pwron_remove, + .remove = palmas_pwron_remove, .driver = { .name = "palmas_pwrbutton", .of_match_table = of_match_ptr(of_palmas_pwr_match), diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c index f8954a2cab24..fe43fd72ba7b 100644 --- a/drivers/input/misc/pcap_keys.c +++ b/drivers/input/misc/pcap_keys.c @@ -112,7 +112,7 @@ static void pcap_keys_remove(struct platform_device *pdev) static struct platform_driver pcap_keys_device_driver = { .probe = pcap_keys_probe, - .remove_new = pcap_keys_remove, + .remove = pcap_keys_remove, .driver = { .name = "pcap-keys", } diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c index c5c5fe236c18..6d046e236ba6 100644 --- a/drivers/input/misc/pcf50633-input.c +++ b/drivers/input/misc/pcf50633-input.c @@ -103,7 +103,7 @@ static struct platform_driver pcf50633_input_driver = { .name = "pcf50633-input", }, .probe = pcf50633_input_probe, - .remove_new = pcf50633_input_remove, + .remove = pcf50633_input_remove, }; module_platform_driver(pcf50633_input_driver); diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 897854fd245f..0467808402f2 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -127,7 +127,7 @@ static struct platform_driver pcspkr_platform_driver = { .pm = &pcspkr_pm_ops, }, .probe = pcspkr_probe, - .remove_new = pcspkr_remove, + .remove = pcspkr_remove, .shutdown = pcspkr_shutdown, }; module_platform_driver(pcspkr_platform_driver); diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index bab710023d8f..d0c46665e527 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -465,7 +465,7 @@ MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table); static struct platform_driver pm8941_pwrkey_driver = { .probe = pm8941_pwrkey_probe, - .remove_new = pm8941_pwrkey_remove, + .remove = pm8941_pwrkey_remove, .driver = { .name = "pm8941-pwrkey", .pm = pm_sleep_ptr(&pm8941_pwr_key_pm_ops), diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c index 4b039abffc4b..ecb92ee5ebbc 100644 --- a/drivers/input/misc/powermate.c +++ b/drivers/input/misc/powermate.c @@ -194,22 +194,18 @@ static void powermate_sync_state(struct powermate_device *pm) static void powermate_config_complete(struct urb *urb) { struct powermate_device *pm = urb->context; - unsigned long flags; if (urb->status) printk(KERN_ERR "powermate: config urb returned %d\n", urb->status); - spin_lock_irqsave(&pm->lock, flags); + guard(spinlock_irqsave)(&pm->lock); powermate_sync_state(pm); - spin_unlock_irqrestore(&pm->lock, flags); } /* Set the LED up as described and begin the sync with the hardware if required */ static void powermate_pulse_led(struct powermate_device *pm, int static_brightness, int pulse_speed, int pulse_table, int pulse_asleep, int pulse_awake) { - unsigned long flags; - if (pulse_speed < 0) pulse_speed = 0; if (pulse_table < 0) @@ -222,8 +218,7 @@ static void powermate_pulse_led(struct powermate_device *pm, int static_brightne pulse_asleep = !!pulse_asleep; pulse_awake = !!pulse_awake; - - spin_lock_irqsave(&pm->lock, flags); + guard(spinlock_irqsave)(&pm->lock); /* mark state updates which are required */ if (static_brightness != pm->static_brightness) { @@ -245,8 +240,6 @@ static void powermate_pulse_led(struct powermate_device *pm, int static_brightne } powermate_sync_state(pm); - - spin_unlock_irqrestore(&pm->lock, 
flags); } /* Callback from the Input layer when an event arrives from userspace to configure the LED */ diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c index 5b9aedf4362f..0e19e97d98ec 100644 --- a/drivers/input/misc/pwm-beeper.c +++ b/drivers/input/misc/pwm-beeper.c @@ -203,9 +203,9 @@ static int pwm_beeper_suspend(struct device *dev) * beeper->suspended, but to ensure that pwm_beeper_event * does not re-submit work once flag is set. */ - spin_lock_irq(&beeper->input->event_lock); - beeper->suspended = true; - spin_unlock_irq(&beeper->input->event_lock); + scoped_guard(spinlock_irq, &beeper->input->event_lock) { + beeper->suspended = true; + } pwm_beeper_stop(beeper); @@ -216,9 +216,9 @@ static int pwm_beeper_resume(struct device *dev) { struct pwm_beeper *beeper = dev_get_drvdata(dev); - spin_lock_irq(&beeper->input->event_lock); - beeper->suspended = false; - spin_unlock_irq(&beeper->input->event_lock); + scoped_guard(spinlock_irq, &beeper->input->event_lock) { + beeper->suspended = false; + } /* Let worker figure out if we should resume beeping */ schedule_work(&beeper->work); diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c index 02f73b7c0462..3666ba6d1f30 100644 --- a/drivers/input/misc/regulator-haptic.c +++ b/drivers/input/misc/regulator-haptic.c @@ -83,12 +83,10 @@ static void regulator_haptic_work(struct work_struct *work) struct regulator_haptic *haptic = container_of(work, struct regulator_haptic, work); - mutex_lock(&haptic->mutex); + guard(mutex)(&haptic->mutex); if (!haptic->suspended) regulator_haptic_set_voltage(haptic, haptic->magnitude); - - mutex_unlock(&haptic->mutex); } static int regulator_haptic_play_effect(struct input_dev *input, void *data, @@ -205,19 +203,15 @@ static int regulator_haptic_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct regulator_haptic *haptic = platform_get_drvdata(pdev); - int error; - error = mutex_lock_interruptible(&haptic->mutex); - if (error) - return error; - - regulator_haptic_set_voltage(haptic, 0); + scoped_guard(mutex_intr, &haptic->mutex) { + regulator_haptic_set_voltage(haptic, 0); + haptic->suspended = true; - haptic->suspended = true; - - mutex_unlock(&haptic->mutex); + return 0; + } - return 0; + return -EINTR; } static int regulator_haptic_resume(struct device *dev) @@ -226,7 +220,7 @@ static int regulator_haptic_resume(struct device *dev) struct regulator_haptic *haptic = platform_get_drvdata(pdev); unsigned int magnitude; - mutex_lock(&haptic->mutex); + guard(mutex)(&haptic->mutex); haptic->suspended = false; @@ -234,8 +228,6 @@ static int regulator_haptic_resume(struct device *dev) if (magnitude) regulator_haptic_set_voltage(haptic, magnitude); - mutex_unlock(&haptic->mutex); - return 0; } diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index e94cab8133be..f706e1997417 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c @@ -106,7 +106,7 @@ static irqreturn_t rotary_encoder_irq(int irq, void *dev_id) struct rotary_encoder *encoder = dev_id; unsigned int state; - mutex_lock(&encoder->access_mutex); + guard(mutex)(&encoder->access_mutex); state = rotary_encoder_get_state(encoder); @@ -129,8 +129,6 @@ static irqreturn_t rotary_encoder_irq(int irq, void *dev_id) break; } - mutex_unlock(&encoder->access_mutex); - return IRQ_HANDLED; } @@ -139,7 +137,7 @@ static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id) struct 
rotary_encoder *encoder = dev_id; unsigned int state; - mutex_lock(&encoder->access_mutex); + guard(mutex)(&encoder->access_mutex); state = rotary_encoder_get_state(encoder); @@ -152,8 +150,6 @@ static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id) } } - mutex_unlock(&encoder->access_mutex); - return IRQ_HANDLED; } @@ -162,22 +158,19 @@ static irqreturn_t rotary_encoder_quarter_period_irq(int irq, void *dev_id) struct rotary_encoder *encoder = dev_id; unsigned int state; - mutex_lock(&encoder->access_mutex); + guard(mutex)(&encoder->access_mutex); state = rotary_encoder_get_state(encoder); - if ((encoder->last_stable + 1) % 4 == state) + if ((encoder->last_stable + 1) % 4 == state) { encoder->dir = 1; - else if (encoder->last_stable == (state + 1) % 4) + rotary_encoder_report_event(encoder); + } else if (encoder->last_stable == (state + 1) % 4) { encoder->dir = -1; - else - goto out; - - rotary_encoder_report_event(encoder); + rotary_encoder_report_event(encoder); + } -out: encoder->last_stable = state; - mutex_unlock(&encoder->access_mutex); return IRQ_HANDLED; } diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 5c5d407fe965..b8cad415c62c 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -515,7 +515,7 @@ static const struct soc_device_data soc_device_INT33D3 = { }; /* - * Button info for Microsoft Surface 3 (non pro), this is indentical to + * Button info for Microsoft Surface 3 (non pro), this is identical to * the PNP0C40 info except that the home button is active-high. * * The Surface 3 Pro also has a MSHW0028 ACPI device, but that uses a custom @@ -612,7 +612,7 @@ MODULE_DEVICE_TABLE(acpi, soc_button_acpi_match); static struct platform_driver soc_button_driver = { .probe = soc_button_probe, - .remove_new = soc_button_remove, + .remove = soc_button_remove, .driver = { .name = KBUILD_MODNAME, .acpi_match_table = ACPI_PTR(soc_button_acpi_match), diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index 20020cbc0752..8d7303fc13bc 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -69,7 +69,6 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent); struct bbc_beep_info *info = &state->u.bbc; unsigned int count = 0; - unsigned long flags; if (type != EV_SND) return -1; @@ -85,7 +84,7 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int count = bbc_count_to_reg(info, count); - spin_lock_irqsave(&state->lock, flags); + guard(spinlock_irqsave)(&state->lock); if (count) { sbus_writeb(0x01, info->regs + 0); @@ -97,8 +96,6 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int sbus_writeb(0x00, info->regs + 0); } - spin_unlock_irqrestore(&state->lock, flags); - return 0; } @@ -107,7 +104,6 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent); struct grover_beep_info *info = &state->u.grover; unsigned int count = 0; - unsigned long flags; if (type != EV_SND) return -1; @@ -121,7 +117,7 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned if (value > 20 && value < 32767) count = 1193182 / value; - spin_lock_irqsave(&state->lock, flags); + guard(spinlock_irqsave)(&state->lock); if (count) { /* enable counter 2 */ @@ -136,8 +132,6 @@ static int 
grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned sbus_writeb(sbus_readb(info->enable_reg) & 0xFC, info->enable_reg); } - spin_unlock_irqrestore(&state->lock, flags); - return 0; } @@ -188,47 +182,38 @@ static int bbc_beep_probe(struct platform_device *op) { struct sparcspkr_state *state; struct bbc_beep_info *info; - struct device_node *dp; - int err = -ENOMEM; + int err; - state = kzalloc(sizeof(*state), GFP_KERNEL); + state = devm_kzalloc(&op->dev, sizeof(*state), GFP_KERNEL); if (!state) - goto out_err; + return -ENOMEM; state->name = "Sparc BBC Speaker"; state->event = bbc_spkr_event; spin_lock_init(&state->lock); - dp = of_find_node_by_path("/"); - err = -ENODEV; + struct device_node *dp __free(device_node) = of_find_node_by_path("/"); if (!dp) - goto out_free; + return -ENODEV; info = &state->u.bbc; info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0); - of_node_put(dp); if (!info->clock_freq) - goto out_free; + return -ENODEV; info->regs = of_ioremap(&op->resource[0], 0, 6, "bbc beep"); if (!info->regs) - goto out_free; + return -ENODEV; platform_set_drvdata(op, state); err = sparcspkr_probe(&op->dev); - if (err) - goto out_clear_drvdata; + if (err) { + of_iounmap(&op->resource[0], info->regs, 6); + return err; + } return 0; - -out_clear_drvdata: - of_iounmap(&op->resource[0], info->regs, 6); - -out_free: - kfree(state); -out_err: - return err; } static void bbc_remove(struct platform_device *op) @@ -243,8 +228,6 @@ static void bbc_remove(struct platform_device *op) input_unregister_device(input_dev); of_iounmap(&op->resource[0], info->regs, 6); - - kfree(state); } static const struct of_device_id bbc_beep_match[] = { @@ -262,7 +245,7 @@ static struct platform_driver bbc_beep_driver = { .of_match_table = bbc_beep_match, }, .probe = bbc_beep_probe, - .remove_new = bbc_remove, + .remove = bbc_remove, .shutdown = sparcspkr_shutdown, }; @@ -272,9 +255,9 @@ static int grover_beep_probe(struct platform_device *op) struct grover_beep_info *info; int err = -ENOMEM; - state = kzalloc(sizeof(*state), GFP_KERNEL); + state = devm_kzalloc(&op->dev, sizeof(*state), GFP_KERNEL); if (!state) - goto out_err; + return err; state->name = "Sparc Grover Speaker"; state->event = grover_spkr_event; @@ -283,7 +266,7 @@ static int grover_beep_probe(struct platform_device *op) info = &state->u.grover; info->freq_regs = of_ioremap(&op->resource[2], 0, 2, "grover beep freq"); if (!info->freq_regs) - goto out_free; + return err; info->enable_reg = of_ioremap(&op->resource[3], 0, 1, "grover beep enable"); if (!info->enable_reg) @@ -302,9 +285,7 @@ out_clear_drvdata: out_unmap_freq_regs: of_iounmap(&op->resource[2], info->freq_regs, 2); -out_free: - kfree(state); -out_err: + return err; } @@ -321,8 +302,6 @@ static void grover_remove(struct platform_device *op) of_iounmap(&op->resource[3], info->enable_reg, 1); of_iounmap(&op->resource[2], info->freq_regs, 2); - - kfree(state); } static const struct of_device_id grover_beep_match[] = { @@ -340,7 +319,7 @@ static struct platform_driver grover_beep_driver = { .of_match_table = grover_beep_match, }, .probe = grover_beep_probe, - .remove_new = grover_remove, + .remove = grover_remove, .shutdown = sparcspkr_shutdown, }; diff --git a/drivers/input/misc/tps65219-pwrbutton.c b/drivers/input/misc/tps65219-pwrbutton.c index eeb9f2334ab4..7a58bae4f1a0 100644 --- a/drivers/input/misc/tps65219-pwrbutton.c +++ b/drivers/input/misc/tps65219-pwrbutton.c @@ -137,7 +137,7 @@ MODULE_DEVICE_TABLE(platform, tps65219_pwrbtn_id_table); static struct 
platform_driver tps65219_pb_driver = { .probe = tps65219_pb_probe, - .remove_new = tps65219_pb_remove, + .remove = tps65219_pb_remove, .driver = { .name = "tps65219_pwrbutton", }, diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index 101548b35ee3..5fa7d4a7da36 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -165,15 +165,10 @@ static DEFINE_SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops, static bool twl4030_vibra_check_coexist(struct device_node *parent) { - struct device_node *node; + struct device_node *node __free(device_node) = + of_get_child_by_name(parent, "codec"); - node = of_get_child_by_name(parent, "codec"); - if (node) { - of_node_put(node); - return true; - } - - return false; + return node != NULL; } static int twl4030_vibra_probe(struct platform_device *pdev) diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 78f0b63e5c20..afed9af65bf9 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c @@ -229,14 +229,13 @@ static DEFINE_SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, static int twl6040_vibra_probe(struct platform_device *pdev) { struct device *twl6040_core_dev = pdev->dev.parent; - struct device_node *twl6040_core_node; struct vibra_info *info; int vddvibl_uV = 0; int vddvibr_uV = 0; int error; - twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node, - "vibra"); + struct device_node *twl6040_core_node __free(device_node) = + of_get_child_by_name(twl6040_core_dev->of_node, "vibra"); if (!twl6040_core_node) { dev_err(&pdev->dev, "parent of node is missing?\n"); return -EINVAL; @@ -244,7 +243,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev) info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) { - of_node_put(twl6040_core_node); dev_err(&pdev->dev, "couldn't allocate memory\n"); return -ENOMEM; } @@ -264,8 +262,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev) of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV", &vddvibl_uV); of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV", &vddvibr_uV); - of_node_put(twl6040_core_node); - if ((!info->vibldrv_res && !info->viblmotor_res) || (!info->vibrdrv_res && !info->vibrmotor_res)) { dev_err(info->dev, "invalid vibra driver/motor resistance\n"); diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index 5a64557920fa..1b2bd2139aaa 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c @@ -271,7 +271,7 @@ static struct key_entry keymap_fs_amilo_pro_v8210[] __initdata = { { KE_BLUETOOTH, 0x30 }, /* Fn+F10 */ { KE_KEY, 0x31, {KEY_MAIL} }, /* mail button */ { KE_KEY, 0x36, {KEY_WWW} }, /* www button */ - { KE_WIFI, 0x78 }, /* satelite dish button */ + { KE_WIFI, 0x78 }, /* satellite dish button */ { KE_END, FE_WIFI_LED } }; @@ -1334,7 +1334,7 @@ static struct platform_driver wistron_driver = { .pm = pm_sleep_ptr(&wistron_pm_ops), }, .probe = wistron_probe, - .remove_new = wistron_remove, + .remove = wistron_remove, }; static int __init wb_module_init(void) diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c index e4a06c73b72d..18eb319bc6f7 100644 --- a/drivers/input/misc/wm831x-on.c +++ b/drivers/input/misc/wm831x-on.c @@ -134,7 +134,7 @@ static void wm831x_on_remove(struct platform_device *pdev) static struct platform_driver wm831x_on_driver = { .probe = wm831x_on_probe, - .remove_new = wm831x_on_remove, + .remove = wm831x_on_remove, .driver = { 
.name = "wm831x-on", }, diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 8866bf65d347..08dc53ae1b3c 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c @@ -377,7 +377,7 @@ send_update: if (len > sizeof(yld->ctl_data->data)) len = sizeof(yld->ctl_data->data); - /* Combine up to <len> consecutive LCD bytes in a singe request + /* Combine up to <len> consecutive LCD bytes in a single request */ yld->ctl_data->cmd = CMD_LCD; yld->ctl_data->offset = cpu_to_be16(ix); @@ -614,7 +614,7 @@ static ssize_t show_line3(struct device *dev, struct device_attribute *attr, return show_line(dev, buf, LCD_LINE3_OFFSET, LCD_LINE4_OFFSET); } -/* Writing to /sys/../lineX will set the coresponding LCD line. +/* Writing to /sys/../lineX will set the corresponding LCD line. * - Excess characters are ignored. * - If less characters are written than allowed, the remaining digits are * unchanged. diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 4e37fc3f1a9e..0728b5c08f02 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1585,7 +1585,7 @@ static void alps_flush_packet(struct timer_list *t) struct alps_data *priv = from_timer(priv, t, timer); struct psmouse *psmouse = priv->psmouse; - serio_pause_rx(psmouse->ps2dev.serio); + guard(serio_pause_rx)(psmouse->ps2dev.serio); if (psmouse->pktcnt == psmouse->pktsize) { @@ -1605,8 +1605,6 @@ static void alps_flush_packet(struct timer_list *t) } psmouse->pktcnt = 0; } - - serio_continue_rx(psmouse->ps2dev.serio); } static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c index 2fbbaeb76d70..d203c2a6c438 100644 --- a/drivers/input/mouse/amimouse.c +++ b/drivers/input/mouse/amimouse.c @@ -139,7 +139,7 @@ static void __exit amimouse_remove(struct platform_device *pdev) * triggering a section mismatch warning. 
*/ static struct platform_driver amimouse_driver __refdata = { - .remove_new = __exit_p(amimouse_remove), + .remove = __exit_p(amimouse_remove), .driver = { .name = "amiga-mouse", }, diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c index 221a553f45cd..654b38d249f3 100644 --- a/drivers/input/mouse/byd.c +++ b/drivers/input/mouse/byd.c @@ -254,13 +254,12 @@ static void byd_clear_touch(struct timer_list *t) struct byd_data *priv = from_timer(priv, t, timer); struct psmouse *psmouse = priv->psmouse; - serio_pause_rx(psmouse->ps2dev.serio); + guard(serio_pause_rx)(psmouse->ps2dev.serio); + priv->touch = false; byd_report_input(psmouse); - serio_continue_rx(psmouse->ps2dev.serio); - /* * Move cursor back to center of pad when we lose touch - this * specifically improves user experience when moving cursor with one diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 7521981274bd..a841883660fb 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -541,7 +541,8 @@ static int elan_update_firmware(struct elan_tp_data *data, dev_dbg(&client->dev, "Starting firmware update....\n"); - disable_irq(client->irq); + guard(disable_irq)(&client->irq); + data->in_fw_update = true; retval = __elan_update_firmware(data, fw); @@ -555,7 +556,6 @@ static int elan_update_firmware(struct elan_tp_data *data, } data->in_fw_update = false; - enable_irq(client->irq); return retval; } @@ -621,8 +621,6 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, const char *buf, size_t count) { struct elan_tp_data *data = dev_get_drvdata(dev); - const struct firmware *fw; - char *fw_name; int error; const u8 *fw_signature; static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; @@ -631,15 +629,16 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, return -EINVAL; /* Look for a firmware with the product id appended. 
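 * For example (illustrative, assuming ETP_FW_NAME is a printf-style
 * template along the lines of "elan_i2c_%04x.bin"): a product_id of
 * 0x00ff would make the code below request "elan_i2c_00ff.bin".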
*/ - fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); + const char *fw_name __free(kfree) = + kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); if (!fw_name) { dev_err(dev, "failed to allocate memory for firmware name\n"); return -ENOMEM; } dev_info(dev, "requesting fw '%s'\n", fw_name); + const struct firmware *fw __free(firmware) = NULL; error = request_firmware(&fw, fw_name, dev); - kfree(fw_name); if (error) { dev_err(dev, "failed to request firmware: %d\n", error); return error; @@ -651,46 +650,36 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, dev_err(dev, "signature mismatch (expected %*ph, got %*ph)\n", (int)sizeof(signature), signature, (int)sizeof(signature), fw_signature); - error = -EBADF; - goto out_release_fw; + return -EBADF; } - error = mutex_lock_interruptible(&data->sysfs_mutex); - if (error) - goto out_release_fw; - - error = elan_update_firmware(data, fw); - - mutex_unlock(&data->sysfs_mutex); + scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) { + error = elan_update_firmware(data, fw); + if (error) + return error; + } -out_release_fw: - release_firmware(fw); - return error ?: count; + return count; } -static ssize_t calibrate_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int elan_calibrate(struct elan_tp_data *data) { - struct i2c_client *client = to_i2c_client(dev); - struct elan_tp_data *data = i2c_get_clientdata(client); + struct i2c_client *client = data->client; + struct device *dev = &client->dev; int tries = 20; int retval; int error; u8 val[ETP_CALIBRATE_MAX_LEN]; - retval = mutex_lock_interruptible(&data->sysfs_mutex); - if (retval) - return retval; - - disable_irq(client->irq); + guard(disable_irq)(&client->irq); data->mode |= ETP_ENABLE_CALIBRATE; retval = data->ops->set_mode(client, data->mode); if (retval) { + data->mode &= ~ETP_ENABLE_CALIBRATE; dev_err(dev, "failed to enable calibration mode: %d\n", retval); - goto out; + return retval; } retval = data->ops->calibrate(client); @@ -728,10 +717,24 @@ out_disable_calibrate: if (!retval) retval = error; } -out: - enable_irq(client->irq); - mutex_unlock(&data->sysfs_mutex); - return retval ?: count; + return retval; +} + +static ssize_t calibrate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct elan_tp_data *data = i2c_get_clientdata(client); + int error; + + scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) { + error = elan_calibrate(data); + if (error) + return error; + } + + return count; } static ssize_t elan_sysfs_read_mode(struct device *dev, @@ -743,16 +746,11 @@ static ssize_t elan_sysfs_read_mode(struct device *dev, int error; enum tp_mode mode; - error = mutex_lock_interruptible(&data->sysfs_mutex); - if (error) - return error; - - error = data->ops->iap_get_mode(data->client, &mode); - - mutex_unlock(&data->sysfs_mutex); - - if (error) - return error; + scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) { + error = data->ops->iap_get_mode(data->client, &mode); + if (error) + return error; + } return sysfs_emit(buf, "%d\n", (int)mode); } @@ -783,44 +781,40 @@ static const struct attribute_group elan_sysfs_group = { .attrs = elan_sysfs_entries, }; -static ssize_t acquire_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int elan_acquire_baseline(struct elan_tp_data *data) { - struct i2c_client *client = to_i2c_client(dev); 
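/*
 * A minimal sketch (hypothetical function and firmware name) of the
 * __free() idiom used in the hunk above: cleanup callbacks registered
 * with DEFINE_FREE() run when the annotated pointers go out of scope,
 * so kfree() and release_firmware() no longer have to be called on
 * every error path by hand.
 */
static int example_load_firmware(struct device *dev, u16 product_id)
{
	const char *fw_name __free(kfree) =
		kasprintf(GFP_KERNEL, "example_%04x.bin", product_id);
	if (!fw_name)
		return -ENOMEM;

	const struct firmware *fw __free(firmware) = NULL;
	int error = request_firmware(&fw, fw_name, dev);
	if (error)
		return error;	/* fw_name is freed here */

	/* ... consume fw->data ... */
	return 0;		/* fw is released and fw_name is freed here */
}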
- struct elan_tp_data *data = i2c_get_clientdata(client); - int error; + struct i2c_client *client = data->client; + struct device *dev = &client->dev; int retval; + int error; - retval = mutex_lock_interruptible(&data->sysfs_mutex); - if (retval) - return retval; - - disable_irq(client->irq); + guard(disable_irq)(&client->irq); data->baseline_ready = false; data->mode |= ETP_ENABLE_CALIBRATE; - retval = data->ops->set_mode(data->client, data->mode); + retval = data->ops->set_mode(client, data->mode); if (retval) { + data->mode &= ~ETP_ENABLE_CALIBRATE; dev_err(dev, "Failed to enable calibration mode to get baseline: %d\n", retval); - goto out; + return retval; } msleep(250); - retval = data->ops->get_baseline_data(data->client, true, + retval = data->ops->get_baseline_data(client, true, &data->max_baseline); if (retval) { - dev_err(dev, "Failed to read max baseline form device: %d\n", + dev_err(dev, "Failed to read max baseline from device: %d\n", retval); goto out_disable_calibrate; } - retval = data->ops->get_baseline_data(data->client, false, + retval = data->ops->get_baseline_data(client, false, &data->min_baseline); if (retval) { - dev_err(dev, "Failed to read min baseline form device: %d\n", + dev_err(dev, "Failed to read min baseline from device: %d\n", retval); goto out_disable_calibrate; } @@ -829,17 +823,31 @@ static ssize_t acquire_store(struct device *dev, struct device_attribute *attr, out_disable_calibrate: data->mode &= ~ETP_ENABLE_CALIBRATE; - error = data->ops->set_mode(data->client, data->mode); + error = data->ops->set_mode(client, data->mode); if (error) { dev_err(dev, "Failed to disable calibration mode after acquiring baseline: %d\n", error); if (!retval) retval = error; } -out: - enable_irq(client->irq); - mutex_unlock(&data->sysfs_mutex); - return retval ?: count; + + return retval; +} + +static ssize_t acquire_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct elan_tp_data *data = i2c_get_clientdata(client); + int error; + + scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) { + error = elan_acquire_baseline(data); + if (error) + return error; + } + + return count; } static ssize_t min_show(struct device *dev, @@ -847,22 +855,15 @@ static ssize_t min_show(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct elan_tp_data *data = i2c_get_clientdata(client); - int retval; - retval = mutex_lock_interruptible(&data->sysfs_mutex); - if (retval) - return retval; + scoped_guard(mutex_intr, &data->sysfs_mutex) { + if (!data->baseline_ready) + return -ENODATA; - if (!data->baseline_ready) { - retval = -ENODATA; - goto out; + return sysfs_emit(buf, "%d", data->min_baseline); } - retval = sysfs_emit(buf, "%d", data->min_baseline); - -out: - mutex_unlock(&data->sysfs_mutex); - return retval; + return -EINTR; } static ssize_t max_show(struct device *dev, @@ -870,25 +871,17 @@ static ssize_t max_show(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct elan_tp_data *data = i2c_get_clientdata(client); - int retval; - retval = mutex_lock_interruptible(&data->sysfs_mutex); - if (retval) - return retval; + scoped_guard(mutex_intr, &data->sysfs_mutex) { + if (!data->baseline_ready) + return -ENODATA; - if (!data->baseline_ready) { - retval = -ENODATA; - goto out; + return sysfs_emit(buf, "%d", data->max_baseline); } - retval = sysfs_emit(buf, "%d", data->max_baseline); - -out: - mutex_unlock(&data->sysfs_mutex); - return 
retval; + return -EINTR; } - static DEVICE_ATTR_WO(acquire); static DEVICE_ATTR_RO(min); static DEVICE_ATTR_RO(max); @@ -1323,43 +1316,54 @@ static int elan_probe(struct i2c_client *client) return 0; } +static int __elan_suspend(struct elan_tp_data *data) +{ + struct i2c_client *client = data->client; + int error; + + if (device_may_wakeup(&client->dev)) + return elan_sleep(data); + + /* Touchpad is not a wakeup source */ + error = elan_set_power(data, false); + if (error) + return error; + + error = regulator_disable(data->vcc); + if (error) { + dev_err(&client->dev, + "failed to disable regulator when suspending: %d\n", + error); + /* Attempt to power the chip back up */ + elan_set_power(data, true); + return error; + } + + return 0; +} + static int elan_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct elan_tp_data *data = i2c_get_clientdata(client); - int ret; + int error; /* * We are taking the mutex to make sure sysfs operations are * complete before we attempt to bring the device into low[er] * power mode. */ - ret = mutex_lock_interruptible(&data->sysfs_mutex); - if (ret) - return ret; - - disable_irq(client->irq); + scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) { + disable_irq(client->irq); - if (device_may_wakeup(dev)) { - ret = elan_sleep(data); - } else { - ret = elan_set_power(data, false); - if (ret) - goto err; - - ret = regulator_disable(data->vcc); - if (ret) { - dev_err(dev, "error %d disabling regulator\n", ret); - /* Attempt to power the chip back up */ - elan_set_power(data, true); + error = __elan_suspend(data); + if (error) { + enable_irq(client->irq); + return error; } } -err: - if (ret) - enable_irq(client->irq); - mutex_unlock(&data->sysfs_mutex); - return ret; + return 0; } static int elan_resume(struct device *dev) diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 15cf4463b64e..a9057d124a88 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -628,12 +628,11 @@ static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size, const u8 *page, u16 checksum, int idx) { struct device *dev = &client->dev; - u8 *page_store; u8 val[3]; u16 result; int ret, error; - page_store = kmalloc(fw_page_size + 4, GFP_KERNEL); + u8 *page_store __free(kfree) = kmalloc(fw_page_size + 4, GFP_KERNEL); if (!page_store) return -ENOMEM; @@ -647,7 +646,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size, if (ret != fw_page_size + 4) { error = ret < 0 ? ret : -EIO; dev_err(dev, "Failed to write page %d: %d\n", idx, error); - goto exit; + return error; } /* Wait for F/W to update one page ROM data. 
*/ @@ -656,20 +655,17 @@ static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size, error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); if (error) { dev_err(dev, "Failed to read IAP write result: %d\n", error); - goto exit; + return error; } result = le16_to_cpup((__le16 *)val); if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) { dev_err(dev, "IAP reports failed write: %04hx\n", result); - error = -EIO; - goto exit; + return -EIO; } -exit: - kfree(page_store); - return error; + return 0; } static int elan_i2c_finish_fw_update(struct i2c_client *client, diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c index 2a2459b1b4f2..93420f07b7d0 100644 --- a/drivers/input/mouse/psmouse-smbus.c +++ b/drivers/input/mouse/psmouse-smbus.c @@ -35,7 +35,7 @@ static void psmouse_smbus_check_adapter(struct i2c_adapter *adapter) if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_HOST_NOTIFY)) return; - mutex_lock(&psmouse_smbus_mutex); + guard(mutex)(&psmouse_smbus_mutex); list_for_each_entry(smbdev, &psmouse_smbus_list, node) { if (smbdev->dead) @@ -55,15 +55,13 @@ static void psmouse_smbus_check_adapter(struct i2c_adapter *adapter) "SMBus candidate adapter appeared, triggering rescan\n"); serio_rescan(smbdev->psmouse->ps2dev.serio); } - - mutex_unlock(&psmouse_smbus_mutex); } static void psmouse_smbus_detach_i2c_client(struct i2c_client *client) { struct psmouse_smbus_dev *smbdev, *tmp; - mutex_lock(&psmouse_smbus_mutex); + guard(mutex)(&psmouse_smbus_mutex); list_for_each_entry_safe(smbdev, tmp, &psmouse_smbus_list, node) { if (smbdev->client != client) @@ -85,8 +83,6 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client) kfree(smbdev); } } - - mutex_unlock(&psmouse_smbus_mutex); } static int psmouse_smbus_notifier_call(struct notifier_block *nb, @@ -171,7 +167,7 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse) { struct psmouse_smbus_dev *smbdev = psmouse->private; - mutex_lock(&psmouse_smbus_mutex); + guard(mutex)(&psmouse_smbus_mutex); if (smbdev->dead) { list_del(&smbdev->node); @@ -186,8 +182,6 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse) psmouse_smbus_schedule_remove(smbdev->client); } - mutex_unlock(&psmouse_smbus_mutex); - psmouse->private = NULL; } @@ -219,7 +213,7 @@ void psmouse_smbus_cleanup(struct psmouse *psmouse) { struct psmouse_smbus_dev *smbdev, *tmp; - mutex_lock(&psmouse_smbus_mutex); + guard(mutex)(&psmouse_smbus_mutex); list_for_each_entry_safe(smbdev, tmp, &psmouse_smbus_list, node) { if (psmouse == smbdev->psmouse) { @@ -227,8 +221,6 @@ void psmouse_smbus_cleanup(struct psmouse *psmouse) kfree(smbdev); } } - - mutex_unlock(&psmouse_smbus_mutex); } int psmouse_smbus_init(struct psmouse *psmouse, @@ -267,9 +259,9 @@ int psmouse_smbus_init(struct psmouse *psmouse, psmouse->disconnect = psmouse_smbus_disconnect; psmouse->resync_time = 0; - mutex_lock(&psmouse_smbus_mutex); - list_add_tail(&smbdev->node, &psmouse_smbus_list); - mutex_unlock(&psmouse_smbus_mutex); + scoped_guard(mutex, &psmouse_smbus_mutex) { + list_add_tail(&smbdev->node, &psmouse_smbus_list); + } /* Bind to already existing adapters right away */ error = i2c_for_each_dev(smbdev, psmouse_smbus_create_companion); @@ -293,9 +285,9 @@ int psmouse_smbus_init(struct psmouse *psmouse, smbdev->board.platform_data = NULL; if (error < 0 || !leave_breadcrumbs) { - mutex_lock(&psmouse_smbus_mutex); - list_del(&smbdev->node); - mutex_unlock(&psmouse_smbus_mutex); + scoped_guard(mutex, 
&psmouse_smbus_mutex) { + list_del(&smbdev->node); + } kfree(smbdev); } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 380aa1614442..2735f86c23cc 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -650,9 +650,8 @@ static int synaptics_pt_start(struct serio *serio) struct psmouse *parent = psmouse_from_serio(serio->parent); struct synaptics_data *priv = parent->private; - serio_pause_rx(parent->ps2dev.serio); + guard(serio_pause_rx)(parent->ps2dev.serio); priv->pt_port = serio; - serio_continue_rx(parent->ps2dev.serio); return 0; } @@ -662,9 +661,8 @@ static void synaptics_pt_stop(struct serio *serio) struct psmouse *parent = psmouse_from_serio(serio->parent); struct synaptics_data *priv = parent->private; - serio_pause_rx(parent->ps2dev.serio); + guard(serio_pause_rx)(parent->ps2dev.serio); priv->pt_port = NULL; - serio_continue_rx(parent->ps2dev.serio); } static int synaptics_is_pt_packet(u8 *buf) diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 08533d1b1b16..899aee598632 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -21,7 +21,7 @@ #define SYN_QUE_EXT_MIN_COORDS 0x0f #define SYN_QUE_MEXT_CAPAB_10 0x10 -/* synatics modes */ +/* synaptics modes */ #define SYN_BIT_ABSOLUTE_MODE BIT(7) #define SYN_BIT_HIGH_RATE BIT(6) #define SYN_BIT_SLEEP_MODE BIT(3) diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c index 1e11ea30d7bd..e1157ff0f00a 100644 --- a/drivers/input/rmi4/rmi_f03.c +++ b/drivers/input/rmi4/rmi_f03.c @@ -61,14 +61,14 @@ void rmi_f03_commit_buttons(struct rmi_function *fn) struct f03_data *f03 = dev_get_drvdata(&fn->dev); struct serio *serio = f03->serio; - serio_pause_rx(serio); + guard(serio_pause_rx)(serio); + if (serio->drv) { serio->drv->interrupt(serio, PSMOUSE_OOB_EXTRA_BTNS, SERIO_OOB_DATA); serio->drv->interrupt(serio, f03->overwrite_buttons, SERIO_OOB_DATA); } - serio_continue_rx(serio); } static int rmi_f03_pt_write(struct serio *id, unsigned char val) diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index e2468bc04a5c..d760af4cc12e 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -246,7 +246,6 @@ static int rmi_f34_update_firmware(struct f34_data *f34, (const struct rmi_f34_firmware *)fw->data; u32 image_size = le32_to_cpu(syn_fw->image_size); u32 config_size = le32_to_cpu(syn_fw->config_size); - int ret; BUILD_BUG_ON(offsetof(struct rmi_f34_firmware, data) != F34_FW_IMAGE_OFFSET); @@ -267,8 +266,7 @@ static int rmi_f34_update_firmware(struct f34_data *f34, dev_err(&f34->fn->dev, "Bad firmware image: fw size %d, expected %d\n", image_size, f34->v5.fw_blocks * f34->v5.block_size); - ret = -EILSEQ; - goto out; + return -EILSEQ; } if (config_size && @@ -277,25 +275,18 @@ static int rmi_f34_update_firmware(struct f34_data *f34, "Bad firmware image: config size %d, expected %d\n", config_size, f34->v5.config_blocks * f34->v5.block_size); - ret = -EILSEQ; - goto out; + return -EILSEQ; } if (image_size && !config_size) { dev_err(&f34->fn->dev, "Bad firmware image: no config data\n"); - ret = -EILSEQ; - goto out; + return -EILSEQ; } dev_info(&f34->fn->dev, "Firmware image OK\n"); - mutex_lock(&f34->v5.flash_mutex); - - ret = rmi_f34_flash_firmware(f34, syn_fw); - mutex_unlock(&f34->v5.flash_mutex); - -out: - return ret; + guard(mutex)(&f34->v5.flash_mutex); + return rmi_f34_flash_firmware(f34, syn_fw); } static int rmi_f34_status(struct rmi_function *fn) @@ 
-461,9 +452,8 @@ static ssize_t rmi_driver_update_fw_store(struct device *dev, { struct rmi_driver_data *data = dev_get_drvdata(dev); char fw_name[NAME_MAX]; - const struct firmware *fw; size_t copy_count = count; - int ret; + int error; if (count == 0 || count >= NAME_MAX) return -EINVAL; @@ -474,17 +464,18 @@ static ssize_t rmi_driver_update_fw_store(struct device *dev, memcpy(fw_name, buf, copy_count); fw_name[copy_count] = '\0'; - ret = request_firmware(&fw, fw_name, dev); - if (ret) - return ret; + const struct firmware *fw __free(firmware) = NULL; + error = request_firmware(&fw, fw_name, dev); + if (error) + return error; dev_info(dev, "Flashing %s\n", fw_name); - ret = rmi_firmware_update(data, fw); - - release_firmware(fw); + error = rmi_firmware_update(data, fw); + if (error) + return error; - return ret ?: count; + return count; } static DEVICE_ATTR(update_fw, 0200, NULL, rmi_driver_update_fw_store); diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c index 611eb9fe2d04..aa445b1941e9 100644 --- a/drivers/input/serio/altera_ps2.c +++ b/drivers/input/serio/altera_ps2.c @@ -146,7 +146,7 @@ MODULE_DEVICE_TABLE(of, altera_ps2_match); */ static struct platform_driver altera_ps2_driver = { .probe = altera_ps2_probe, - .remove_new = altera_ps2_remove, + .remove = altera_ps2_remove, .driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(altera_ps2_match), diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c index 0bd6ae106809..81b3a053df81 100644 --- a/drivers/input/serio/ams_delta_serio.c +++ b/drivers/input/serio/ams_delta_serio.c @@ -182,7 +182,7 @@ static void ams_delta_serio_exit(struct platform_device *pdev) static struct platform_driver ams_delta_serio_driver = { .probe = ams_delta_serio_init, - .remove_new = ams_delta_serio_exit, + .remove = ams_delta_serio_exit, .driver = { .name = DRIVER_NAME }, diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c index 4015e75fcb90..b815337be2f4 100644 --- a/drivers/input/serio/apbps2.c +++ b/drivers/input/serio/apbps2.c @@ -208,7 +208,7 @@ static struct platform_driver apbps2_of_driver = { .of_match_table = apbps2_of_match, }, .probe = apbps2_of_probe, - .remove_new = apbps2_of_remove, + .remove = apbps2_of_remove, }; module_platform_driver(apbps2_of_driver); diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c index a9180a005872..e991c72296c9 100644 --- a/drivers/input/serio/arc_ps2.c +++ b/drivers/input/serio/arc_ps2.c @@ -260,7 +260,7 @@ static struct platform_driver arc_ps2_driver = { .of_match_table = of_match_ptr(arc_ps2_match), }, .probe = arc_ps2_probe, - .remove_new = arc_ps2_remove, + .remove = arc_ps2_remove, }; module_platform_driver(arc_ps2_driver); diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c index 6834440b37f6..053a15988c45 100644 --- a/drivers/input/serio/ct82c710.c +++ b/drivers/input/serio/ct82c710.c @@ -190,7 +190,7 @@ static struct platform_driver ct82c710_driver = { .name = "ct82c710", }, .probe = ct82c710_probe, - .remove_new = ct82c710_remove, + .remove = ct82c710_remove, }; diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index d94c01eb3fc9..4fada5bc2a38 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -145,7 +145,6 @@ static void gscps2_flush(struct gscps2port *ps2port) static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data) { - unsigned long flags; char __iomem *addr = ps2port->addr; if 
(!wait_TBE(addr)) { @@ -156,9 +155,8 @@ static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data) while (gscps2_readb_status(addr) & GSC_STAT_RBNE) /* wait */; - spin_lock_irqsave(&ps2port->lock, flags); - writeb(data, addr+GSC_XMTDATA); - spin_unlock_irqrestore(&ps2port->lock, flags); + scoped_guard(spinlock_irqsave, &ps2port->lock) + writeb(data, addr+GSC_XMTDATA); /* this is ugly, but due to timing of the port it seems to be necessary. */ mdelay(6); @@ -177,19 +175,19 @@ static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data) static void gscps2_enable(struct gscps2port *ps2port, int enable) { - unsigned long flags; u8 data; /* now enable/disable the port */ - spin_lock_irqsave(&ps2port->lock, flags); - gscps2_flush(ps2port); - data = gscps2_readb_control(ps2port->addr); - if (enable) - data |= GSC_CTRL_ENBL; - else - data &= ~GSC_CTRL_ENBL; - gscps2_writeb_control(data, ps2port->addr); - spin_unlock_irqrestore(&ps2port->lock, flags); + scoped_guard(spinlock_irqsave, &ps2port->lock) { + gscps2_flush(ps2port); + data = gscps2_readb_control(ps2port->addr); + if (enable) + data |= GSC_CTRL_ENBL; + else + data &= ~GSC_CTRL_ENBL; + gscps2_writeb_control(data, ps2port->addr); + } + wait_TBE(ps2port->addr); gscps2_flush(ps2port); } @@ -200,18 +198,57 @@ static void gscps2_enable(struct gscps2port *ps2port, int enable) static void gscps2_reset(struct gscps2port *ps2port) { - unsigned long flags; - /* reset the interface */ - spin_lock_irqsave(&ps2port->lock, flags); + guard(spinlock_irqsave)(&ps2port->lock); gscps2_flush(ps2port); writeb(0xff, ps2port->addr + GSC_RESET); gscps2_flush(ps2port); - spin_unlock_irqrestore(&ps2port->lock, flags); } static LIST_HEAD(ps2port_list); +static void gscps2_read_data(struct gscps2port *ps2port) +{ + u8 status; + + do { + status = gscps2_readb_status(ps2port->addr); + if (!(status & GSC_STAT_RBNE)) + break; + + ps2port->buffer[ps2port->append].str = status; + ps2port->buffer[ps2port->append].data = + gscps2_readb_input(ps2port->addr); + + ps2port->append = (ps2port->append + 1) & BUFFER_SIZE; + } while (true); +} + +static bool gscps2_report_data(struct gscps2port *ps2port) +{ + unsigned int rxflags; + u8 data, status; + + while (ps2port->act != ps2port->append) { + /* + * Did new data arrive while we read existing data? + * If yes, exit now and let the new irq handler start + * over again. + */ + if (gscps2_readb_status(ps2port->addr) & GSC_STAT_CMPINTR) + return true; + + status = ps2port->buffer[ps2port->act].str; + data = ps2port->buffer[ps2port->act].data; + + ps2port->act = (ps2port->act + 1) & BUFFER_SIZE; + rxflags = ((status & GSC_STAT_TERR) ? SERIO_TIMEOUT : 0 ) | + ((status & GSC_STAT_PERR) ?
SERIO_PARITY : 0 ); + + serio_interrupt(ps2port->port, data, rxflags); + } + + return false; +} + /** * gscps2_interrupt() - Interruption service routine * @@ -229,47 +266,18 @@ static irqreturn_t gscps2_interrupt(int irq, void *dev) struct gscps2port *ps2port; list_for_each_entry(ps2port, &ps2port_list, node) { + guard(spinlock_irqsave)(&ps2port->lock); - unsigned long flags; - spin_lock_irqsave(&ps2port->lock, flags); - - while ( (ps2port->buffer[ps2port->append].str = - gscps2_readb_status(ps2port->addr)) & GSC_STAT_RBNE ) { - ps2port->buffer[ps2port->append].data = - gscps2_readb_input(ps2port->addr); - ps2port->append = ((ps2port->append+1) & BUFFER_SIZE); - } - - spin_unlock_irqrestore(&ps2port->lock, flags); - + gscps2_read_data(ps2port); } /* list_for_each_entry */ /* all data was read from the ports - now report the data to upper layer */ - list_for_each_entry(ps2port, &ps2port_list, node) { - - while (ps2port->act != ps2port->append) { - - unsigned int rxflags; - u8 data, status; - - /* Did new data arrived while we read existing data ? - If yes, exit now and let the new irq handler start over again */ - if (gscps2_readb_status(ps2port->addr) & GSC_STAT_CMPINTR) - return IRQ_HANDLED; - - status = ps2port->buffer[ps2port->act].str; - data = ps2port->buffer[ps2port->act].data; - - ps2port->act = ((ps2port->act+1) & BUFFER_SIZE); - rxflags = ((status & GSC_STAT_TERR) ? SERIO_TIMEOUT : 0 ) | - ((status & GSC_STAT_PERR) ? SERIO_PARITY : 0 ); - - serio_interrupt(ps2port->port, data, rxflags); - - } /* while() */ - - } /* list_for_each_entry */ + if (gscps2_report_data(ps2port)) { + /* More data ready - break early to restart interrupt */ + break; + } + } return IRQ_HANDLED; } diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 31d9dacd2fd1..0ee7505427ac 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -102,7 +102,6 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev, { struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); struct synth_kbd_keystroke *ks_msg; - unsigned long flags; u32 msg_type = __le32_to_cpu(msg->header.type); u32 info; u16 scan_code; @@ -147,21 +146,22 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev, /* * Inject the information through the serio interrupt. 
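 * For example (illustrative): an extended-key release is injected as
 * XTKBD_EMUL0 (the 0xe0 prefix) followed by make_code | XTKBD_RELEASE,
 * the same byte stream an XT-style keyboard controller would emit.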
*/ - spin_lock_irqsave(&kbd_dev->lock, flags); - if (kbd_dev->started) { - if (info & IS_E0) - serio_interrupt(kbd_dev->hv_serio, - XTKBD_EMUL0, 0); - if (info & IS_E1) - serio_interrupt(kbd_dev->hv_serio, - XTKBD_EMUL1, 0); - scan_code = __le16_to_cpu(ks_msg->make_code); - if (info & IS_BREAK) - scan_code |= XTKBD_RELEASE; + scoped_guard(spinlock_irqsave, &kbd_dev->lock) { + if (kbd_dev->started) { + if (info & IS_E0) + serio_interrupt(kbd_dev->hv_serio, + XTKBD_EMUL0, 0); + if (info & IS_E1) + serio_interrupt(kbd_dev->hv_serio, + XTKBD_EMUL1, 0); + scan_code = __le16_to_cpu(ks_msg->make_code); + if (info & IS_BREAK) + scan_code |= XTKBD_RELEASE; - serio_interrupt(kbd_dev->hv_serio, scan_code, 0); + serio_interrupt(kbd_dev->hv_serio, + scan_code, 0); + } } - spin_unlock_irqrestore(&kbd_dev->lock, flags); /* * Only trigger a wakeup on key down, otherwise @@ -292,11 +292,10 @@ static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev) static int hv_kbd_start(struct serio *serio) { struct hv_kbd_dev *kbd_dev = serio->port_data; - unsigned long flags; - spin_lock_irqsave(&kbd_dev->lock, flags); + guard(spinlock_irqsave)(&kbd_dev->lock); + kbd_dev->started = true; - spin_unlock_irqrestore(&kbd_dev->lock, flags); return 0; } @@ -304,11 +303,10 @@ static int hv_kbd_start(struct serio *serio) static void hv_kbd_stop(struct serio *serio) { struct hv_kbd_dev *kbd_dev = serio->port_data; - unsigned long flags; - spin_lock_irqsave(&kbd_dev->lock, flags); + guard(spinlock_irqsave)(&kbd_dev->lock); + kbd_dev->started = false; - spin_unlock_irqrestore(&kbd_dev->lock, flags); } static int hv_kbd_probe(struct hv_device *hv_dev, diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 34d1f07ea4c3..127cfdc8668a 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -90,7 +90,7 @@ static inline void i8042_write_command(int val) * ORDERING IS IMPORTANT! The first match will be apllied and the rest ignored. * This allows entries to overwrite vendor wide quirks on a per device basis. * Where this is irrelevant, entries are sorted case sensitive by DMI_SYS_VENDOR - * and/or DMI_BOARD_VENDOR to make it easier to avoid dublicate entries. + * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries. 
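 *
 * Hypothetical illustration (these are not real table entries): a quirk
 * for a single model must be listed before the vendor-wide quirk it
 * overrides, e.g.
 *
 *	{ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
 *		       DMI_MATCH(DMI_PRODUCT_NAME, "Model X"), },
 *	  .driver_data = (void *)(SERIO_QUIRK_NOMUX) },
 *	{ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Vendor"), },
 *	  .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) },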
*/ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { { diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index c2fda54dc384..0f97158fd14e 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -101,23 +101,15 @@ static struct platform_driver sparc_i8042_driver = { .of_match_table = sparc_i8042_match, }, .probe = sparc_i8042_probe, - .remove_new = sparc_i8042_remove, + .remove = sparc_i8042_remove, }; static bool i8042_is_mr_coffee(void) { - struct device_node *root; - const char *name; - bool is_mr_coffee; + struct device_node *root __free(device_node) = of_find_node_by_path("/"); + const char *name = of_get_property(root, "name", NULL); - root = of_find_node_by_path("/"); - - name = of_get_property(root, "name", NULL); - is_mr_coffee = name && !strcmp(name, "SUNW,JavaStation-1"); - - of_node_put(root); - - return is_mr_coffee; + return name && !strcmp(name, "SUNW,JavaStation-1"); } static int __init i8042_platform_init(void) diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 8ec4872b4471..509330a27880 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -178,7 +178,7 @@ static unsigned char i8042_suppress_kbd_ack; static struct platform_device *i8042_platform_device; static struct notifier_block i8042_kbd_bind_notifier_block; -static irqreturn_t i8042_interrupt(int irq, void *dev_id); +static bool i8042_handle_data(int irq); static bool (*i8042_platform_filter)(unsigned char data, unsigned char str, struct serio *serio); @@ -197,42 +197,26 @@ EXPORT_SYMBOL(i8042_unlock_chip); int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { - unsigned long flags; - int ret = 0; + guard(spinlock_irqsave)(&i8042_lock); - spin_lock_irqsave(&i8042_lock, flags); - - if (i8042_platform_filter) { - ret = -EBUSY; - goto out; - } + if (i8042_platform_filter) + return -EBUSY; i8042_platform_filter = filter; - -out: - spin_unlock_irqrestore(&i8042_lock, flags); - return ret; + return 0; } EXPORT_SYMBOL(i8042_install_filter); int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *port)) { - unsigned long flags; - int ret = 0; + guard(spinlock_irqsave)(&i8042_lock); - spin_lock_irqsave(&i8042_lock, flags); - - if (i8042_platform_filter != filter) { - ret = -EINVAL; - goto out; - } + if (i8042_platform_filter != filter) + return -EINVAL; i8042_platform_filter = NULL; - -out: - spin_unlock_irqrestore(&i8042_lock, flags); - return ret; + return 0; } EXPORT_SYMBOL(i8042_remove_filter); @@ -271,28 +255,22 @@ static int i8042_wait_write(void) static int i8042_flush(void) { - unsigned long flags; unsigned char data, str; int count = 0; - int retval = 0; - spin_lock_irqsave(&i8042_lock, flags); + guard(spinlock_irqsave)(&i8042_lock); while ((str = i8042_read_status()) & I8042_STR_OBF) { - if (count++ < I8042_BUFFER_SIZE) { - udelay(50); - data = i8042_read_data(); - dbg("%02x <- i8042 (flush, %s)\n", - data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); - } else { - retval = -EIO; - break; - } - } + if (count++ >= I8042_BUFFER_SIZE) + return -EIO; - spin_unlock_irqrestore(&i8042_lock, flags); + udelay(50); + data = i8042_read_data(); + dbg("%02x <- i8042 (flush, %s)\n", + data, str & I8042_STR_AUXDATA ? 
"aux" : "kbd"); + } - return retval; + return 0; } /* @@ -349,17 +327,12 @@ static int __i8042_command(unsigned char *param, int command) int i8042_command(unsigned char *param, int command) { - unsigned long flags; - int retval; - if (!i8042_present) return -1; - spin_lock_irqsave(&i8042_lock, flags); - retval = __i8042_command(param, command); - spin_unlock_irqrestore(&i8042_lock, flags); + guard(spinlock_irqsave)(&i8042_lock); - return retval; + return __i8042_command(param, command); } EXPORT_SYMBOL(i8042_command); @@ -369,19 +342,18 @@ EXPORT_SYMBOL(i8042_command); static int i8042_kbd_write(struct serio *port, unsigned char c) { - unsigned long flags; - int retval = 0; + int error; - spin_lock_irqsave(&i8042_lock, flags); + guard(spinlock_irqsave)(&i8042_lock); - if (!(retval = i8042_wait_write())) { - dbg("%02x -> i8042 (kbd-data)\n", c); - i8042_write_data(c); - } + error = i8042_wait_write(); + if (error) + return error; - spin_unlock_irqrestore(&i8042_lock, flags); + dbg("%02x -> i8042 (kbd-data)\n", c); + i8042_write_data(c); - return retval; + return 0; } /* @@ -434,7 +406,7 @@ static void i8042_port_close(struct serio *serio) * See if there is any data appeared while we were messing with * port state. */ - i8042_interrupt(0, NULL); + i8042_handle_data(0); } /* @@ -460,9 +432,8 @@ static int i8042_start(struct serio *serio) device_set_wakeup_enable(&serio->dev, true); } - spin_lock_irq(&i8042_lock); + guard(spinlock_irq)(&i8042_lock); port->exists = true; - spin_unlock_irq(&i8042_lock); return 0; } @@ -476,10 +447,10 @@ static void i8042_stop(struct serio *serio) { struct i8042_port *port = serio->port_data; - spin_lock_irq(&i8042_lock); - port->exists = false; - port->serio = NULL; - spin_unlock_irq(&i8042_lock); + scoped_guard(spinlock_irq, &i8042_lock) { + port->exists = false; + port->serio = NULL; + } /* * We need to make sure that interrupt handler finishes using @@ -518,44 +489,10 @@ static bool i8042_filter(unsigned char data, unsigned char str, } /* - * i8042_interrupt() is the most important function in this driver - - * it handles the interrupts from the i8042, and sends incoming bytes - * to the upper layers. - */ - -static irqreturn_t i8042_interrupt(int irq, void *dev_id) -{ - struct i8042_port *port; - struct serio *serio; - unsigned long flags; - unsigned char str, data; - unsigned int dfl; - unsigned int port_no; - bool filtered; - int ret = 1; - - spin_lock_irqsave(&i8042_lock, flags); - - str = i8042_read_status(); - if (unlikely(~str & I8042_STR_OBF)) { - spin_unlock_irqrestore(&i8042_lock, flags); - if (irq) - dbg("Interrupt %d, without any data\n", irq); - ret = 0; - goto out; - } - - data = i8042_read_data(); - - if (i8042_mux_present && (str & I8042_STR_AUXDATA)) { - static unsigned long last_transmit; - static unsigned char last_str; - - dfl = 0; - if (str & I8042_STR_MUXERR) { - dbg("MUX error, status is %02x, data is %02x\n", - str, data); -/* + * i8042_handle_mux() handles case when data is coming from one of + * the multiplexed ports. It would be simple if not for quirks with + * handling errors: + * * When MUXERR condition is signalled the data register can only contain * 0xfd, 0xfe or 0xff if implementation follows the spec. Unfortunately * it is not always the case. Some KBCs also report 0xfc when there is @@ -567,50 +504,106 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) * rest assume that the data came from the same serio last byte * was transmitted (if transmission happened not too long ago). 
*/ - - switch (data) { - default: - if (time_before(jiffies, last_transmit + HZ/10)) { - str = last_str; - break; - } - fallthrough; /* report timeout */ - case 0xfc: - case 0xfd: - case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break; - case 0xff: dfl = SERIO_PARITY; data = 0xfe; break; +static int i8042_handle_mux(u8 str, u8 *data, unsigned int *dfl) +{ + static unsigned long last_transmit; + static unsigned long last_port; + unsigned int mux_port; + + mux_port = (str >> 6) & 3; + *dfl = 0; + + if (str & I8042_STR_MUXERR) { + dbg("MUX error, status is %02x, data is %02x\n", + str, *data); + + switch (*data) { + default: + if (time_before(jiffies, last_transmit + HZ/10)) { + mux_port = last_port; + break; } + fallthrough; /* report timeout */ + case 0xfc: + case 0xfd: + case 0xfe: + *dfl = SERIO_TIMEOUT; + *data = 0xfe; + break; + case 0xff: + *dfl = SERIO_PARITY; + *data = 0xfe; + break; } + } - port_no = I8042_MUX_PORT_NO + ((str >> 6) & 3); - last_str = str; - last_transmit = jiffies; - } else { + last_port = mux_port; + last_transmit = jiffies; - dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) | - ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0); + return I8042_MUX_PORT_NO + mux_port; +} - port_no = (str & I8042_STR_AUXDATA) ? - I8042_AUX_PORT_NO : I8042_KBD_PORT_NO; - } +/* + * i8042_handle_data() is the most important function in this driver - + * it reads the data from the i8042, determines its destination serio + * port, and sends received byte to the upper layers. + * + * Returns true if there was data waiting, false otherwise. + */ +static bool i8042_handle_data(int irq) +{ + struct i8042_port *port; + struct serio *serio; + unsigned char str, data; + unsigned int dfl; + unsigned int port_no; + bool filtered; + + scoped_guard(spinlock_irqsave, &i8042_lock) { + str = i8042_read_status(); + if (unlikely(~str & I8042_STR_OBF)) + return false; + + data = i8042_read_data(); + + if (i8042_mux_present && (str & I8042_STR_AUXDATA)) { + port_no = i8042_handle_mux(str, &data, &dfl); + } else { + + dfl = (str & I8042_STR_PARITY) ? SERIO_PARITY : 0; + if ((str & I8042_STR_TIMEOUT) && !i8042_notimeout) + dfl |= SERIO_TIMEOUT; - port = &i8042_ports[port_no]; - serio = port->exists ? port->serio : NULL; + port_no = (str & I8042_STR_AUXDATA) ? + I8042_AUX_PORT_NO : I8042_KBD_PORT_NO; + } - filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n", - port_no, irq, - dfl & SERIO_PARITY ? ", bad parity" : "", - dfl & SERIO_TIMEOUT ? ", timeout" : ""); + port = &i8042_ports[port_no]; + serio = port->exists ? port->serio : NULL; - filtered = i8042_filter(data, str, serio); + filter_dbg(port->driver_bound, + data, "<- i8042 (interrupt, %d, %d%s%s)\n", + port_no, irq, + dfl & SERIO_PARITY ? ", bad parity" : "", + dfl & SERIO_TIMEOUT ? 
", timeout" : ""); - spin_unlock_irqrestore(&i8042_lock, flags); + filtered = i8042_filter(data, str, serio); + } if (likely(serio && !filtered)) serio_interrupt(serio, data, dfl); - out: - return IRQ_RETVAL(ret); + return true; +} + +static irqreturn_t i8042_interrupt(int irq, void *dev_id) +{ + if (unlikely(!i8042_handle_data(irq))) { + dbg("Interrupt %d, without any data\n", irq); + return IRQ_NONE; + } + + return IRQ_HANDLED; } /* @@ -753,24 +746,22 @@ static bool i8042_irq_being_tested; static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id) { - unsigned long flags; unsigned char str, data; - int ret = 0; - spin_lock_irqsave(&i8042_lock, flags); + guard(spinlock_irqsave)(&i8042_lock); + str = i8042_read_status(); - if (str & I8042_STR_OBF) { - data = i8042_read_data(); - dbg("%02x <- i8042 (aux_test_irq, %s)\n", - data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); - if (i8042_irq_being_tested && - data == 0xa5 && (str & I8042_STR_AUXDATA)) - complete(&i8042_aux_irq_delivered); - ret = 1; - } - spin_unlock_irqrestore(&i8042_lock, flags); + if (!(str & I8042_STR_OBF)) + return IRQ_NONE; + + data = i8042_read_data(); + dbg("%02x <- i8042 (aux_test_irq, %s)\n", + data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); + + if (i8042_irq_being_tested && data == 0xa5 && (str & I8042_STR_AUXDATA)) + complete(&i8042_aux_irq_delivered); - return IRQ_RETVAL(ret); + return IRQ_HANDLED; } /* @@ -811,7 +802,6 @@ static int i8042_check_aux(void) int retval = -1; bool irq_registered = false; bool aux_loop_broken = false; - unsigned long flags; unsigned char param; /* @@ -895,18 +885,15 @@ static int i8042_check_aux(void) if (i8042_enable_aux_port()) goto out; - spin_lock_irqsave(&i8042_lock, flags); - - init_completion(&i8042_aux_irq_delivered); - i8042_irq_being_tested = true; + scoped_guard(spinlock_irqsave, &i8042_lock) { + init_completion(&i8042_aux_irq_delivered); + i8042_irq_being_tested = true; - param = 0xa5; - retval = __i8042_command(¶m, I8042_CMD_AUX_LOOP & 0xf0ff); - - spin_unlock_irqrestore(&i8042_lock, flags); - - if (retval) - goto out; + param = 0xa5; + retval = __i8042_command(¶m, I8042_CMD_AUX_LOOP & 0xf0ff); + if (retval) + goto out; + } if (wait_for_completion_timeout(&i8042_aux_irq_delivered, msecs_to_jiffies(250)) == 0) { @@ -994,7 +981,6 @@ static int i8042_controller_selftest(void) static int i8042_controller_init(void) { - unsigned long flags; int n = 0; unsigned char ctr[2]; @@ -1031,14 +1017,14 @@ static int i8042_controller_init(void) * Handle keylock. 
*/ - spin_lock_irqsave(&i8042_lock, flags); - if (~i8042_read_status() & I8042_STR_KEYLOCK) { - if (i8042_unlock) - i8042_ctr |= I8042_CTR_IGNKEYLOCK; - else - pr_warn("Warning: Keylock active\n"); + scoped_guard(spinlock_irqsave, &i8042_lock) { + if (~i8042_read_status() & I8042_STR_KEYLOCK) { + if (i8042_unlock) + i8042_ctr |= I8042_CTR_IGNKEYLOCK; + else + pr_warn("Warning: Keylock active\n"); + } } - spin_unlock_irqrestore(&i8042_lock, flags); /* * If the chip is configured into nontranslated mode by the BIOS, don't @@ -1216,13 +1202,14 @@ static int i8042_controller_resume(bool s2r_wants_reset) if (i8042_mux_present) { if (i8042_set_mux_mode(true, NULL) || i8042_enable_mux_ports()) pr_warn("failed to resume active multiplexor, mouse won't work\n"); - } else if (i8042_ports[I8042_AUX_PORT_NO].serio) + } else if (i8042_ports[I8042_AUX_PORT_NO].serio) { i8042_enable_aux_port(); + } if (i8042_ports[I8042_KBD_PORT_NO].serio) i8042_enable_kbd_port(); - i8042_interrupt(0, NULL); + i8042_handle_data(0); return 0; } @@ -1253,7 +1240,7 @@ static int i8042_pm_suspend(struct device *dev) static int i8042_pm_resume_noirq(struct device *dev) { if (i8042_forcenorestore || !pm_resume_via_firmware()) - i8042_interrupt(0, NULL); + i8042_handle_data(0); return 0; } @@ -1290,7 +1277,7 @@ static int i8042_pm_resume(struct device *dev) static int i8042_pm_thaw(struct device *dev) { - i8042_interrupt(0, NULL); + i8042_handle_data(0); return 0; } @@ -1603,7 +1590,7 @@ static struct platform_driver i8042_driver = { #endif }, .probe = i8042_probe, - .remove_new = i8042_remove, + .remove = i8042_remove, .shutdown = i8042_shutdown, }; diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c index 676b0bda3d72..d2c7ffb9a946 100644 --- a/drivers/input/serio/ioc3kbd.c +++ b/drivers/input/serio/ioc3kbd.c @@ -208,7 +208,7 @@ MODULE_DEVICE_TABLE(platform, ioc3kbd_id_table); static struct platform_driver ioc3kbd_driver = { .probe = ioc3kbd_probe, - .remove_new = ioc3kbd_remove, + .remove = ioc3kbd_remove, .id_table = ioc3kbd_id_table, .driver = { .name = "ioc3-kbd", diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 6d78a1fe00c1..c22ea532276e 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -108,13 +108,11 @@ int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout) { int retval; - serio_pause_rx(ps2dev->serio); + guard(serio_pause_rx)(ps2dev->serio); retval = ps2_do_sendbyte(ps2dev, byte, timeout, 1); dev_dbg(&ps2dev->serio->dev, "%02x - %x\n", byte, ps2dev->nak); - serio_continue_rx(ps2dev->serio); - return retval; } EXPORT_SYMBOL(ps2_sendbyte); @@ -162,10 +160,10 @@ void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout) ps2_begin_command(ps2dev); - serio_pause_rx(ps2dev->serio); - ps2dev->flags = PS2_FLAG_CMD; - ps2dev->cmdcnt = maxbytes; - serio_continue_rx(ps2dev->serio); + scoped_guard(serio_pause_rx, ps2dev->serio) { + ps2dev->flags = PS2_FLAG_CMD; + ps2dev->cmdcnt = maxbytes; + } wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD), @@ -224,9 +222,9 @@ static int ps2_adjust_timeout(struct ps2dev *ps2dev, * use alternative probe to detect it. */ if (ps2dev->cmdbuf[1] == 0xaa) { - serio_pause_rx(ps2dev->serio); - ps2dev->flags = 0; - serio_continue_rx(ps2dev->serio); + scoped_guard(serio_pause_rx, ps2dev->serio) + ps2dev->flags = 0; + timeout = 0; } @@ -235,9 +233,9 @@ static int ps2_adjust_timeout(struct ps2dev *ps2dev, * won't be 2nd byte of ID response. 
*/ if (!ps2_is_keyboard_id(ps2dev->cmdbuf[1])) { - serio_pause_rx(ps2dev->serio); - ps2dev->flags = ps2dev->cmdcnt = 0; - serio_continue_rx(ps2dev->serio); + scoped_guard(serio_pause_rx, ps2dev->serio) + ps2dev->flags = ps2dev->cmdcnt = 0; + timeout = 0; } break; @@ -283,6 +281,10 @@ int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command) memcpy(send_param, param, send); + /* + * Not using guard notation because we need to break critical + * section below while waiting for the response. + */ serio_pause_rx(ps2dev->serio); ps2dev->cmdcnt = receive; diff --git a/drivers/input/serio/maceps2.c b/drivers/input/serio/maceps2.c index 42ac1eb94866..3d28a5cddd61 100644 --- a/drivers/input/serio/maceps2.c +++ b/drivers/input/serio/maceps2.c @@ -159,7 +159,7 @@ static struct platform_driver maceps2_driver = { .name = "maceps2", }, .probe = maceps2_probe, - .remove_new = maceps2_remove, + .remove = maceps2_remove, }; static int __init maceps2_init(void) diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c index 0ad95e880cc2..a24324830021 100644 --- a/drivers/input/serio/olpc_apsp.c +++ b/drivers/input/serio/olpc_apsp.c @@ -256,7 +256,7 @@ MODULE_DEVICE_TABLE(of, olpc_apsp_dt_ids); static struct platform_driver olpc_apsp_driver = { .probe = olpc_apsp_probe, - .remove_new = olpc_apsp_remove, + .remove = olpc_apsp_remove, .driver = { .name = "olpc-apsp", .of_match_table = olpc_apsp_dt_ids, diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index 3a431395c464..93769910ce24 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c @@ -133,12 +133,12 @@ static int ps2_gpio_write(struct serio *serio, unsigned char val) int ret = 0; if (in_task()) { - mutex_lock(&drvdata->tx.mutex); + guard(mutex)(&drvdata->tx.mutex); + __ps2_gpio_write(serio, val); if (!wait_for_completion_timeout(&drvdata->tx.complete, msecs_to_jiffies(10000))) ret = SERIO_TIMEOUT; - mutex_unlock(&drvdata->tx.mutex); } else { __ps2_gpio_write(serio, val); } @@ -491,7 +491,7 @@ MODULE_DEVICE_TABLE(of, ps2_gpio_match); static struct platform_driver ps2_gpio_driver = { .probe = ps2_gpio_probe, - .remove_new = ps2_gpio_remove, + .remove = ps2_gpio_remove, .driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(ps2_gpio_match), diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c index 937ecdea491d..b96cee52fc52 100644 --- a/drivers/input/serio/ps2mult.c +++ b/drivers/input/serio/ps2mult.c @@ -76,9 +76,8 @@ static int ps2mult_serio_write(struct serio *serio, unsigned char data) struct ps2mult *psm = serio_get_drvdata(mx_port); struct ps2mult_port *port = serio->port_data; bool need_escape; - unsigned long flags; - spin_lock_irqsave(&psm->lock, flags); + guard(spinlock_irqsave)(&psm->lock); if (psm->out_port != port) ps2mult_select_port(psm, port); @@ -93,8 +92,6 @@ static int ps2mult_serio_write(struct serio *serio, unsigned char data) serio_write(mx_port, data); - spin_unlock_irqrestore(&psm->lock, flags); - return 0; } @@ -102,11 +99,10 @@ static int ps2mult_serio_start(struct serio *serio) { struct ps2mult *psm = serio_get_drvdata(serio->parent); struct ps2mult_port *port = serio->port_data; - unsigned long flags; - spin_lock_irqsave(&psm->lock, flags); + guard(spinlock_irqsave)(&psm->lock); + port->registered = true; - spin_unlock_irqrestore(&psm->lock, flags); return 0; } @@ -115,11 +111,10 @@ static void ps2mult_serio_stop(struct serio *serio) { struct ps2mult *psm = serio_get_drvdata(serio->parent); struct 
ps2mult_port *port = serio->port_data; - unsigned long flags; - spin_lock_irqsave(&psm->lock, flags); + guard(spinlock_irqsave)(&psm->lock); + port->registered = false; - spin_unlock_irqrestore(&psm->lock, flags); } static int ps2mult_create_port(struct ps2mult *psm, int i) @@ -148,16 +143,12 @@ static int ps2mult_create_port(struct ps2mult *psm, int i) static void ps2mult_reset(struct ps2mult *psm) { - unsigned long flags; - - spin_lock_irqsave(&psm->lock, flags); + guard(spinlock_irqsave)(&psm->lock); serio_write(psm->mx_serio, PS2MULT_SESSION_END); serio_write(psm->mx_serio, PS2MULT_SESSION_START); ps2mult_select_port(psm, &psm->ports[PS2MULT_KBD_PORT]); - - spin_unlock_irqrestore(&psm->lock, flags); } static int ps2mult_connect(struct serio *serio, struct serio_driver *drv) @@ -234,11 +225,10 @@ static irqreturn_t ps2mult_interrupt(struct serio *serio, { struct ps2mult *psm = serio_get_drvdata(serio); struct ps2mult_port *in_port; - unsigned long flags; dev_dbg(&serio->dev, "Received %02x flags %02x\n", data, dfl); - spin_lock_irqsave(&psm->lock, flags); + guard(spinlock_irqsave)(&psm->lock); if (psm->escape) { psm->escape = false; @@ -285,7 +275,6 @@ static irqreturn_t ps2mult_interrupt(struct serio *serio, } out: - spin_unlock_irqrestore(&psm->lock, flags); return IRQ_HANDLED; } diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c index cd4d5be946a3..ae55c4de092f 100644 --- a/drivers/input/serio/q40kbd.c +++ b/drivers/input/serio/q40kbd.c @@ -39,17 +39,14 @@ struct q40kbd { static irqreturn_t q40kbd_interrupt(int irq, void *dev_id) { struct q40kbd *q40kbd = dev_id; - unsigned long flags; - spin_lock_irqsave(&q40kbd->lock, flags); + guard(spinlock_irqsave)(&q40kbd->lock); if (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG)) serio_interrupt(q40kbd->port, master_inb(KEYCODE_REG), 0); master_outb(-1, KEYBOARD_UNLOCK_REG); - spin_unlock_irqrestore(&q40kbd->lock, flags); - return IRQ_HANDLED; } @@ -60,14 +57,11 @@ static irqreturn_t q40kbd_interrupt(int irq, void *dev_id) static void q40kbd_flush(struct q40kbd *q40kbd) { int maxread = 100; - unsigned long flags; - spin_lock_irqsave(&q40kbd->lock, flags); + guard(spinlock_irqsave)(&q40kbd->lock); while (maxread-- && (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG))) master_inb(KEYCODE_REG); - - spin_unlock_irqrestore(&q40kbd->lock, flags); } static void q40kbd_stop(void) @@ -166,7 +160,7 @@ static struct platform_driver q40kbd_driver = { .driver = { .name = "q40kbd", }, - .remove_new = q40kbd_remove, + .remove = q40kbd_remove, }; module_platform_driver_probe(q40kbd_driver, q40kbd_probe); diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c index e236bb7e1014..c65c552b0c45 100644 --- a/drivers/input/serio/rpckbd.c +++ b/drivers/input/serio/rpckbd.c @@ -144,7 +144,7 @@ static void rpckbd_remove(struct platform_device *dev) static struct platform_driver rpckbd_driver = { .probe = rpckbd_probe, - .remove_new = rpckbd_remove, + .remove = rpckbd_remove, .driver = { .name = "kart", }, diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c index 1311caf7dba4..375c6f5f905c 100644 --- a/drivers/input/serio/sa1111ps2.c +++ b/drivers/input/serio/sa1111ps2.c @@ -92,7 +92,8 @@ static irqreturn_t ps2_txint(int irq, void *dev_id) struct ps2if *ps2if = dev_id; unsigned int status; - spin_lock(&ps2if->lock); + guard(spinlock)(&ps2if->lock); + status = readl_relaxed(ps2if->base + PS2STAT); if (ps2if->head == ps2if->tail) { disable_irq_nosync(irq); @@ -101,7 +102,6 @@ static irqreturn_t ps2_txint(int 
irq, void *dev_id) writel_relaxed(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA); ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1); } - spin_unlock(&ps2if->lock); return IRQ_HANDLED; } @@ -113,10 +113,9 @@ static irqreturn_t ps2_txint(int irq, void *dev_id) static int ps2_write(struct serio *io, unsigned char val) { struct ps2if *ps2if = io->port_data; - unsigned long flags; unsigned int head; - spin_lock_irqsave(&ps2if->lock, flags); + guard(spinlock_irqsave)(&ps2if->lock); /* * If the TX register is empty, we can go straight out. @@ -133,7 +132,6 @@ static int ps2_write(struct serio *io, unsigned char val) } } - spin_unlock_irqrestore(&ps2if->lock, flags); return 0; } diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 97d8eacb9112..4468018cef66 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -38,33 +38,27 @@ static void serio_attach_driver(struct serio_driver *drv); static int serio_connect_driver(struct serio *serio, struct serio_driver *drv) { - int retval; - - mutex_lock(&serio->drv_mutex); - retval = drv->connect(serio, drv); - mutex_unlock(&serio->drv_mutex); + guard(mutex)(&serio->drv_mutex); - return retval; + return drv->connect(serio, drv); } static int serio_reconnect_driver(struct serio *serio) { - int retval = -1; + guard(mutex)(&serio->drv_mutex); - mutex_lock(&serio->drv_mutex); if (serio->drv && serio->drv->reconnect) - retval = serio->drv->reconnect(serio); - mutex_unlock(&serio->drv_mutex); + return serio->drv->reconnect(serio); - return retval; + return -1; } static void serio_disconnect_driver(struct serio *serio) { - mutex_lock(&serio->drv_mutex); + guard(mutex)(&serio->drv_mutex); + if (serio->drv) serio->drv->disconnect(serio); - mutex_unlock(&serio->drv_mutex); } static int serio_match_port(const struct serio_device_id *ids, struct serio *serio) @@ -147,9 +141,8 @@ static LIST_HEAD(serio_event_list); static struct serio_event *serio_get_event(void) { struct serio_event *event = NULL; - unsigned long flags; - spin_lock_irqsave(&serio_event_lock, flags); + guard(spinlock_irqsave)(&serio_event_lock); if (!list_empty(&serio_event_list)) { event = list_first_entry(&serio_event_list, @@ -157,7 +150,6 @@ static struct serio_event *serio_get_event(void) list_del_init(&event->node); } - spin_unlock_irqrestore(&serio_event_lock, flags); return event; } @@ -171,9 +163,8 @@ static void serio_remove_duplicate_events(void *object, enum serio_event_type type) { struct serio_event *e, *next; - unsigned long flags; - spin_lock_irqsave(&serio_event_lock, flags); + guard(spinlock_irqsave)(&serio_event_lock); list_for_each_entry_safe(e, next, &serio_event_list, node) { if (object == e->object) { @@ -189,15 +180,13 @@ static void serio_remove_duplicate_events(void *object, serio_free_event(e); } } - - spin_unlock_irqrestore(&serio_event_lock, flags); } static void serio_handle_event(struct work_struct *work) { struct serio_event *event; - mutex_lock(&serio_mutex); + guard(mutex)(&serio_mutex); while ((event = serio_get_event())) { @@ -228,8 +217,6 @@ static void serio_handle_event(struct work_struct *work) serio_remove_duplicate_events(event->object, event->type); serio_free_event(event); } - - mutex_unlock(&serio_mutex); } static DECLARE_WORK(serio_event_work, serio_handle_event); @@ -237,11 +224,9 @@ static DECLARE_WORK(serio_event_work, serio_handle_event); static int serio_queue_event(void *object, struct module *owner, enum serio_event_type event_type) { - unsigned long flags; struct serio_event *event; - int 
retval = 0; - spin_lock_irqsave(&serio_event_lock, flags); + guard(spinlock_irqsave)(&serio_event_lock); /* * Scan event list for the other events for the same serio port, @@ -253,7 +238,7 @@ static int serio_queue_event(void *object, struct module *owner, list_for_each_entry_reverse(event, &serio_event_list, node) { if (event->object == object) { if (event->type == event_type) - goto out; + return 0; break; } } @@ -261,16 +246,14 @@ static int serio_queue_event(void *object, struct module *owner, event = kmalloc(sizeof(*event), GFP_ATOMIC); if (!event) { pr_err("Not enough memory to queue event %d\n", event_type); - retval = -ENOMEM; - goto out; + return -ENOMEM; } if (!try_module_get(owner)) { pr_warn("Can't get module reference, dropping event %d\n", event_type); kfree(event); - retval = -EINVAL; - goto out; + return -EINVAL; } event->type = event_type; @@ -280,9 +263,7 @@ static int serio_queue_event(void *object, struct module *owner, list_add_tail(&event->node, &serio_event_list); queue_work(system_long_wq, &serio_event_work); -out: - spin_unlock_irqrestore(&serio_event_lock, flags); - return retval; + return 0; } /* @@ -292,9 +273,8 @@ out: static void serio_remove_pending_events(void *object) { struct serio_event *event, *next; - unsigned long flags; - spin_lock_irqsave(&serio_event_lock, flags); + guard(spinlock_irqsave)(&serio_event_lock); list_for_each_entry_safe(event, next, &serio_event_list, node) { if (event->object == object) { @@ -302,8 +282,6 @@ static void serio_remove_pending_events(void *object) serio_free_event(event); } } - - spin_unlock_irqrestore(&serio_event_lock, flags); } /* @@ -315,23 +293,19 @@ static void serio_remove_pending_events(void *object) static struct serio *serio_get_pending_child(struct serio *parent) { struct serio_event *event; - struct serio *serio, *child = NULL; - unsigned long flags; + struct serio *serio; - spin_lock_irqsave(&serio_event_lock, flags); + guard(spinlock_irqsave)(&serio_event_lock); list_for_each_entry(event, &serio_event_list, node) { if (event->type == SERIO_REGISTER_PORT) { serio = event->object; - if (serio->parent == parent) { - child = serio; - break; - } + if (serio->parent == parent) + return serio; } } - spin_unlock_irqrestore(&serio_event_lock, flags); - return child; + return NULL; } /* @@ -382,29 +356,27 @@ static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, c struct device_driver *drv; int error; - error = mutex_lock_interruptible(&serio_mutex); - if (error) - return error; - - if (!strncmp(buf, "none", count)) { - serio_disconnect_port(serio); - } else if (!strncmp(buf, "reconnect", count)) { - serio_reconnect_subtree(serio); - } else if (!strncmp(buf, "rescan", count)) { - serio_disconnect_port(serio); - serio_find_driver(serio); - serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); - } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { - serio_disconnect_port(serio); - error = serio_bind_driver(serio, to_serio_driver(drv)); - serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); - } else { - error = -EINVAL; + scoped_cond_guard(mutex_intr, return -EINTR, &serio_mutex) { + if (!strncmp(buf, "none", count)) { + serio_disconnect_port(serio); + } else if (!strncmp(buf, "reconnect", count)) { + serio_reconnect_subtree(serio); + } else if (!strncmp(buf, "rescan", count)) { + serio_disconnect_port(serio); + serio_find_driver(serio); + serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); + } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { + 
serio_disconnect_port(serio); + error = serio_bind_driver(serio, to_serio_driver(drv)); + serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); + if (error) + return error; + } else { + return -EINVAL; + } } - mutex_unlock(&serio_mutex); - - return error ? error : count; + return count; } static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf) @@ -526,9 +498,9 @@ static void serio_add_port(struct serio *serio) int error; if (parent) { - serio_pause_rx(parent); + guard(serio_pause_rx)(parent); + list_add_tail(&serio->child_node, &parent->children); - serio_continue_rx(parent); } list_add_tail(&serio->node, &serio_list); @@ -560,9 +532,9 @@ static void serio_destroy_port(struct serio *serio) serio->stop(serio); if (serio->parent) { - serio_pause_rx(serio->parent); + guard(serio_pause_rx)(serio->parent); + list_del_init(&serio->child_node); - serio_continue_rx(serio->parent); serio->parent = NULL; } @@ -701,10 +673,10 @@ EXPORT_SYMBOL(__serio_register_port); */ void serio_unregister_port(struct serio *serio) { - mutex_lock(&serio_mutex); + guard(mutex)(&serio_mutex); + serio_disconnect_port(serio); serio_destroy_port(serio); - mutex_unlock(&serio_mutex); } EXPORT_SYMBOL(serio_unregister_port); @@ -715,12 +687,12 @@ void serio_unregister_child_port(struct serio *serio) { struct serio *s, *next; - mutex_lock(&serio_mutex); + guard(mutex)(&serio_mutex); + list_for_each_entry_safe(s, next, &serio->children, child_node) { serio_disconnect_port(s); serio_destroy_port(s); } - mutex_unlock(&serio_mutex); } EXPORT_SYMBOL(serio_unregister_child_port); @@ -784,10 +756,10 @@ static void serio_driver_remove(struct device *dev) static void serio_cleanup(struct serio *serio) { - mutex_lock(&serio->drv_mutex); + guard(mutex)(&serio->drv_mutex); + if (serio->drv && serio->drv->cleanup) serio->drv->cleanup(serio); - mutex_unlock(&serio->drv_mutex); } static void serio_shutdown(struct device *dev) @@ -850,7 +822,7 @@ void serio_unregister_driver(struct serio_driver *drv) { struct serio *serio; - mutex_lock(&serio_mutex); + guard(mutex)(&serio_mutex); drv->manual_bind = true; /* so serio_find_driver ignores it */ serio_remove_pending_events(drv); @@ -866,15 +838,14 @@ start_over: } driver_unregister(&drv->driver); - mutex_unlock(&serio_mutex); } EXPORT_SYMBOL(serio_unregister_driver); static void serio_set_drv(struct serio *serio, struct serio_driver *drv) { - serio_pause_rx(serio); + guard(serio_pause_rx)(serio); + serio->drv = drv; - serio_continue_rx(serio); } static int serio_bus_match(struct device *dev, const struct device_driver *drv) @@ -935,14 +906,14 @@ static int serio_resume(struct device *dev) struct serio *serio = to_serio_port(dev); int error = -ENOENT; - mutex_lock(&serio->drv_mutex); - if (serio->drv && serio->drv->fast_reconnect) { - error = serio->drv->fast_reconnect(serio); - if (error && error != -ENOENT) - dev_warn(dev, "fast reconnect failed with error %d\n", - error); + scoped_guard(mutex, &serio->drv_mutex) { + if (serio->drv && serio->drv->fast_reconnect) { + error = serio->drv->fast_reconnect(serio); + if (error && error != -ENOENT) + dev_warn(dev, "fast reconnect failed with error %d\n", + error); + } } - mutex_unlock(&serio->drv_mutex); if (error) { /* @@ -989,21 +960,17 @@ EXPORT_SYMBOL(serio_close); irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int dfl) { - unsigned long flags; - irqreturn_t ret = IRQ_NONE; + guard(spinlock_irqsave)(&serio->lock); - spin_lock_irqsave(&serio->lock, flags); + if 
(likely(serio->drv)) + return serio->drv->interrupt(serio, data, dfl); - if (likely(serio->drv)) { - ret = serio->drv->interrupt(serio, data, dfl); - } else if (!dfl && device_is_registered(&serio->dev)) { + if (!dfl && device_is_registered(&serio->dev)) { serio_rescan(serio); - ret = IRQ_HANDLED; + return IRQ_HANDLED; } - spin_unlock_irqrestore(&serio->lock, flags); - - return ret; + return IRQ_NONE; } EXPORT_SYMBOL(serio_interrupt); diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 0186d1b38f49..4d6395088986 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -29,7 +29,7 @@ struct serio_raw { unsigned char queue[SERIO_RAW_QUEUE_LEN]; unsigned int tail, head; - char name[16]; + char name[20]; struct kref kref; struct serio *serio; struct miscdevice dev; @@ -75,41 +75,31 @@ static int serio_raw_open(struct inode *inode, struct file *file) { struct serio_raw *serio_raw; struct serio_raw_client *client; - int retval; - retval = mutex_lock_interruptible(&serio_raw_mutex); - if (retval) - return retval; + scoped_guard(mutex_intr, &serio_raw_mutex) { + serio_raw = serio_raw_locate(iminor(inode)); + if (!serio_raw) + return -ENODEV; - serio_raw = serio_raw_locate(iminor(inode)); - if (!serio_raw) { - retval = -ENODEV; - goto out; - } + if (serio_raw->dead) + return -ENODEV; - if (serio_raw->dead) { - retval = -ENODEV; - goto out; - } + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) { - retval = -ENOMEM; - goto out; - } + client->serio_raw = serio_raw; + file->private_data = client; - client->serio_raw = serio_raw; - file->private_data = client; + kref_get(&serio_raw->kref); - kref_get(&serio_raw->kref); + scoped_guard(serio_pause_rx, serio_raw->serio) + list_add_tail(&client->node, &serio_raw->client_list); - serio_pause_rx(serio_raw->serio); - list_add_tail(&client->node, &serio_raw->client_list); - serio_continue_rx(serio_raw->serio); + return 0; + } -out: - mutex_unlock(&serio_raw_mutex); - return retval; + return -EINTR; } static void serio_raw_free(struct kref *kref) @@ -126,9 +116,8 @@ static int serio_raw_release(struct inode *inode, struct file *file) struct serio_raw_client *client = file->private_data; struct serio_raw *serio_raw = client->serio_raw; - serio_pause_rx(serio_raw->serio); - list_del(&client->node); - serio_continue_rx(serio_raw->serio); + scoped_guard(serio_pause_rx, serio_raw->serio) + list_del(&client->node); kfree(client); @@ -139,19 +128,15 @@ static int serio_raw_release(struct inode *inode, struct file *file) static bool serio_raw_fetch_byte(struct serio_raw *serio_raw, char *c) { - bool empty; + guard(serio_pause_rx)(serio_raw->serio); - serio_pause_rx(serio_raw->serio); - - empty = serio_raw->head == serio_raw->tail; - if (!empty) { - *c = serio_raw->queue[serio_raw->tail]; - serio_raw->tail = (serio_raw->tail + 1) % SERIO_RAW_QUEUE_LEN; - } + if (serio_raw->head == serio_raw->tail) + return false; /* queue is empty */ - serio_continue_rx(serio_raw->serio); + *c = serio_raw->queue[serio_raw->tail]; + serio_raw->tail = (serio_raw->tail + 1) % SERIO_RAW_QUEUE_LEN; - return !empty; + return true; } static ssize_t serio_raw_read(struct file *file, char __user *buffer, @@ -200,40 +185,32 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, { struct serio_raw_client *client = file->private_data; struct serio_raw *serio_raw = client->serio_raw; - int retval = 0; + int written = 0; 
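A note on the pattern: serio_raw_open() above and serio_raw_write() below both switch to scoped_guard(mutex_intr, ...), the conditional guard built around mutex_lock_interruptible(). A minimal sketch of its semantics follows; the demo_* names are hypothetical, not part of the patch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);	/* hypothetical lock */

static int demo_op(void)
{
	/*
	 * The body runs only if the interruptible lock attempt succeeds;
	 * the mutex is dropped automatically on any exit from the scope.
	 * If a signal interrupts the wait, the body is skipped and control
	 * falls through to the -EINTR return below.
	 */
	scoped_guard(mutex_intr, &demo_mutex) {
		return 0;
	}

	return -EINTR;
}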
unsigned char c; - retval = mutex_lock_interruptible(&serio_raw_mutex); - if (retval) - return retval; + scoped_guard(mutex_intr, &serio_raw_mutex) { + if (serio_raw->dead) + return -ENODEV; - if (serio_raw->dead) { - retval = -ENODEV; - goto out; - } + if (count > 32) + count = 32; - if (count > 32) - count = 32; + while (count--) { + if (get_user(c, buffer++)) + return -EFAULT; - while (count--) { - if (get_user(c, buffer++)) { - retval = -EFAULT; - goto out; - } + if (serio_write(serio_raw->serio, c)) { + /* Either signal error or partial write */ + return written ?: -EIO; + } - if (serio_write(serio_raw->serio, c)) { - /* Either signal error or partial write */ - if (retval == 0) - retval = -EIO; - goto out; + written++; } - retval++; + return written; } -out: - mutex_unlock(&serio_raw_mutex); - return retval; + return -EINTR; } static __poll_t serio_raw_poll(struct file *file, poll_table *wait) @@ -300,7 +277,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv) } snprintf(serio_raw->name, sizeof(serio_raw->name), - "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no)); + "serio_raw%u", atomic_inc_return(&serio_raw_no)); kref_init(&serio_raw->kref); INIT_LIST_HEAD(&serio_raw->client_list); init_waitqueue_head(&serio_raw->wait); @@ -379,10 +356,10 @@ static void serio_raw_hangup(struct serio_raw *serio_raw) { struct serio_raw_client *client; - serio_pause_rx(serio_raw->serio); - list_for_each_entry(client, &serio_raw->client_list, node) - kill_fasync(&client->fasync, SIGIO, POLL_HUP); - serio_continue_rx(serio_raw->serio); + scoped_guard(serio_pause_rx, serio_raw->serio) { + list_for_each_entry(client, &serio_raw->client_list, node) + kill_fasync(&client->fasync, SIGIO, POLL_HUP); + } wake_up_interruptible(&serio_raw->wait); } @@ -394,10 +371,10 @@ static void serio_raw_disconnect(struct serio *serio) misc_deregister(&serio_raw->dev); - mutex_lock(&serio_raw_mutex); - serio_raw->dead = true; - list_del_init(&serio_raw->node); - mutex_unlock(&serio_raw_mutex); + scoped_guard(mutex, &serio_raw_mutex) { + serio_raw->dead = true; + list_del_init(&serio_raw->node); + } serio_raw_hangup(serio_raw); diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c index 5a2b5404ffc2..74ac88796187 100644 --- a/drivers/input/serio/serport.c +++ b/drivers/input/serio/serport.c @@ -50,11 +50,9 @@ static int serport_serio_write(struct serio *serio, unsigned char data) static int serport_serio_open(struct serio *serio) { struct serport *serport = serio->port_data; - unsigned long flags; - spin_lock_irqsave(&serport->lock, flags); + guard(spinlock_irqsave)(&serport->lock); set_bit(SERPORT_ACTIVE, &serport->flags); - spin_unlock_irqrestore(&serport->lock, flags); return 0; } @@ -63,11 +61,9 @@ static int serport_serio_open(struct serio *serio) static void serport_serio_close(struct serio *serio) { struct serport *serport = serio->port_data; - unsigned long flags; - spin_lock_irqsave(&serport->lock, flags); + guard(spinlock_irqsave)(&serport->lock); clear_bit(SERPORT_ACTIVE, &serport->flags); - spin_unlock_irqrestore(&serport->lock, flags); } /* @@ -118,14 +114,13 @@ static void serport_ldisc_receive(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct serport *serport = tty->disc_data; - unsigned long flags; unsigned int ch_flags = 0; int i; - spin_lock_irqsave(&serport->lock, flags); + guard(spinlock_irqsave)(&serport->lock); if (!test_bit(SERPORT_ACTIVE, &serport->flags)) - goto out; + return; for (i = 0; i < count; i++) { if (fp) { @@ 
-146,9 +141,6 @@ static void serport_ldisc_receive(struct tty_struct *tty, const u8 *cp, serio_interrupt(serport->serio, cp[i], ch_flags); } - -out: - spin_unlock_irqrestore(&serport->lock, flags); } /* @@ -246,11 +238,9 @@ static int serport_ldisc_compat_ioctl(struct tty_struct *tty, static void serport_ldisc_hangup(struct tty_struct *tty) { struct serport *serport = tty->disc_data; - unsigned long flags; - spin_lock_irqsave(&serport->lock, flags); - set_bit(SERPORT_DEAD, &serport->flags); - spin_unlock_irqrestore(&serport->lock, flags); + scoped_guard(spinlock_irqsave, &serport->lock) + set_bit(SERPORT_DEAD, &serport->flags); wake_up_interruptible(&serport->wait); } @@ -258,12 +248,11 @@ static void serport_ldisc_hangup(struct tty_struct *tty) static void serport_ldisc_write_wakeup(struct tty_struct * tty) { struct serport *serport = tty->disc_data; - unsigned long flags; - spin_lock_irqsave(&serport->lock, flags); + guard(spinlock_irqsave)(&serport->lock); + if (test_bit(SERPORT_ACTIVE, &serport->flags)) serio_drv_write_wakeup(serport->serio); - spin_unlock_irqrestore(&serport->lock, flags); } /* diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c index 95cd8aaee65d..524929ce1cae 100644 --- a/drivers/input/serio/sun4i-ps2.c +++ b/drivers/input/serio/sun4i-ps2.c @@ -101,7 +101,7 @@ static irqreturn_t sun4i_ps2_interrupt(int irq, void *dev_id) unsigned int rxflags = 0; u32 rval; - spin_lock(&drvdata->lock); + guard(spinlock)(&drvdata->lock); /* Get the PS/2 interrupts and clear them */ intr_status = readl(drvdata->reg_base + PS2_REG_LSTS); @@ -134,8 +134,6 @@ static irqreturn_t sun4i_ps2_interrupt(int irq, void *dev_id) writel(intr_status, drvdata->reg_base + PS2_REG_LSTS); writel(fifo_status, drvdata->reg_base + PS2_REG_FSTS); - spin_unlock(&drvdata->lock); - return IRQ_HANDLED; } @@ -146,7 +144,6 @@ static int sun4i_ps2_open(struct serio *serio) u32 clk_scdf; u32 clk_pcdf; u32 rval; - unsigned long flags; /* Set line control and enable interrupt */ rval = PS2_LCTL_STOPERREN | PS2_LCTL_ACKERREN @@ -171,9 +168,8 @@ static int sun4i_ps2_open(struct serio *serio) rval = PS2_GCTL_RESET | PS2_GCTL_INTEN | PS2_GCTL_MASTER | PS2_GCTL_BUSEN; - spin_lock_irqsave(&drvdata->lock, flags); + guard(spinlock_irqsave)(&drvdata->lock); writel(rval, drvdata->reg_base + PS2_REG_GCTL); - spin_unlock_irqrestore(&drvdata->lock, flags); return 0; } @@ -322,7 +318,7 @@ MODULE_DEVICE_TABLE(of, sun4i_ps2_match); static struct platform_driver sun4i_ps2_driver = { .probe = sun4i_ps2_probe, - .remove_new = sun4i_ps2_remove, + .remove = sun4i_ps2_remove, .driver = { .name = DRIVER_NAME, .of_match_table = sun4i_ps2_match, diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c index 1ab12b247f98..7f627b08055e 100644 --- a/drivers/input/serio/userio.c +++ b/drivers/input/serio/userio.c @@ -55,18 +55,15 @@ struct userio_device { static int userio_device_write(struct serio *id, unsigned char val) { struct userio_device *userio = id->port_data; - unsigned long flags; - spin_lock_irqsave(&userio->buf_lock, flags); + scoped_guard(spinlock_irqsave, &userio->buf_lock) { + userio->buf[userio->head] = val; + userio->head = (userio->head + 1) % USERIO_BUFSIZE; - userio->buf[userio->head] = val; - userio->head = (userio->head + 1) % USERIO_BUFSIZE; - - if (userio->head == userio->tail) - dev_warn(userio_misc.this_device, - "Buffer overflowed, userio client isn't keeping up"); - - spin_unlock_irqrestore(&userio->buf_lock, flags); + if (userio->head == userio->tail) + 
dev_warn(userio_misc.this_device, + "Buffer overflowed, userio client isn't keeping up"); + } wake_up_interruptible(&userio->waitq); @@ -75,9 +72,8 @@ static int userio_device_write(struct serio *id, unsigned char val) static int userio_char_open(struct inode *inode, struct file *file) { - struct userio_device *userio; - - userio = kzalloc(sizeof(*userio), GFP_KERNEL); + struct userio_device *userio __free(kfree) = + kzalloc(sizeof(*userio), GFP_KERNEL); if (!userio) return -ENOMEM; @@ -86,15 +82,13 @@ static int userio_char_open(struct inode *inode, struct file *file) init_waitqueue_head(&userio->waitq); userio->serio = kzalloc(sizeof(*userio->serio), GFP_KERNEL); - if (!userio->serio) { - kfree(userio); + if (!userio->serio) return -ENOMEM; - } userio->serio->write = userio_device_write; userio->serio->port_data = userio; - file->private_data = userio; + file->private_data = no_free_ptr(userio); return 0; } @@ -118,14 +112,32 @@ static int userio_char_release(struct inode *inode, struct file *file) return 0; } +static size_t userio_fetch_data(struct userio_device *userio, u8 *buf, + size_t count, size_t *copylen) +{ + size_t available, len; + + guard(spinlock_irqsave)(&userio->buf_lock); + + available = CIRC_CNT_TO_END(userio->head, userio->tail, + USERIO_BUFSIZE); + len = min(available, count); + if (len) { + memcpy(buf, &userio->buf[userio->tail], len); + userio->tail = (userio->tail + len) % USERIO_BUFSIZE; + } + + *copylen = len; + return available; +} + static ssize_t userio_char_read(struct file *file, char __user *user_buffer, size_t count, loff_t *ppos) { struct userio_device *userio = file->private_data; int error; - size_t nonwrap_len, copylen; - unsigned char buf[USERIO_BUFSIZE]; - unsigned long flags; + size_t available, copylen; + u8 buf[USERIO_BUFSIZE]; /* * By the time we get here, the data that was waiting might have @@ -135,21 +147,8 @@ static ssize_t userio_char_read(struct file *file, char __user *user_buffer, * of course). 
*/ for (;;) { - spin_lock_irqsave(&userio->buf_lock, flags); - - nonwrap_len = CIRC_CNT_TO_END(userio->head, - userio->tail, - USERIO_BUFSIZE); - copylen = min(nonwrap_len, count); - if (copylen) { - memcpy(buf, &userio->buf[userio->tail], copylen); - userio->tail = (userio->tail + copylen) % - USERIO_BUFSIZE; - } - - spin_unlock_irqrestore(&userio->buf_lock, flags); - - if (nonwrap_len) + available = userio_fetch_data(userio, buf, count, &copylen); + if (available) break; /* buffer was/is empty */ @@ -176,40 +175,21 @@ static ssize_t userio_char_read(struct file *file, char __user *user_buffer, return copylen; } -static ssize_t userio_char_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) +static int userio_execute_cmd(struct userio_device *userio, + const struct userio_cmd *cmd) { - struct userio_device *userio = file->private_data; - struct userio_cmd cmd; - int error; - - if (count != sizeof(cmd)) { - dev_warn(userio_misc.this_device, "Invalid payload size\n"); - return -EINVAL; - } - - if (copy_from_user(&cmd, buffer, sizeof(cmd))) - return -EFAULT; - - error = mutex_lock_interruptible(&userio->mutex); - if (error) - return error; - - switch (cmd.type) { + switch (cmd->type) { case USERIO_CMD_REGISTER: if (!userio->serio->id.type) { dev_warn(userio_misc.this_device, "No port type given on /dev/userio\n"); - - error = -EINVAL; - goto out; + return -EINVAL; } if (userio->running) { dev_warn(userio_misc.this_device, "Begin command sent, but we're already running\n"); - error = -EBUSY; - goto out; + return -EBUSY; } userio->running = true; @@ -220,32 +200,51 @@ static ssize_t userio_char_write(struct file *file, const char __user *buffer, if (userio->running) { dev_warn(userio_misc.this_device, "Can't change port type on an already running userio instance\n"); - error = -EBUSY; - goto out; + return -EBUSY; } - userio->serio->id.type = cmd.data; + userio->serio->id.type = cmd->data; break; case USERIO_CMD_SEND_INTERRUPT: if (!userio->running) { dev_warn(userio_misc.this_device, "The device must be registered before sending interrupts\n"); - error = -ENODEV; - goto out; + return -ENODEV; } - serio_interrupt(userio->serio, cmd.data, 0); + serio_interrupt(userio->serio, cmd->data, 0); break; default: - error = -EOPNOTSUPP; - goto out; + return -EOPNOTSUPP; + } + + return 0; +} + +static ssize_t userio_char_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct userio_device *userio = file->private_data; + struct userio_cmd cmd; + int error; + + if (count != sizeof(cmd)) { + dev_warn(userio_misc.this_device, "Invalid payload size\n"); + return -EINVAL; + } + + if (copy_from_user(&cmd, buffer, sizeof(cmd))) + return -EFAULT; + + scoped_cond_guard(mutex_intr, return -EINTR, &userio->mutex) { + error = userio_execute_cmd(userio, &cmd); + if (error) + return error; + } -out: - mutex_unlock(&userio->mutex); - return error ?: count; + return count; } static __poll_t userio_char_poll(struct file *file, poll_table *wait) diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index 1543267d02ac..01433f0b48f1 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c @@ -155,22 +155,17 @@ static irqreturn_t xps2_interrupt(int irq, void *dev_id) static int sxps2_write(struct serio *pserio, unsigned char c) { struct xps2data *drvdata = pserio->port_data; - unsigned long flags; u32 sr; - int status = -1; - spin_lock_irqsave(&drvdata->lock, flags); + guard(spinlock_irqsave)(&drvdata->lock);
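The sxps2_write() conversion here is the canonical guard() form used throughout this series: the spinlock_irqsave guard takes the lock at its declaration and releases it, restoring interrupt state, on every return path. A standalone sketch with hypothetical demo_* names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo_port {			/* hypothetical device state */
	spinlock_t lock;
	bool tx_full;
	void (*send)(unsigned char c);
};

static int demo_write(struct demo_port *p, unsigned char c)
{
	/* Lock dropped and IRQ flags restored at both returns below */
	guard(spinlock_irqsave)(&p->lock);

	if (p->tx_full)
		return -EAGAIN;

	p->send(c);
	return 0;
}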
/* If the PS/2 transmitter is empty send a byte of data */ sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET); - if (!(sr & XPS2_STATUS_TX_FULL)) { - out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, c); - status = 0; - } + if (sr & XPS2_STATUS_TX_FULL) + return -EAGAIN; - spin_unlock_irqrestore(&drvdata->lock, flags); - - return status; + out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, c); + return 0; } /** @@ -358,7 +353,7 @@ static struct platform_driver xps2_of_driver = { .of_match_table = xps2_of_match, }, .probe = xps2_of_probe, - .remove_new = xps2_of_remove, + .remove = xps2_of_remove, }; module_platform_driver(xps2_of_driver); diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index a68da2988f9c..8d6b71d59793 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -214,6 +214,28 @@ static void pegasus_init(struct work_struct *work) error); } +static int __pegasus_open(struct pegasus *pegasus) +{ + int error; + + guard(mutex)(&pegasus->pm_mutex); + + pegasus->irq->dev = pegasus->usbdev; + if (usb_submit_urb(pegasus->irq, GFP_KERNEL)) + return -EIO; + + error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); + if (error) { + usb_kill_urb(pegasus->irq); + cancel_work_sync(&pegasus->init); + return error; + } + + pegasus->is_open = true; + + return 0; +} + static int pegasus_open(struct input_dev *dev) { struct pegasus *pegasus = input_get_drvdata(dev); @@ -223,39 +245,25 @@ static int pegasus_open(struct input_dev *dev) if (error) return error; - mutex_lock(&pegasus->pm_mutex); - pegasus->irq->dev = pegasus->usbdev; - if (usb_submit_urb(pegasus->irq, GFP_KERNEL)) { - error = -EIO; - goto err_autopm_put; + error = __pegasus_open(pegasus); + if (error) { + usb_autopm_put_interface(pegasus->intf); + return error; } - error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); - if (error) - goto err_kill_urb; - - pegasus->is_open = true; - mutex_unlock(&pegasus->pm_mutex); return 0; - -err_kill_urb: - usb_kill_urb(pegasus->irq); - cancel_work_sync(&pegasus->init); -err_autopm_put: - mutex_unlock(&pegasus->pm_mutex); - usb_autopm_put_interface(pegasus->intf); - return error; } static void pegasus_close(struct input_dev *dev) { struct pegasus *pegasus = input_get_drvdata(dev); - mutex_lock(&pegasus->pm_mutex); - usb_kill_urb(pegasus->irq); - cancel_work_sync(&pegasus->init); - pegasus->is_open = false; - mutex_unlock(&pegasus->pm_mutex); + scoped_guard(mutex, &pegasus->pm_mutex) { + usb_kill_urb(pegasus->irq); + cancel_work_sync(&pegasus->init); + + pegasus->is_open = false; + } usb_autopm_put_interface(pegasus->intf); } @@ -411,10 +419,10 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) { struct pegasus *pegasus = usb_get_intfdata(intf); - mutex_lock(&pegasus->pm_mutex); + guard(mutex)(&pegasus->pm_mutex); + usb_kill_urb(pegasus->irq); cancel_work_sync(&pegasus->init); - mutex_unlock(&pegasus->pm_mutex); return 0; } @@ -422,31 +430,33 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) static int pegasus_resume(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); - int retval = 0; - mutex_lock(&pegasus->pm_mutex); + guard(mutex)(&pegasus->pm_mutex); + if (pegasus->is_open && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) - retval = -EIO; - mutex_unlock(&pegasus->pm_mutex); + return -EIO; - return retval; + return 0; } static int pegasus_reset_resume(struct usb_interface *intf) 
{ struct pegasus *pegasus = usb_get_intfdata(intf); - int retval = 0; + int error; + + guard(mutex)(&pegasus->pm_mutex); - mutex_lock(&pegasus->pm_mutex); if (pegasus->is_open) { - retval = pegasus_set_mode(pegasus, PEN_MODE_XY, + error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); - if (!retval && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) - retval = -EIO; + if (error) + return error; + + if (usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) + return -EIO; } - mutex_unlock(&pegasus->pm_mutex); - return retval; + return 0; } static const struct usb_device_id pegasus_ids[] = { diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c index 81a3ea4b9a3d..0468ce2b216f 100644 --- a/drivers/input/touchscreen/88pm860x-ts.c +++ b/drivers/input/touchscreen/88pm860x-ts.c @@ -117,13 +117,14 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, struct pm860x_chip *chip, int *res_x) { - struct device_node *np = pdev->dev.parent->of_node; struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \ : chip->companion; int data, n, ret; - if (!np) + if (!pdev->dev.parent->of_node) return -ENODEV; - np = of_get_child_by_name(np, "touch"); + + struct device_node *np __free(device_node) = + of_get_child_by_name(pdev->dev.parent->of_node, "touch"); if (!np) { dev_err(&pdev->dev, "Can't find touch node\n"); return -EINVAL; @@ -141,13 +142,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); if (ret < 0) - goto err_put_node; + return -EINVAL; } /* set tsi prebias time */ if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); if (ret < 0) - goto err_put_node; + return -EINVAL; } /* set prebias & prechg time of pen detect */ data = 0; @@ -158,18 +159,11 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); if (ret < 0) - goto err_put_node; + return -EINVAL; } of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); - of_node_put(np); - return 0; - -err_put_node: - of_node_put(np); - - return -EINVAL; } #else #define pm860x_touch_dt_init(x, y, z) (-1) diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 1ac26fc2e3eb..1a03de7fcfa6 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -420,6 +420,7 @@ config TOUCHSCREEN_GOODIX_BERLIN_SPI config TOUCHSCREEN_HIDEEP tristate "HiDeep Touch IC" depends on I2C + select REGMAP_I2C help Say Y here if you have a touchscreen using HiDeep. @@ -431,6 +432,7 @@ config TOUCHSCREEN_HIDEEP config TOUCHSCREEN_HYCON_HY46XX tristate "Hycon hy46xx touchscreen support" depends on I2C + select REGMAP_I2C help Say Y here if you have a touchscreen using Hycon hy46xx diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 607f18af7010..066dc04003fa 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -331,7 +331,7 @@ struct ser_req { u8 ref_off; u16 scratch; struct spi_message msg; - struct spi_transfer xfer[6]; + struct spi_transfer xfer[8]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
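The ser_req transfer array above grows from six to eight entries because the next ads7846 hunk chains two extra transfers onto the same spi_message to clear the command register. As a reminder of how that chaining API works, a minimal sketch (demo_* names are hypothetical; unlike this toy, real drivers such as ads7846 keep transfer buffers in their own cache lines for DMA, per the comment above):

#include <linux/spi/spi.h>

/* Toy two-transfer round trip: send one command byte, read two back */
static int demo_read12(struct spi_device *spi, u8 *cmd, u8 *rx)
{
	struct spi_transfer xfer[2] = {
		{ .tx_buf = cmd, .len = 1 },
		{ .rx_buf = rx,  .len = 2, .cs_change = 1 },
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer[0], &msg);
	spi_message_add_tail(&xfer[1], &msg);

	return spi_sync(spi, &msg);	/* transfers execute in list order */
}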
@@ -405,9 +405,19 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) req->xfer[5].rx_buf = &req->scratch; req->xfer[5].len = 2; - CS_CHANGE(req->xfer[5]); spi_message_add_tail(&req->xfer[5], &req->msg); + /* clear the command register */ + req->scratch = 0; + req->xfer[6].tx_buf = &req->scratch; + req->xfer[6].len = 1; + spi_message_add_tail(&req->xfer[6], &req->msg); + + req->xfer[7].rx_buf = &req->scratch; + req->xfer[7].len = 2; + CS_CHANGE(req->xfer[7]); + spi_message_add_tail(&req->xfer[7], &req->msg); + mutex_lock(&ts->lock); ads7846_stop(ts); status = spi_sync(spi, &req->msg); diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c index 8db2a112a476..363a4a1f1560 100644 --- a/drivers/input/touchscreen/auo-pixcir-ts.c +++ b/drivers/input/touchscreen/auo-pixcir-ts.c @@ -72,7 +72,7 @@ /* * Interrupt modes: - * periodical: interrupt is asserted periodicaly + * periodical: interrupt is asserted periodically * compare coordinates: interrupt is asserted when coordinates change * indicate touch: interrupt is asserted during touch */ diff --git a/drivers/input/touchscreen/bcm_iproc_tsc.c b/drivers/input/touchscreen/bcm_iproc_tsc.c index 9c84235327bf..e49bde50d77a 100644 --- a/drivers/input/touchscreen/bcm_iproc_tsc.c +++ b/drivers/input/touchscreen/bcm_iproc_tsc.c @@ -217,7 +217,7 @@ static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data) "pen up-down (%d)\n", priv->pen_status); } - /* coordinates in FIFO exceed the theshold */ + /* coordinates in FIFO exceed the threshold */ if (intr_status & TS_FIFO_INTR_MASK) { for (i = 0; i < priv->cfg_params.fifo_threshold; i++) { regmap_read(priv->regmap, FIFO_DATA, &raw_coordinate); diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c index 52e0e834e76f..c2d3252f8466 100644 --- a/drivers/input/touchscreen/da9052_tsi.c +++ b/drivers/input/touchscreen/da9052_tsi.c @@ -326,7 +326,7 @@ static void da9052_ts_remove(struct platform_device *pdev) static struct platform_driver da9052_tsi_driver = { .probe = da9052_ts_probe, - .remove_new = da9052_ts_remove, + .remove = da9052_ts_remove, .driver = { .name = "da9052-tsi", }, diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 85c6d8ce003f..0d7bf18e2508 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -1237,7 +1237,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client) } /* - * Check which sleep modes we can support. Power-off requieres the + * Check which sleep modes we can support. Power-off requires the * reset-pin to ensure correct power-down/power-up behaviour. Start with * the EDT_PMODE_POWEROFF test since this is the deepest possible sleep * mode. 
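The elo.c hunk below, like serio_set_drv() and the serio_raw hunks earlier, relies on a serio_pause_rx guard class. Assuming it is defined with DEFINE_GUARD() pairing serio_pause_rx() with serio_continue_rx() (which this series does elsewhere), the pattern reduces to the following sketch; demo_update() is hypothetical:

#include <linux/cleanup.h>
#include <linux/serio.h>

/*
 * Assumed definition, roughly:
 *   DEFINE_GUARD(serio_pause_rx, struct serio *,
 *                serio_pause_rx(_T), serio_continue_rx(_T))
 */
static void demo_update(struct serio *serio, unsigned char *state,
			unsigned char val)
{
	guard(serio_pause_rx)(serio);	/* RX resumes when scope exits */
	*state = val;
}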
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index eb883db55420..ad209e6e82a6 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c @@ -225,10 +225,10 @@ static int elo_command_10(struct elo *elo, unsigned char *packet) mutex_lock(&elo->cmd_mutex); - serio_pause_rx(elo->serio); - elo->expected_packet = toupper(packet[0]); - init_completion(&elo->cmd_done); - serio_continue_rx(elo->serio); + scoped_guard(serio_pause_rx, elo->serio) { + elo->expected_packet = toupper(packet[0]); + init_completion(&elo->cmd_done); + } if (serio_write(elo->serio, ELO10_LEAD_BYTE)) goto out; diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 260c83dc23a2..fa38d70aded7 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -898,7 +898,7 @@ static umode_t ili210x_attributes_visible(struct kobject *kobj, if (attr == &dev_attr_calibrate.attr) return priv->chip->has_calibrate_reg ? attr->mode : 0; - /* Firmware/Kernel/Protocol/BootMode is implememted only for ILI251x */ + /* Firmware/Kernel/Protocol/BootMode is implemented only for ILI251x */ if (!priv->chip->has_firmware_proto) return 0; diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c index aeabf8d057de..abeae9102323 100644 --- a/drivers/input/touchscreen/imagis.c +++ b/drivers/input/touchscreen/imagis.c @@ -395,6 +395,7 @@ static int imagis_resume(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(imagis_pm_ops, imagis_suspend, imagis_resume); +#ifdef CONFIG_OF static const struct imagis_properties imagis_3032c_data = { .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE, .touch_coord_cmd = IST3038C_REG_TOUCH_COORD, @@ -427,7 +428,6 @@ static const struct imagis_properties imagis_3038c_data = { .protocol_b = true, }; -#ifdef CONFIG_OF static const struct of_device_id imagis_of_match[] = { { .compatible = "imagis,ist3032c", .data = &imagis_3032c_data }, { .compatible = "imagis,ist3038", .data = &imagis_3038_data }, diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index bfbebe245040..5abf164ae14c 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c @@ -261,7 +261,7 @@ static void mainstone_wm97xx_remove(struct platform_device *pdev) static struct platform_driver mainstone_wm97xx_driver = { .probe = mainstone_wm97xx_probe, - .remove_new = mainstone_wm97xx_remove, + .remove = mainstone_wm97xx_remove, .driver = { .name = "wm97xx-touch", }, diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c index cbcd6e34efb7..33635da85079 100644 --- a/drivers/input/touchscreen/mc13783_ts.c +++ b/drivers/input/touchscreen/mc13783_ts.c @@ -226,7 +226,7 @@ static void mc13783_ts_remove(struct platform_device *pdev) } static struct platform_driver mc13783_ts_driver = { - .remove_new = mc13783_ts_remove, + .remove = mc13783_ts_remove, .driver = { .name = MC13783_TS_NAME, }, diff --git a/drivers/input/touchscreen/novatek-nvt-ts.c b/drivers/input/touchscreen/novatek-nvt-ts.c index 0afee41ac9de..44b58e0dc1ad 100644 --- a/drivers/input/touchscreen/novatek-nvt-ts.c +++ b/drivers/input/touchscreen/novatek-nvt-ts.c @@ -31,9 +31,6 @@ #define NVT_TS_PARAMS_CHIP_ID 0x0e #define NVT_TS_PARAMS_SIZE 0x0f -#define NVT_TS_SUPPORTED_WAKE_TYPE 0x05 -#define NVT_TS_SUPPORTED_CHIP_ID 0x05 - #define NVT_TS_MAX_TOUCHES 10 #define NVT_TS_MAX_SIZE 4096 @@ -51,10 +48,16 @@ static const int 
nvt_ts_irq_type[4] = { IRQF_TRIGGER_HIGH }; +struct nvt_ts_i2c_chip_data { + u8 wake_type; + u8 chip_id; +}; + struct nvt_ts_data { struct i2c_client *client; struct input_dev *input; struct gpio_desc *reset_gpio; + struct regulator_bulk_data regulators[2]; struct touchscreen_properties prop; int max_touches; u8 buf[NVT_TS_TOUCH_SIZE * NVT_TS_MAX_TOUCHES]; @@ -142,6 +145,13 @@ static irqreturn_t nvt_ts_irq(int irq, void *dev_id) static int nvt_ts_start(struct input_dev *dev) { struct nvt_ts_data *data = input_get_drvdata(dev); + int error; + + error = regulator_bulk_enable(ARRAY_SIZE(data->regulators), data->regulators); + if (error) { + dev_err(&data->client->dev, "failed to enable regulators\n"); + return error; + } enable_irq(data->client->irq); gpiod_set_value_cansleep(data->reset_gpio, 0); @@ -155,6 +165,7 @@ static void nvt_ts_stop(struct input_dev *dev) disable_irq(data->client->irq); gpiod_set_value_cansleep(data->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators); } static int nvt_ts_suspend(struct device *dev) @@ -188,6 +199,7 @@ static int nvt_ts_probe(struct i2c_client *client) struct device *dev = &client->dev; int error, width, height, irq_type; struct nvt_ts_data *data; + const struct nvt_ts_i2c_chip_data *chip; struct input_dev *input; if (!client->irq) { @@ -199,12 +211,35 @@ static int nvt_ts_probe(struct i2c_client *client) if (!data) return -ENOMEM; + chip = device_get_match_data(&client->dev); + if (!chip) + return -EINVAL; + data->client = client; i2c_set_clientdata(client, data); + /* + * VCC is the analog voltage supply + * IOVCC is the digital voltage supply + */ + data->regulators[0].supply = "vcc"; + data->regulators[1].supply = "iovcc"; + error = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators), data->regulators); + if (error) { + dev_err(dev, "cannot get regulators: %d\n", error); + return error; + } + + error = regulator_bulk_enable(ARRAY_SIZE(data->regulators), data->regulators); + if (error) { + dev_err(dev, "failed to enable regulators: %d\n", error); + return error; + } + data->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); error = PTR_ERR_OR_ZERO(data->reset_gpio); if (error) { + regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators); dev_err(dev, "failed to request reset GPIO: %d\n", error); return error; } @@ -214,6 +249,7 @@ static int nvt_ts_probe(struct i2c_client *client) error = nvt_ts_read_data(data->client, NVT_TS_PARAMETERS_START, data->buf, NVT_TS_PARAMS_SIZE); gpiod_set_value_cansleep(data->reset_gpio, 1); /* Put back in reset */ + regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators); if (error) return error; @@ -225,8 +261,8 @@ static int nvt_ts_probe(struct i2c_client *client) if (width > NVT_TS_MAX_SIZE || height >= NVT_TS_MAX_SIZE || data->max_touches > NVT_TS_MAX_TOUCHES || irq_type >= ARRAY_SIZE(nvt_ts_irq_type) || - data->buf[NVT_TS_PARAMS_WAKE_TYPE] != NVT_TS_SUPPORTED_WAKE_TYPE || - data->buf[NVT_TS_PARAMS_CHIP_ID] != NVT_TS_SUPPORTED_CHIP_ID) { + data->buf[NVT_TS_PARAMS_WAKE_TYPE] != chip->wake_type || + data->buf[NVT_TS_PARAMS_CHIP_ID] != chip->chip_id) { dev_err(dev, "Unsupported touchscreen parameters: %*ph\n", NVT_TS_PARAMS_SIZE, data->buf); return -EIO; @@ -277,8 +313,26 @@ static int nvt_ts_probe(struct i2c_client *client) return 0; } +static const struct nvt_ts_i2c_chip_data nvt_nt11205_ts_data = { + .wake_type = 0x05, + .chip_id = 0x05, +}; + +static const struct nvt_ts_i2c_chip_data nvt_nt36672a_ts_data = { + .wake_type = 0x01, + 
.chip_id = 0x08, +}; + +static const struct of_device_id nvt_ts_of_match[] = { + { .compatible = "novatek,nt11205-ts", .data = &nvt_nt11205_ts_data }, + { .compatible = "novatek,nt36672a-ts", .data = &nvt_nt36672a_ts_data }, + { } +}; +MODULE_DEVICE_TABLE(of, nvt_ts_of_match); + static const struct i2c_device_id nvt_ts_i2c_id[] = { - { "NVT-ts" }, + { "nt11205-ts", (unsigned long) &nvt_nt11205_ts_data }, + { "nt36672a-ts", (unsigned long) &nvt_nt36672a_ts_data }, { } }; MODULE_DEVICE_TABLE(i2c, nvt_ts_i2c_id); @@ -287,6 +341,7 @@ static struct i2c_driver nvt_ts_driver = { .driver = { .name = "novatek-nvt-ts", .pm = pm_sleep_ptr(&nvt_ts_pm_ops), + .of_match_table = nvt_ts_of_match, }, .probe = nvt_ts_probe, .id_table = nvt_ts_i2c_id, diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c index 821245019fea..083206a3457b 100644 --- a/drivers/input/touchscreen/pcap_ts.c +++ b/drivers/input/touchscreen/pcap_ts.c @@ -238,7 +238,7 @@ static const struct dev_pm_ops pcap_ts_pm_ops = { static struct platform_driver pcap_ts_driver = { .probe = pcap_ts_probe, - .remove_new = pcap_ts_remove, + .remove = pcap_ts_remove, .driver = { .name = "pcap-ts", .pm = PCAP_TS_PM_OPS, diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c index 83bf27085ebc..dad5786e82a4 100644 --- a/drivers/input/touchscreen/pixcir_i2c_ts.c +++ b/drivers/input/touchscreen/pixcir_i2c_ts.c @@ -44,7 +44,7 @@ enum pixcir_power_mode { /* * Interrupt modes: - * periodical: interrupt is asserted periodicaly + * periodical: interrupt is asserted periodically * diff coordinates: interrupt is asserted when coordinates change * level on touch: interrupt level asserted during touch * pulse on touch: interrupt pulse asserted during touch diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c index 45c575df994e..841d39a449b3 100644 --- a/drivers/input/touchscreen/raspberrypi-ts.c +++ b/drivers/input/touchscreen/raspberrypi-ts.c @@ -122,20 +122,18 @@ static int rpi_ts_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct input_dev *input; - struct device_node *fw_node; struct rpi_firmware *fw; struct rpi_ts *ts; u32 touchbuf; int error; - fw_node = of_get_parent(np); + struct device_node *fw_node __free(device_node) = of_get_parent(np); if (!fw_node) { dev_err(dev, "Missing firmware node\n"); return -ENOENT; } fw = devm_rpi_firmware_get(&pdev->dev, fw_node); - of_node_put(fw_node); if (!fw) return -EPROBE_DEFER; diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c index 0e5cc9fbad17..295d8d75ba32 100644 --- a/drivers/input/touchscreen/rohm_bu21023.c +++ b/drivers/input/touchscreen/rohm_bu21023.c @@ -388,13 +388,13 @@ static int rohm_ts_manual_calibration(struct rohm_ts_data *ts) err_y = (int)READ_CALIB_BUF(PRM1_Y_H) << 2 | READ_CALIB_BUF(PRM1_Y_L); - /* X axis ajust */ + /* X axis adjust */ if (err_x <= 4) calib_x -= AXIS_ADJUST; else if (err_x >= 60) calib_x += AXIS_ADJUST; - /* Y axis ajust */ + /* Y axis adjust */ if (err_y <= 4) calib_y -= AXIS_ADJUST; else if (err_y >= 60) diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c index b204fdb2d22c..a94a1997f96b 100644 --- a/drivers/input/touchscreen/stmpe-ts.c +++ b/drivers/input/touchscreen/stmpe-ts.c @@ -107,7 +107,7 @@ static void stmpe_work(struct work_struct *work) /* * touch_det sometimes get desasserted or just get stuck. 
This appears - * to be a silicon bug, We still have to clearify this with the + * to be a silicon bug, We still have to clarify this with the * manufacture. As a workaround We release the key anyway if the * touch_det keeps coming in after 4ms, while the FIFO contains no value * during the whole time. @@ -140,7 +140,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data) /* * The FIFO sometimes just crashes and stops generating interrupts. This - * appears to be a silicon bug. We still have to clearify this with + * appears to be a silicon bug. We still have to clarify this with * the manufacture. As a workaround we disable the TSC while we are * collecting data and flush the FIFO after reading */ @@ -362,7 +362,7 @@ static struct platform_driver stmpe_ts_driver = { .name = STMPE_TS_NAME, }, .probe = stmpe_input_probe, - .remove_new = stmpe_ts_remove, + .remove = stmpe_ts_remove, }; module_platform_driver(stmpe_ts_driver); diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 92b2b840b4b7..e8286060043a 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -396,12 +396,12 @@ static const struct of_device_id sun4i_ts_of_match[] = { MODULE_DEVICE_TABLE(of, sun4i_ts_of_match); static struct platform_driver sun4i_ts_driver = { - .driver = { + .driver = { .name = "sun4i-ts", .of_match_table = sun4i_ts_of_match, }, .probe = sun4i_ts_probe, - .remove_new = sun4i_ts_remove, + .remove = sun4i_ts_remove, }; module_platform_driver(sun4i_ts_driver); diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 294b7ceded27..93d659ff90aa 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -550,9 +550,9 @@ MODULE_DEVICE_TABLE(of, ti_tsc_dt_ids); static struct platform_driver ti_tsc_driver = { .probe = titsc_probe, - .remove_new = titsc_remove, + .remove = titsc_remove, .driver = { - .name = "TI-am335x-tsc", + .name = "TI-am335x-tsc", .pm = pm_sleep_ptr(&titsc_pm_ops), .of_match_table = ti_tsc_dt_ids, }, diff --git a/drivers/input/touchscreen/ts4800-ts.c b/drivers/input/touchscreen/ts4800-ts.c index 6cf66aadc10e..98422d1e80d6 100644 --- a/drivers/input/touchscreen/ts4800-ts.c +++ b/drivers/input/touchscreen/ts4800-ts.c @@ -110,18 +110,17 @@ static int ts4800_parse_dt(struct platform_device *pdev, { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct device_node *syscon_np; u32 reg, bit; int error; - syscon_np = of_parse_phandle(np, "syscon", 0); + struct device_node *syscon_np __free(device_node) = + of_parse_phandle(np, "syscon", 0); if (!syscon_np) { dev_err(dev, "no syscon property\n"); return -ENODEV; } ts->regmap = syscon_node_to_regmap(syscon_np); - of_node_put(syscon_np); if (IS_ERR(ts->regmap)) { dev_err(dev, "cannot get parent's regmap\n"); return PTR_ERR(ts->regmap); diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c index 9cee26b63341..98f8ec408cad 100644 --- a/drivers/input/touchscreen/wm831x-ts.c +++ b/drivers/input/touchscreen/wm831x-ts.c @@ -387,7 +387,7 @@ static struct platform_driver wm831x_ts_driver = { .name = "wm831x-touch", }, .probe = wm831x_ts_probe, - .remove_new = wm831x_ts_remove, + .remove = wm831x_ts_remove, }; module_platform_driver(wm831x_ts_driver); diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index f01f6cc9b59f..b25771a8df2b 100644 --- 
a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(wm97xx_set_gpio); /* * Codec GPIO pin configuration, this sets pin direction, polarity, - * stickyness and wake up. + * stickiness and wake up. */ void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio, enum wm97xx_gpio_dir dir, enum wm97xx_gpio_pol pol, enum wm97xx_gpio_sticky sticky, @@ -403,7 +403,7 @@ static int wm97xx_read_samples(struct wm97xx *wm) * is actively working with the touchscreen we * don't want to lose the quick response. So we * will slowly increase sleep time after the - * pen is up and quicky restore it to ~one task + * pen is up and quickly restore it to ~one task * switch when pen is down again. */ if (wm->ts_reader_interval < HZ / 10) @@ -876,7 +876,7 @@ static struct platform_driver wm97xx_mfd_driver = { .pm = pm_sleep_ptr(&wm97xx_pm_ops), }, .probe = wm97xx_mfd_probe, - .remove_new = wm97xx_mfd_remove, + .remove = wm97xx_mfd_remove, }; static int __init wm97xx_init(void) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b3aa1f5d5321..47c46e4b739e 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -195,6 +195,7 @@ config MSM_IOMMU source "drivers/iommu/amd/Kconfig" source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" +source "drivers/iommu/riscv/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" @@ -415,6 +416,15 @@ config ARM_SMMU_V3_SVA Say Y here if your system supports SVA extensions such as PCIe PASID and PRI. +config ARM_SMMU_V3_IOMMUFD + bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)" + depends on IOMMUFD + help + Support for IOMMUFD features intended to support virtual machines + with accelerated virtual IOMMUs. + + Say Y here if you are doing development and testing on this feature. 
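The nvt_ts changes above route both supplies through the regulator_bulk_* helpers so power comes up before the controller is taken out of reset and goes down again in nvt_ts_stop(). A minimal sketch of the pattern, with an illustrative driver struct; only the standard <linux/regulator/consumer.h> API is assumed:

        #include <linux/device.h>
        #include <linux/kernel.h>
        #include <linux/regulator/consumer.h>

        struct example_ts {
                struct device *dev;
                struct regulator_bulk_data supplies[2];
        };

        static int example_ts_power_on(struct example_ts *ts)
        {
                int error;

                ts->supplies[0].supply = "vcc";         /* analog supply */
                ts->supplies[1].supply = "iovcc";       /* digital/IO supply */

                /* devm_: the regulators are released automatically on unbind */
                error = devm_regulator_bulk_get(ts->dev, ARRAY_SIZE(ts->supplies),
                                                ts->supplies);
                if (error)
                        return error;

                return regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
        }

        static void example_ts_power_off(struct example_ts *ts)
        {
                regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
        }

The raspberrypi-ts and ts4800-ts hunks nearby make a related cleanup: declaring the node as struct device_node *np __free(device_node) = of_get_parent(...) drops the reference automatically when np goes out of scope, so the manual of_node_put() on every exit path disappears.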
+ config ARM_SMMU_V3_KUNIT_TEST tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 542760d963ec..5e5a83c6c2aa 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ +obj-y += amd/ intel/ arm/ iommufd/ riscv/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 6386fa4556d9..1bef5d55b2f9 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -46,13 +46,15 @@ extern int amd_iommu_gpt_level; extern unsigned long amd_iommu_pgsize_bitmap; /* Protection domain ops */ +void amd_iommu_init_identity_domain(void); struct protection_domain *protection_domain_alloc(unsigned int type, int nid); void protection_domain_free(struct protection_domain *domain); struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev, struct mm_struct *mm); void amd_iommu_domain_free(struct iommu_domain *dom); int iommu_sva_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid); + struct device *dev, ioasid_t pasid, + struct iommu_domain *old); void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, struct iommu_domain *domain); @@ -118,9 +120,14 @@ static inline bool check_feature2(u64 mask) return (amd_iommu_efr2 & mask); } +static inline bool amd_iommu_v2_pgtbl_supported(void) +{ + return (check_feature(FEATURE_GIOSUP) && check_feature(FEATURE_GT)); +} + static inline bool amd_iommu_gt_ppr_supported(void) { - return (check_feature(FEATURE_GT) && + return (amd_iommu_v2_pgtbl_supported() && check_feature(FEATURE_PPR) && check_feature(FEATURE_EPHSUP)); } diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 601fb4ee6900..fdb0357e0bb9 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -565,6 +565,12 @@ struct pdom_dev_data { struct list_head list; }; +/* Keeps track of the IOMMUs attached to protection domain */ +struct pdom_iommu_info { + struct amd_iommu *iommu; /* IOMMUs attach to protection domain */ + u32 refcnt; /* Count of attached dev/pasid per domain/IOMMU */ +}; + /* * This structure contains generic data for IOMMU protection domains * independent of their use. 
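The pdom_iommu_info structure introduced above replaces the fixed dev_iommu[MAX_IOMMUS] counter array with an xarray keyed by iommu->index. A condensed sketch of the get/put refcounting it enables (illustrative names; the caller is assumed to hold the domain lock, as the driver does):

        #include <linux/slab.h>
        #include <linux/xarray.h>

        struct iommu_ref {
                void *iommu;            /* stand-in for struct amd_iommu * */
                u32 refcnt;
        };

        static int iommu_ref_get(struct xarray *xa, void *iommu, unsigned long index)
        {
                struct iommu_ref *ref = xa_load(xa, index);

                if (ref) {
                        ref->refcnt++;
                        return 0;
                }

                ref = kzalloc(sizeof(*ref), GFP_ATOMIC);
                if (!ref)
                        return -ENOMEM;

                ref->iommu = iommu;
                ref->refcnt = 1;

                /* Publish only if the slot is still empty; back out otherwise. */
                if (xa_cmpxchg(xa, index, NULL, ref, GFP_ATOMIC) != NULL) {
                        kfree(ref);
                        return -ENOSPC;
                }
                return 0;
        }

        static void iommu_ref_put(struct xarray *xa, unsigned long index)
        {
                struct iommu_ref *ref = xa_load(xa, index);

                if (ref && --ref->refcnt == 0) {
                        xa_erase(xa, index);
                        kfree(ref);
                }
        }

Compared with the old array, the xarray only holds entries for IOMMUs that actually have devices attached, which is what lets the flush paths later in this diff iterate with xa_for_each() instead of scanning every possible index.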
@@ -578,8 +584,7 @@ struct protection_domain { u16 id; /* the domain id written to the device table */ enum protection_domain_mode pd_mode; /* Track page table type */ bool dirty_tracking; /* dirty tracking is enabled in the domain */ - unsigned dev_cnt; /* devices assigned to this domain */ - unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ + struct xarray iommu_array; /* per-IOMMU reference count */ struct mmu_notifier mn; /* mmu notifier for the SVA domain */ struct list_head dev_data_list; /* List of pdom_dev_data */ @@ -831,7 +836,7 @@ struct devid_map { */ struct iommu_dev_data { /*Protect against attach/detach races */ - spinlock_t lock; + struct mutex mutex; struct list_head list; /* For domain->dev_list */ struct llist_node dev_data_list; /* For global dev_data_list */ @@ -873,12 +878,6 @@ extern struct list_head amd_iommu_pci_seg_list; extern struct list_head amd_iommu_list; /* - * Array with pointers to each IOMMU struct - * The indices are referenced in the protection domains - */ -extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; - -/* * Structure defining one entry in the device table */ struct dev_table_entry { @@ -912,14 +911,14 @@ struct unity_map_entry { /* size of the dma_ops aperture as power of 2 */ extern unsigned amd_iommu_aperture_order; -/* allocation bitmap for domain ids */ -extern unsigned long *amd_iommu_pd_alloc_bitmap; - extern bool amd_iommu_force_isolation; /* Max levels of glxval supported */ extern int amd_iommu_max_glx_val; +/* IDA to track protection domain IDs */ +extern struct ida pdom_ids; + /* Global EFR and EFR2 registers */ extern u64 amd_iommu_efr; extern u64 amd_iommu_efr2; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 43131c3a2172..0e0a531042ac 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -177,9 +177,6 @@ LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */ -/* Array to assign indices to IOMMUs*/ -struct amd_iommu *amd_iommus[MAX_IOMMUS]; - /* Number of IOMMUs present in the system */ static int amd_iommus_present; @@ -194,12 +191,6 @@ bool amd_iommu_force_isolation __read_mostly; unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES; -/* - * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap - * to know which ones are already in use. 
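With amd_iommu_pd_alloc_bitmap and its lock gone, protection-domain IDs come from an IDA, which keeps the bitmap internally and does its own locking. The pattern used by the pdom_id_alloc()/pdom_id_free() helpers later in this diff, with MAX_DOMAIN_ID as in amd_iommu_types.h; ID 0 stays reserved as the "no domain" error value:

        #include <linux/idr.h>

        #define MAX_DOMAIN_ID 65536

        static DEFINE_IDA(pdom_ids);

        static int pdom_id_alloc(void)
        {
                /* Returns a free ID in [1, MAX_DOMAIN_ID - 1], or a negative errno. */
                return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
        }

        static void pdom_id_free(int id)
        {
                ida_free(&pdom_ids, id);
        }

The same API covers the kdump path shown in init.c: ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) reserves one specific ID and fails unless exactly that ID was free.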
- */ -unsigned long *amd_iommu_pd_alloc_bitmap; - enum iommu_init_state { IOMMU_START_STATE, IOMMU_IVRS_DETECTED, @@ -1082,7 +1073,12 @@ static bool __copy_device_table(struct amd_iommu *iommu) if (dte_v && dom_id) { pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; - __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); + /* Reserve the Domain IDs used by previous kernel */ + if (ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) != dom_id) { + pr_err("Failed to reserve domain ID 0x%x\n", dom_id); + memunmap(old_devtb); + return false; + } /* If gcr3 table existed, mask it out */ if (old_devtb[devid].data[0] & DTE_FLAG_GV) { tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; @@ -1744,9 +1740,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, return -ENOSYS; } - /* Index is fine - add IOMMU to the array */ - amd_iommus[iommu->index] = iommu; - /* * Copy data from ACPI table entry to the iommu struct */ @@ -2070,14 +2063,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) init_iommu_perf_ctr(iommu); - if (amd_iommu_pgtable == AMD_IOMMU_V2) { - if (!check_feature(FEATURE_GIOSUP) || - !check_feature(FEATURE_GT)) { - pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); - amd_iommu_pgtable = AMD_IOMMU_V1; - } - } - if (is_rd890_iommu(iommu->dev)) { int i, j; @@ -2172,6 +2157,9 @@ static int __init amd_iommu_init_pci(void) struct amd_iommu_pci_seg *pci_seg; int ret; + /* Init global identity domain before registering IOMMU */ + amd_iommu_init_identity_domain(); + for_each_iommu(iommu) { ret = iommu_init_pci(iommu); if (ret) { @@ -2882,11 +2870,6 @@ static void enable_iommus_vapic(void) #endif } -static void enable_iommus(void) -{ - early_enable_iommus(); -} - static void disable_iommus(void) { struct amd_iommu *iommu; @@ -2913,7 +2896,8 @@ static void amd_iommu_resume(void) iommu_apply_resume_quirks(iommu); /* re-load the hardware */ - enable_iommus(); + for_each_iommu(iommu) + early_enable_iommu(iommu); amd_iommu_enable_interrupts(); } @@ -2994,9 +2978,7 @@ static bool __init check_ioapic_information(void) static void __init free_dma_resources(void) { - iommu_free_pages(amd_iommu_pd_alloc_bitmap, - get_order(MAX_DOMAIN_ID / 8)); - amd_iommu_pd_alloc_bitmap = NULL; + ida_destroy(&pdom_ids); free_unity_maps(); } @@ -3064,20 +3046,6 @@ static int __init early_amd_iommu_init(void) amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); - /* Device table - directly used by all IOMMUs */ - ret = -ENOMEM; - - amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL, - get_order(MAX_DOMAIN_ID / 8)); - if (amd_iommu_pd_alloc_bitmap == NULL) - goto out; - - /* - * never allocate domain 0 because its used as the non-allocated and - * error value placeholder - */ - __set_bit(0, amd_iommu_pd_alloc_bitmap); - /* * now the data structures are allocated and basically initialized * start the real acpi table scan @@ -3091,6 +3059,13 @@ static int __init early_amd_iommu_init(void) FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL) amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; + if (amd_iommu_pgtable == AMD_IOMMU_V2) { + if (!amd_iommu_v2_pgtbl_supported()) { + pr_warn("Cannot enable v2 page table for DMA-API. 
Fallback to v1.\n"); + amd_iommu_pgtable = AMD_IOMMU_V1; + } + } + /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) disable_iommus(); diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 804b788f3f16..f3399087859f 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -118,6 +118,7 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist) */ static bool increase_address_space(struct amd_io_pgtable *pgtable, unsigned long address, + unsigned int page_size_level, gfp_t gfp) { struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; @@ -133,7 +134,8 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable, spin_lock_irqsave(&domain->lock, flags); - if (address <= PM_LEVEL_SIZE(pgtable->mode)) + if (address <= PM_LEVEL_SIZE(pgtable->mode) && + pgtable->mode - 1 >= page_size_level) goto out; ret = false; @@ -163,18 +165,21 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable, gfp_t gfp, bool *updated) { + unsigned long last_addr = address + (page_size - 1); struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; int level, end_lvl; u64 *pte, *page; BUG_ON(!is_power_of_2(page_size)); - while (address > PM_LEVEL_SIZE(pgtable->mode)) { + while (last_addr > PM_LEVEL_SIZE(pgtable->mode) || + pgtable->mode - 1 < PAGE_SIZE_LEVEL(page_size)) { /* * Return an error if there is no memory to update the * page-table. */ - if (!increase_address_space(pgtable, address, gfp)) + if (!increase_address_space(pgtable, last_addr, + PAGE_SIZE_LEVEL(page_size), gfp)) return NULL; } diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index 25b9042fa453..c616de2c5926 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -268,8 +268,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, out: if (updated) { struct protection_domain *pdom = io_pgtable_ops_to_domain(ops); + unsigned long flags; + spin_lock_irqsave(&pdom->lock, flags); amd_iommu_domain_flush_pages(pdom, o_iova, size); + spin_unlock_irqrestore(&pdom->lock, flags); } if (mapped) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 8364cd6fa47d..3f691e1fd22c 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -18,6 +18,7 @@ #include <linux/scatterlist.h> #include <linux/dma-map-ops.h> #include <linux/dma-direct.h> +#include <linux/idr.h> #include <linux/iommu-helper.h> #include <linux/delay.h> #include <linux/amd-iommu.h> @@ -52,8 +53,6 @@ #define HT_RANGE_START (0xfd00000000ULL) #define HT_RANGE_END (0xffffffffffULL) -static DEFINE_SPINLOCK(pd_bitmap_lock); - LIST_HEAD(ioapic_map); LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); @@ -70,9 +69,16 @@ struct iommu_cmd { u32 data[4]; }; +/* + * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap + * to know which ones are already in use. 
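The alloc_pte() rework above grows the page table in two situations: when the end of the mapping (last_addr) lies beyond what the current mode covers, and when the table has too few levels for the PTE that a huge mapping needs. The level arithmetic is the driver's PAGE_SIZE_LEVEL() macro; a sketch of the same computation, assuming the v1 layout of 9 address bits per level above the 4 KiB base:

        #include <linux/bitops.h>

        /*
         * A PTE at level L maps 2^(12 + 9*L) bytes, so a power-of-two
         * page size of 2^N bytes needs a PTE at level (N - 12) / 9.
         */
        static unsigned int page_size_level(unsigned long page_size)
        {
                return (__ffs(page_size) - 12) / 9;
        }

For example, a 2 MiB mapping needs a level 1 PTE and a 1 GiB mapping a level 2 PTE, so a single-level table must be grown before either can be installed; checking only the start address, as the old code did, could also miss that last_addr crosses the table's limit.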
+ */ +DEFINE_IDA(pdom_ids); + struct kmem_cache *amd_iommu_irq_cache; -static void detach_device(struct device *dev); +static int amd_iommu_attach_device(struct iommu_domain *dom, + struct device *dev); static void set_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data); @@ -202,7 +208,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) if (!dev_data) return NULL; - spin_lock_init(&dev_data->lock); + mutex_init(&dev_data->mutex); dev_data->devid = devid; ratelimit_default_init(&dev_data->rs); @@ -555,22 +561,6 @@ static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) setup_aliases(iommu, dev); } -static void amd_iommu_uninit_device(struct device *dev) -{ - struct iommu_dev_data *dev_data; - - dev_data = dev_iommu_priv_get(dev); - if (!dev_data) - return; - - if (dev_data->domain) - detach_device(dev); - - /* - * We keep dev_data around for unplugged devices and reuse it when the - * device is re-plugged - not doing so would introduce a ton of races. - */ -} /**************************************************************************** * @@ -1230,7 +1220,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) if (!iommu->need_sync) return 0; - data = atomic64_add_return(1, &iommu->cmd_sem_val); + data = atomic64_inc_return(&iommu->cmd_sem_val); build_completion_wait(&cmd, iommu, data); raw_spin_lock_irqsave(&iommu->lock, flags); @@ -1249,18 +1239,17 @@ out_unlock: static void domain_flush_complete(struct protection_domain *domain) { - int i; + struct pdom_iommu_info *pdom_iommu_info; + unsigned long i; - for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { - if (domain && !domain->dev_iommu[i]) - continue; + lockdep_assert_held(&domain->lock); - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. - */ - iommu_completion_wait(amd_iommus[i]); - } + /* + * Devices of this domain are behind this IOMMU + * We need to wait for completion of all commands. 
+ */ + xa_for_each(&domain->iommu_array, i, pdom_iommu_info) + iommu_completion_wait(pdom_iommu_info->iommu); } static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) @@ -1442,21 +1431,22 @@ static int domain_flush_pages_v2(struct protection_domain *pdom, static int domain_flush_pages_v1(struct protection_domain *pdom, u64 address, size_t size) { + struct pdom_iommu_info *pdom_iommu_info; struct iommu_cmd cmd; - int ret = 0, i; + int ret = 0; + unsigned long i; + + lockdep_assert_held(&pdom->lock); build_inv_iommu_pages(&cmd, address, size, pdom->id, IOMMU_NO_PASID, false); - for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { - if (!pdom->dev_iommu[i]) - continue; - + xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) { /* * Devices of this domain are behind this IOMMU * We need a TLB flush */ - ret |= iommu_queue_command(amd_iommus[i], &cmd); + ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd); } return ret; @@ -1495,6 +1485,8 @@ static void __domain_flush_pages(struct protection_domain *domain, void amd_iommu_domain_flush_pages(struct protection_domain *domain, u64 address, size_t size) { + lockdep_assert_held(&domain->lock); + if (likely(!amd_iommu_np_cache)) { __domain_flush_pages(domain, address, size); @@ -1640,31 +1632,14 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag) * ****************************************************************************/ -static u16 domain_id_alloc(void) +static int pdom_id_alloc(void) { - unsigned long flags; - int id; - - spin_lock_irqsave(&pd_bitmap_lock, flags); - id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID); - BUG_ON(id == 0); - if (id > 0 && id < MAX_DOMAIN_ID) - __set_bit(id, amd_iommu_pd_alloc_bitmap); - else - id = 0; - spin_unlock_irqrestore(&pd_bitmap_lock, flags); - - return id; + return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC); } -static void domain_id_free(int id) +static void pdom_id_free(int id) { - unsigned long flags; - - spin_lock_irqsave(&pd_bitmap_lock, flags); - if (id > 0 && id < MAX_DOMAIN_ID) - __clear_bit(id, amd_iommu_pd_alloc_bitmap); - spin_unlock_irqrestore(&pd_bitmap_lock, flags); + ida_free(&pdom_ids, id); } static void free_gcr3_tbl_level1(u64 *tbl) @@ -1709,7 +1684,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info) gcr3_info->glx = 0; /* Free per device domain ID */ - domain_id_free(gcr3_info->domid); + pdom_id_free(gcr3_info->domid); iommu_free_page(gcr3_info->gcr3_tbl); gcr3_info->gcr3_tbl = NULL; @@ -1736,6 +1711,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info, { int levels = get_gcr3_levels(pasids); int nid = iommu ? 
dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; + int domid; if (levels > amd_iommu_max_glx_val) return -EINVAL; @@ -1744,11 +1720,14 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info, return -EBUSY; /* Allocate per device domain ID */ - gcr3_info->domid = domain_id_alloc(); + domid = pdom_id_alloc(); + if (domid <= 0) + return -ENOSPC; + gcr3_info->domid = domid; gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC); if (gcr3_info->gcr3_tbl == NULL) { - domain_id_free(gcr3_info->domid); + pdom_id_free(domid); return -ENOMEM; } @@ -2019,57 +1998,69 @@ static void destroy_gcr3_table(struct iommu_dev_data *dev_data, free_gcr3_table(gcr3_info); } -static int do_attach(struct iommu_dev_data *dev_data, - struct protection_domain *domain) +static int pdom_attach_iommu(struct amd_iommu *iommu, + struct protection_domain *pdom) { - struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); - struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg; + struct pdom_iommu_info *pdom_iommu_info, *curr; + struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg; + unsigned long flags; int ret = 0; - /* Update data structures */ - dev_data->domain = domain; - list_add(&dev_data->list, &domain->dev_list); + spin_lock_irqsave(&pdom->lock, flags); - /* Update NUMA Node ID */ - if (cfg->amd.nid == NUMA_NO_NODE) - cfg->amd.nid = dev_to_node(dev_data->dev); + pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); + if (pdom_iommu_info) { + pdom_iommu_info->refcnt++; + goto out_unlock; + } - /* Do reference counting */ - domain->dev_iommu[iommu->index] += 1; - domain->dev_cnt += 1; + pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC); + if (!pdom_iommu_info) { + ret = -ENOMEM; + goto out_unlock; + } - /* Setup GCR3 table */ - if (pdom_is_sva_capable(domain)) { - ret = init_gcr3_table(dev_data, domain); - if (ret) - return ret; + pdom_iommu_info->iommu = iommu; + pdom_iommu_info->refcnt = 1; + + curr = xa_cmpxchg(&pdom->iommu_array, iommu->index, + NULL, pdom_iommu_info, GFP_ATOMIC); + if (curr) { + kfree(pdom_iommu_info); + ret = -ENOSPC; + goto out_unlock; } + /* Update NUMA Node ID */ + if (cfg->amd.nid == NUMA_NO_NODE) + cfg->amd.nid = dev_to_node(&iommu->dev->dev); + +out_unlock: + spin_unlock_irqrestore(&pdom->lock, flags); return ret; } -static void do_detach(struct iommu_dev_data *dev_data) +static void pdom_detach_iommu(struct amd_iommu *iommu, + struct protection_domain *pdom) { - struct protection_domain *domain = dev_data->domain; - struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); - - /* Clear DTE and flush the entry */ - dev_update_dte(dev_data, false); + struct pdom_iommu_info *pdom_iommu_info; + unsigned long flags; - /* Flush IOTLB and wait for the flushes to finish */ - amd_iommu_domain_flush_all(domain); + spin_lock_irqsave(&pdom->lock, flags); - /* Clear GCR3 table */ - if (pdom_is_sva_capable(domain)) - destroy_gcr3_table(dev_data, domain); + pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); + if (!pdom_iommu_info) { + spin_unlock_irqrestore(&pdom->lock, flags); + return; + } - /* Update data structures */ - dev_data->domain = NULL; - list_del(&dev_data->list); + pdom_iommu_info->refcnt--; + if (pdom_iommu_info->refcnt == 0) { + xa_erase(&pdom->iommu_array, iommu->index); + kfree(pdom_iommu_info); + } - /* decrease reference counters - needs to happen after the flushes */ - domain->dev_iommu[iommu->index] -= 1; - domain->dev_cnt -= 1; + spin_unlock_irqrestore(&pdom->lock, flags); } /* @@ -2079,27 +2070,56 @@ static void 
do_detach(struct iommu_dev_data *dev_data) static int attach_device(struct device *dev, struct protection_domain *domain) { - struct iommu_dev_data *dev_data; - unsigned long flags; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + struct pci_dev *pdev; int ret = 0; - spin_lock_irqsave(&domain->lock, flags); - - dev_data = dev_iommu_priv_get(dev); - - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); if (dev_data->domain != NULL) { ret = -EBUSY; goto out; } - ret = do_attach(dev_data, domain); + /* Do reference counting */ + ret = pdom_attach_iommu(iommu, domain); + if (ret) + goto out; -out: - spin_unlock(&dev_data->lock); + /* Setup GCR3 table */ + if (pdom_is_sva_capable(domain)) { + ret = init_gcr3_table(dev_data, domain); + if (ret) { + pdom_detach_iommu(iommu, domain); + goto out; + } + } - spin_unlock_irqrestore(&domain->lock, flags); + pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + if (pdev && pdom_is_sva_capable(domain)) { + pdev_enable_caps(pdev); + + /* + * Device can continue to function even if IOPF + * enablement failed. Hence in error path just + * disable device PRI support. + */ + if (amd_iommu_iopf_add_device(iommu, dev_data)) + pdev_disable_cap_pri(pdev); + } else if (pdev) { + pdev_enable_cap_ats(pdev); + } + + /* Update data structures */ + dev_data->domain = domain; + list_add(&dev_data->list, &domain->dev_list); + + /* Update device table */ + dev_update_dte(dev_data, true); + +out: + mutex_unlock(&dev_data->mutex); return ret; } @@ -2110,14 +2130,11 @@ out: static void detach_device(struct device *dev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); - struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + struct protection_domain *domain = dev_data->domain; unsigned long flags; - bool ppr = dev_data->ppr; - - spin_lock_irqsave(&domain->lock, flags); - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); /* * First check if the device is still attached. 
It might already @@ -2128,27 +2145,36 @@ static void detach_device(struct device *dev) if (WARN_ON(!dev_data->domain)) goto out; - if (ppr) { + /* Remove IOPF handler */ + if (dev_data->ppr) { iopf_queue_flush_dev(dev); - - /* Updated here so that it gets reflected in DTE */ - dev_data->ppr = false; + amd_iommu_iopf_remove_device(iommu, dev_data); } - do_detach(dev_data); + if (dev_is_pci(dev)) + pdev_disable_caps(to_pci_dev(dev)); -out: - spin_unlock(&dev_data->lock); + /* Clear DTE and flush the entry */ + dev_update_dte(dev_data, false); + /* Flush IOTLB and wait for the flushes to finish */ + spin_lock_irqsave(&domain->lock, flags); + amd_iommu_domain_flush_all(domain); spin_unlock_irqrestore(&domain->lock, flags); - /* Remove IOPF handler */ - if (ppr) - amd_iommu_iopf_remove_device(iommu, dev_data); + /* Clear GCR3 table */ + if (pdom_is_sva_capable(domain)) + destroy_gcr3_table(dev_data, domain); - if (dev_is_pci(dev)) - pdev_disable_caps(to_pci_dev(dev)); + /* Update data structures */ + dev_data->domain = NULL; + list_del(&dev_data->list); + + /* decrease reference counters - needs to happen after the flushes */ + pdom_detach_iommu(iommu, domain); +out: + mutex_unlock(&dev_data->mutex); } static struct iommu_device *amd_iommu_probe_device(struct device *dev) @@ -2205,17 +2231,14 @@ out_err: static void amd_iommu_release_device(struct device *dev) { - struct amd_iommu *iommu; - - if (!check_device(dev)) - return; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); - iommu = rlookup_amd_iommu(dev); - if (!iommu) - return; + WARN_ON(dev_data->domain); - amd_iommu_uninit_device(dev); - iommu_completion_wait(iommu); + /* + * We keep dev_data around for unplugged devices and reuse it when the + * device is re-plugged - not doing so would introduce a ton of races. 
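One locking note on the attach/detach rework above: the per-device dev_data->lock turned from a spinlock into a mutex because both paths now run work that can sleep, such as IOPF registration and iopf_queue_flush_dev(). The resulting shape, reduced to an illustrative skeleton:

        #include <linux/errno.h>
        #include <linux/mutex.h>

        struct example_dev_data {
                struct mutex mutex;     /* was a spinlock_t */
                void *domain;
        };

        static int example_attach(struct example_dev_data *dd, void *dom)
        {
                int ret = 0;

                mutex_lock(&dd->mutex);         /* serializes attach vs. detach */
                if (dd->domain) {
                        ret = -EBUSY;           /* already attached */
                } else {
                        /* sleepable context: allocations, IOPF setup, flushes */
                        dd->domain = dom;
                }
                mutex_unlock(&dd->mutex);
                return ret;
        }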
+ */ } static struct iommu_group *amd_iommu_device_group(struct device *dev) @@ -2236,70 +2259,53 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) * *****************************************************************************/ -static void cleanup_domain(struct protection_domain *domain) -{ - struct iommu_dev_data *entry; - - lockdep_assert_held(&domain->lock); - - if (!domain->dev_cnt) - return; - - while (!list_empty(&domain->dev_list)) { - entry = list_first_entry(&domain->dev_list, - struct iommu_dev_data, list); - BUG_ON(!entry->domain); - do_detach(entry); - } - WARN_ON(domain->dev_cnt != 0); -} - void protection_domain_free(struct protection_domain *domain) { WARN_ON(!list_empty(&domain->dev_list)); if (domain->domain.type & __IOMMU_DOMAIN_PAGING) free_io_pgtable_ops(&domain->iop.pgtbl.ops); - domain_id_free(domain->id); + pdom_id_free(domain->id); kfree(domain); } +static void protection_domain_init(struct protection_domain *domain, int nid) +{ + spin_lock_init(&domain->lock); + INIT_LIST_HEAD(&domain->dev_list); + INIT_LIST_HEAD(&domain->dev_data_list); + xa_init(&domain->iommu_array); + domain->iop.pgtbl.cfg.amd.nid = nid; +} + struct protection_domain *protection_domain_alloc(unsigned int type, int nid) { - struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; - int pgtable; + int domid; domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; - domain->id = domain_id_alloc(); - if (!domain->id) - goto err_free; + domid = pdom_id_alloc(); + if (domid <= 0) { + kfree(domain); + return NULL; + } + domain->id = domid; - spin_lock_init(&domain->lock); - INIT_LIST_HEAD(&domain->dev_list); - INIT_LIST_HEAD(&domain->dev_data_list); - domain->iop.pgtbl.cfg.amd.nid = nid; + protection_domain_init(domain, nid); + + return domain; +} + +static int pdom_setup_pgtable(struct protection_domain *domain, + unsigned int type, int pgtable) +{ + struct io_pgtable_ops *pgtbl_ops; - switch (type) { /* No need to allocate io pgtable ops in passthrough mode */ - case IOMMU_DOMAIN_IDENTITY: - case IOMMU_DOMAIN_SVA: - return domain; - case IOMMU_DOMAIN_DMA: - pgtable = amd_iommu_pgtable; - break; - /* - * Force IOMMU v1 page table when allocating - * domain for pass-through devices. 
- */ - case IOMMU_DOMAIN_UNMANAGED: - pgtable = AMD_IOMMU_V1; - break; - default: - goto err_id; - } + if (!(type & __IOMMU_DOMAIN_PAGING)) + return 0; switch (pgtable) { case AMD_IOMMU_V1: @@ -2309,25 +2315,20 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid) domain->pd_mode = PD_MODE_V2; break; default: - goto err_id; + return -EINVAL; } pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain); if (!pgtbl_ops) - goto err_id; + return -ENOMEM; - return domain; -err_id: - domain_id_free(domain->id); -err_free: - kfree(domain); - return NULL; + return 0; } -static inline u64 dma_max_address(void) +static inline u64 dma_max_address(int pgtable) { - if (amd_iommu_pgtable == AMD_IOMMU_V1) + if (pgtable == AMD_IOMMU_V1) return ~0ULL; /* V2 with 4/5 level page table */ @@ -2340,11 +2341,13 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu) } static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, - struct device *dev, u32 flags) + struct device *dev, + u32 flags, int pgtable) { bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; struct protection_domain *domain; struct amd_iommu *iommu = NULL; + int ret; if (dev) iommu = get_amd_iommu_from_dev(dev); @@ -2356,16 +2359,20 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY)) return ERR_PTR(-EINVAL); - if (dirty_tracking && !amd_iommu_hd_support(iommu)) - return ERR_PTR(-EOPNOTSUPP); - domain = protection_domain_alloc(type, dev ? dev_to_node(dev) : NUMA_NO_NODE); if (!domain) return ERR_PTR(-ENOMEM); + ret = pdom_setup_pgtable(domain, type, pgtable); + if (ret) { + pdom_id_free(domain->id); + kfree(domain); + return ERR_PTR(ret); + } + domain->domain.geometry.aperture_start = 0; - domain->domain.geometry.aperture_end = dma_max_address(); + domain->domain.geometry.aperture_end = dma_max_address(pgtable); domain->domain.geometry.force_aperture = true; domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap; @@ -2383,8 +2390,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) { struct iommu_domain *domain; + int pgtable = amd_iommu_pgtable; - domain = do_iommu_domain_alloc(type, NULL, 0); + /* + * Force IOMMU v1 page table when allocating + * domain for pass-through devices. + */ + if (type == IOMMU_DOMAIN_UNMANAGED) + pgtable = AMD_IOMMU_V1; + + domain = do_iommu_domain_alloc(type, NULL, 0, pgtable); if (IS_ERR(domain)) return NULL; @@ -2392,31 +2407,46 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) } static struct iommu_domain * -amd_iommu_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { unsigned int type = IOMMU_DOMAIN_UNMANAGED; + struct amd_iommu *iommu = NULL; + const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | + IOMMU_HWPT_ALLOC_PASID; - if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data) + if (dev) + iommu = get_amd_iommu_from_dev(dev); + + if ((flags & ~supported_flags) || user_data) return ERR_PTR(-EOPNOTSUPP); - return do_iommu_domain_alloc(type, dev, flags); -} + /* Allocate domain with v2 page table if IOMMU supports PASID. 
*/ + if (flags & IOMMU_HWPT_ALLOC_PASID) { + if (!amd_iommu_pasid_supported()) + return ERR_PTR(-EOPNOTSUPP); -void amd_iommu_domain_free(struct iommu_domain *dom) -{ - struct protection_domain *domain; - unsigned long flags; + return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2); + } - domain = to_pdomain(dom); + /* Allocate domain with v1 page table for dirty tracking */ + if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) { + if (iommu && amd_iommu_hd_support(iommu)) { + return do_iommu_domain_alloc(type, dev, + flags, AMD_IOMMU_V1); + } - spin_lock_irqsave(&domain->lock, flags); + return ERR_PTR(-EOPNOTSUPP); + } - cleanup_domain(domain); + /* If nothing specific is required use the kernel commandline default */ + return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable); +} - spin_unlock_irqrestore(&domain->lock, flags); +void amd_iommu_domain_free(struct iommu_domain *dom) +{ + struct protection_domain *domain = to_pdomain(dom); protection_domain_free(domain); } @@ -2430,9 +2460,9 @@ static int blocked_domain_attach_device(struct iommu_domain *domain, detach_device(dev); /* Clear DTE and flush the entry */ - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); dev_update_dte(dev_data, false); - spin_unlock(&dev_data->lock); + mutex_unlock(&dev_data->mutex); return 0; } @@ -2444,13 +2474,39 @@ static struct iommu_domain blocked_domain = { } }; +static struct protection_domain identity_domain; + +static const struct iommu_domain_ops identity_domain_ops = { + .attach_dev = amd_iommu_attach_device, +}; + +void amd_iommu_init_identity_domain(void) +{ + struct iommu_domain *domain = &identity_domain.domain; + + domain->type = IOMMU_DOMAIN_IDENTITY; + domain->ops = &identity_domain_ops; + domain->owner = &amd_iommu_ops; + + identity_domain.id = pdom_id_alloc(); + + protection_domain_init(&identity_domain, NUMA_NO_NODE); +} + +/* Same as blocked domain except it supports only ops->attach_dev() */ +static struct iommu_domain release_domain = { + .type = IOMMU_DOMAIN_BLOCKED, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = blocked_domain_attach_device, + } +}; + static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct protection_domain *domain = to_pdomain(dom); struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); - struct pci_dev *pdev; int ret; /* @@ -2483,24 +2539,6 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } #endif - pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; - if (pdev && pdom_is_sva_capable(domain)) { - pdev_enable_caps(pdev); - - /* - * Device can continue to function even if IOPF - * enablement failed. Hence in error path just - * disable device PRI support. 
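The identity-domain plumbing above follows the core's static-domain convention: instead of allocating an IDENTITY or BLOCKED domain per request, the driver publishes singleton domains through iommu_ops, and the core attaches ->release_domain while a device is being torn down. A skeletal sketch with illustrative names (the AMD driver additionally embeds its identity domain in a protection_domain and gives it a real domain ID at init):

        #include <linux/iommu.h>

        static int example_attach_identity(struct iommu_domain *domain,
                                           struct device *dev)
        {
                /* program the hardware for pass-through translation */
                return 0;
        }

        static const struct iommu_domain_ops example_identity_ops = {
                .attach_dev = example_attach_identity,
        };

        static struct iommu_domain example_identity_domain = {
                .type = IOMMU_DOMAIN_IDENTITY,
                .ops = &example_identity_ops,
        };

        static const struct iommu_ops example_iommu_ops = {
                .identity_domain = &example_identity_domain,
                /* .blocked_domain and .release_domain follow the same pattern */
        };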
- */ - if (amd_iommu_iopf_add_device(iommu, dev_data)) - pdev_disable_cap_pri(pdev); - } else if (pdev) { - pdev_enable_cap_ats(pdev); - } - - /* Update device table */ - dev_update_dte(dev_data, true); - return ret; } @@ -2842,8 +2880,10 @@ static int amd_iommu_dev_disable_feature(struct device *dev, const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .blocked_domain = &blocked_domain, + .release_domain = &release_domain, + .identity_domain = &identity_domain.domain, .domain_alloc = amd_iommu_domain_alloc, - .domain_alloc_user = amd_iommu_domain_alloc_user, + .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags, .domain_alloc_sva = amd_iommu_domain_alloc_sva, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, @@ -2890,7 +2930,7 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) return; build_inv_irt(&cmd, devid); - data = atomic64_add_return(1, &iommu->cmd_sem_val); + data = atomic64_inc_return(&iommu->cmd_sem_val); build_completion_wait(&cmd2, iommu, data); raw_spin_lock_irqsave(&iommu->lock, flags); diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c index 0657b9373be5..8c73a30c2800 100644 --- a/drivers/iommu/amd/pasid.c +++ b/drivers/iommu/amd/pasid.c @@ -100,7 +100,8 @@ static const struct mmu_notifier_ops sva_mn = { }; int iommu_sva_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) { struct pdom_dev_data *pdom_dev_data; struct protection_domain *sva_pdom = to_pdomain(domain); @@ -108,6 +109,9 @@ int iommu_sva_set_dev_pasid(struct iommu_domain *domain, unsigned long flags; int ret = -EINVAL; + if (old) + return -EOPNOTSUPP; + /* PASID zero is used for requests from the I/O device without PASID */ if (!is_pasid_valid(dev_data, pasid)) return ret; diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile index dc98c88b48c8..493a659cc66b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/Makefile +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o arm_smmu_v3-y := arm-smmu-v3.o +arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_IOMMUFD) += arm-smmu-v3-iommufd.o arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o arm_smmu_v3-$(CONFIG_TEGRA241_CMDQV) += tegra241-cmdqv.o diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c new file mode 100644 index 000000000000..6cc14d82399f --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES + */ + +#include <uapi/linux/iommufd.h> + +#include "arm-smmu-v3.h" + +void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type) +{ + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct iommu_hw_info_arm_smmuv3 *info; + u32 __iomem *base_idr; + unsigned int i; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + base_idr = master->smmu->base + ARM_SMMU_IDR0; + for (i = 0; i <= 5; i++) + info->idr[i] = readl_relaxed(base_idr + i); + info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR); + info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR); + + *length = sizeof(*info); + *type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3; + + return info; +} + +static void arm_smmu_make_nested_cd_table_ste( + struct 
arm_smmu_ste *target, struct arm_smmu_master *master, + struct arm_smmu_nested_domain *nested_domain, bool ats_enabled) +{ + arm_smmu_make_s2_domain_ste( + target, master, nested_domain->vsmmu->s2_parent, ats_enabled); + + target->data[0] = cpu_to_le64(STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, + STRTAB_STE_0_CFG_NESTED)); + target->data[0] |= nested_domain->ste[0] & + ~cpu_to_le64(STRTAB_STE_0_CFG); + target->data[1] |= nested_domain->ste[1]; +} + +/* + * Create a physical STE from the virtual STE that userspace provided when it + * created the nested domain. Using the vSTE userspace can request: + * - Non-valid STE + * - Abort STE + * - Bypass STE (install the S2, no CD table) + * - CD table STE (install the S2 and the userspace CD table) + */ +static void arm_smmu_make_nested_domain_ste( + struct arm_smmu_ste *target, struct arm_smmu_master *master, + struct arm_smmu_nested_domain *nested_domain, bool ats_enabled) +{ + unsigned int cfg = + FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0])); + + /* + * Userspace can request a non-valid STE through the nesting interface. + * We relay that into an abort physical STE with the intention that + * C_BAD_STE for this SID can be generated to userspace. + */ + if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) + cfg = STRTAB_STE_0_CFG_ABORT; + + switch (cfg) { + case STRTAB_STE_0_CFG_S1_TRANS: + arm_smmu_make_nested_cd_table_ste(target, master, nested_domain, + ats_enabled); + break; + case STRTAB_STE_0_CFG_BYPASS: + arm_smmu_make_s2_domain_ste(target, master, + nested_domain->vsmmu->s2_parent, + ats_enabled); + break; + case STRTAB_STE_0_CFG_ABORT: + default: + arm_smmu_make_abort_ste(target); + break; + } +} + +static int arm_smmu_attach_dev_nested(struct iommu_domain *domain, + struct device *dev) +{ + struct arm_smmu_nested_domain *nested_domain = + to_smmu_nested_domain(domain); + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_attach_state state = { + .master = master, + .old_domain = iommu_get_domain_for_dev(dev), + .ssid = IOMMU_NO_PASID, + }; + struct arm_smmu_ste ste; + int ret; + + if (nested_domain->vsmmu->smmu != master->smmu) + return -EINVAL; + if (arm_smmu_ssids_in_use(&master->cd_table)) + return -EBUSY; + + mutex_lock(&arm_smmu_asid_lock); + /* + * The VM has to control the actual ATS state at the PCI device because + * we forward the invalidations directly from the VM. If the VM doesn't + * think ATS is on it will not generate ATC flushes and the ATC will + * become incoherent. Since we can't access the actual virtual PCI ATS + * config bit here base this off the EATS value in the STE. If the EATS + * is set then the VM must generate ATC flushes. 
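The nested-STE code above never uses the guest-provided STE words directly; each access converts from little endian and then applies the bitfield helpers, as in FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ste[0])). A freestanding sketch of that decode step with illustrative masks (the real layouts live in arm-smmu-v3.h):

        #include <linux/bitfield.h>
        #include <linux/bits.h>
        #include <linux/types.h>

        #define EXAMPLE_STE_0_V         BIT_ULL(0)
        #define EXAMPLE_STE_0_CFG       GENMASK_ULL(3, 1)
        #define EXAMPLE_CFG_ABORT       0

        static unsigned int example_ste_cfg(const __le64 *ste)
        {
                u64 word0 = le64_to_cpu(ste[0]);

                /* A non-valid vSTE is folded into "abort", as the driver does. */
                if (!(word0 & EXAMPLE_STE_0_V))
                        return EXAMPLE_CFG_ABORT;

                return FIELD_GET(EXAMPLE_STE_0_CFG, word0);
        }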
+ */ + state.disable_ats = !nested_domain->enable_ats; + ret = arm_smmu_attach_prepare(&state, domain); + if (ret) { + mutex_unlock(&arm_smmu_asid_lock); + return ret; + } + + arm_smmu_make_nested_domain_ste(&ste, master, nested_domain, + state.ats_enabled); + arm_smmu_install_ste_for_dev(master, &ste); + arm_smmu_attach_commit(&state); + mutex_unlock(&arm_smmu_asid_lock); + return 0; +} + +static void arm_smmu_domain_nested_free(struct iommu_domain *domain) +{ + kfree(to_smmu_nested_domain(domain)); +} + +static const struct iommu_domain_ops arm_smmu_nested_ops = { + .attach_dev = arm_smmu_attach_dev_nested, + .free = arm_smmu_domain_nested_free, +}; + +static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg, + bool *enable_ats) +{ + unsigned int eats; + unsigned int cfg; + + if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) { + memset(arg->ste, 0, sizeof(arg->ste)); + return 0; + } + + /* EIO is reserved for invalid STE data. */ + if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) || + (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED)) + return -EIO; + + cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0])); + if (cfg != STRTAB_STE_0_CFG_ABORT && cfg != STRTAB_STE_0_CFG_BYPASS && + cfg != STRTAB_STE_0_CFG_S1_TRANS) + return -EIO; + + /* + * Only Full ATS or ATS UR is supported + * The EATS field will be set by arm_smmu_make_nested_domain_ste() + */ + eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1])); + arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS); + if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS) + return -EIO; + + if (cfg == STRTAB_STE_0_CFG_S1_TRANS) + *enable_ats = (eats == STRTAB_STE_1_EATS_TRANS); + return 0; +} + +static struct iommu_domain * +arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, + const struct iommu_user_data *user_data) +{ + struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core); + const u32 SUPPORTED_FLAGS = IOMMU_HWPT_FAULT_ID_VALID; + struct arm_smmu_nested_domain *nested_domain; + struct iommu_hwpt_arm_smmuv3 arg; + bool enable_ats = false; + int ret; + + /* + * Faults delivered to the nested domain are faults that originated by + * the S1 in the domain. 
The core code will match all PASIDs when + * delivering the fault due to user_pasid_table + */ + if (flags & ~SUPPORTED_FLAGS) + return ERR_PTR(-EOPNOTSUPP); + + ret = iommu_copy_struct_from_user(&arg, user_data, + IOMMU_HWPT_DATA_ARM_SMMUV3, ste); + if (ret) + return ERR_PTR(ret); + + ret = arm_smmu_validate_vste(&arg, &enable_ats); + if (ret) + return ERR_PTR(ret); + + nested_domain = kzalloc(sizeof(*nested_domain), GFP_KERNEL_ACCOUNT); + if (!nested_domain) + return ERR_PTR(-ENOMEM); + + nested_domain->domain.type = IOMMU_DOMAIN_NESTED; + nested_domain->domain.ops = &arm_smmu_nested_ops; + nested_domain->enable_ats = enable_ats; + nested_domain->vsmmu = vsmmu; + nested_domain->ste[0] = arg.ste[0]; + nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS); + + return &nested_domain->domain; +} + +static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid) +{ + struct arm_smmu_master *master; + struct device *dev; + int ret = 0; + + xa_lock(&vsmmu->core.vdevs); + dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid); + if (!dev) { + ret = -EIO; + goto unlock; + } + master = dev_iommu_priv_get(dev); + + /* At this moment, iommufd only supports PCI device that has one SID */ + if (sid) + *sid = master->streams[0].id; +unlock: + xa_unlock(&vsmmu->core.vdevs); + return ret; +} + +/* This is basically iommu_viommu_arm_smmuv3_invalidate in u64 for conversion */ +struct arm_vsmmu_invalidation_cmd { + union { + u64 cmd[2]; + struct iommu_viommu_arm_smmuv3_invalidate ucmd; + }; +}; + +/* + * Convert, in place, the raw invalidation command into an internal format that + * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are + * stored in CPU endian. + * + * Enforce the VMID or SID on the command. + */ +static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu, + struct arm_vsmmu_invalidation_cmd *cmd) +{ + /* Commands are le64 stored in u64 */ + cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]); + cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]); + + switch (cmd->cmd[0] & CMDQ_0_OP) { + case CMDQ_OP_TLBI_NSNH_ALL: + /* Convert to NH_ALL */ + cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL | + FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid); + cmd->cmd[1] = 0; + break; + case CMDQ_OP_TLBI_NH_VA: + case CMDQ_OP_TLBI_NH_VAA: + case CMDQ_OP_TLBI_NH_ALL: + case CMDQ_OP_TLBI_NH_ASID: + cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID; + cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid); + break; + case CMDQ_OP_ATC_INV: + case CMDQ_OP_CFGI_CD: + case CMDQ_OP_CFGI_CD_ALL: { + u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]); + + if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid)) + return -EIO; + cmd->cmd[0] &= ~CMDQ_CFGI_0_SID; + cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid); + break; + } + default: + return -EIO; + } + return 0; +} + +static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu, + struct iommu_user_data_array *array) +{ + struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core); + struct arm_smmu_device *smmu = vsmmu->smmu; + struct arm_vsmmu_invalidation_cmd *last; + struct arm_vsmmu_invalidation_cmd *cmds; + struct arm_vsmmu_invalidation_cmd *cur; + struct arm_vsmmu_invalidation_cmd *end; + int ret; + + cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + cur = cmds; + end = cmds + array->entry_num; + + static_assert(sizeof(*cmds) == 2 * sizeof(u64)); + ret = iommu_copy_struct_from_full_user_array( + cmds, sizeof(*cmds), array, + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3); + if (ret) + goto out; + + 
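/*
 * Batching, as implemented below: 'last' marks the start of the current
 * block of converted commands; the block is pushed to the command queue
 * once it holds CMDQ_BATCH_ENTRIES - 1 commands or once 'cur' reaches
 * 'end', so a trailing partial block is still issued. On any failure the
 * code jumps to 'out', where array->entry_num is trimmed to the number of
 * commands actually consumed so the caller can report progress.
 */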
last = cmds; + while (cur != end) { + ret = arm_vsmmu_convert_user_cmd(vsmmu, cur); + if (ret) + goto out; + + /* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */ + cur++; + if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1) + continue; + + /* FIXME always uses the main cmdq rather than trying to group by type */ + ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd, + cur - last, true); + if (ret) { + cur--; + goto out; + } + last = cur; + } +out: + array->entry_num = cur - cmds; + kfree(cmds); + return ret; +} + +static const struct iommufd_viommu_ops arm_vsmmu_ops = { + .alloc_domain_nested = arm_vsmmu_alloc_domain_nested, + .cache_invalidate = arm_vsmmu_cache_invalidate, +}; + +struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev, + struct iommu_domain *parent, + struct iommufd_ctx *ictx, + unsigned int viommu_type) +{ + struct arm_smmu_device *smmu = + iommu_get_iommu_dev(dev, struct arm_smmu_device, iommu); + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_domain *s2_parent = to_smmu_domain(parent); + struct arm_vsmmu *vsmmu; + + if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3) + return ERR_PTR(-EOPNOTSUPP); + + if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) + return ERR_PTR(-EOPNOTSUPP); + + if (s2_parent->smmu != master->smmu) + return ERR_PTR(-EINVAL); + + /* + * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW + * defect is needed to determine if arm_vsmmu_cache_invalidate() needs + * any change to remove this. + */ + if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) + return ERR_PTR(-EOPNOTSUPP); + + /* + * Must support some way to prevent the VM from bypassing the cache + * because VFIO currently does not do any cache maintenance. canwbs + * indicates the device is fully coherent and no cache maintenance is + * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make + * things non-coherent using the memattr, but No-Snoop behavior is not + * effected. 
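/*
 * Summarizing the coherency rules stated above:
 *  - CANWBS: the master is fully coherent; nothing more is needed.
 *  - no CANWBS but S2FWB: the S2 can force write-back, so guest S1
 *    memattrs cannot create non-coherent mappings (PCI No-Snoop is a
 *    separate behavior and is not changed by S2FWB).
 *  - neither: the VM could sidestep cache maintenance that VFIO never
 *    performs, so the vIOMMU allocation below fails with -EOPNOTSUPP.
 */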
+ */ + if (!arm_smmu_master_canwbs(master) && + !(smmu->features & ARM_SMMU_FEAT_S2FWB)) + return ERR_PTR(-EOPNOTSUPP); + + vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core, + &arm_vsmmu_ops); + if (IS_ERR(vsmmu)) + return ERR_CAST(vsmmu); + + vsmmu->smmu = smmu; + vsmmu->s2_parent = s2_parent; + /* FIXME Move VMID allocation from the S2 domain allocation to here */ + vsmmu->vmid = s2_parent->s2_cfg.vmid; + + return &vsmmu->core; +} + +MODULE_IMPORT_NS(IOMMUFD); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index a7c36654dee5..1d3e71569775 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -332,7 +332,8 @@ void arm_smmu_sva_notifier_synchronize(void) } static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t id) + struct device *dev, ioasid_t id, + struct iommu_domain *old) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_master *master = dev_iommu_priv_get(dev); @@ -348,7 +349,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, * get reassigned */ arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid); - ret = arm_smmu_set_pasid(master, smmu_domain, id, &target); + ret = arm_smmu_set_pasid(master, smmu_domain, id, &target, old); mmput(domain->mm); return ret; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 353fea58cd31..e4ebd9e12ad4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -295,6 +295,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) case CMDQ_OP_TLBI_NH_ASID: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); fallthrough; + case CMDQ_OP_TLBI_NH_ALL: case CMDQ_OP_TLBI_S12_VMALL: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); break; @@ -765,9 +766,9 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, * insert their own list of commands then all of the commands from one * CPU will appear before any of the commands from the other CPU. */ -static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, - struct arm_smmu_cmdq *cmdq, - u64 *cmds, int n, bool sync) +int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, u64 *cmds, int n, + bool sync) { u64 cmd_sync[CMDQ_ENT_DWORDS]; u32 prod; @@ -1045,7 +1046,8 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits) /* S2 translates */ if (cfg & BIT(1)) { used_bits[1] |= - cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG); + cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS | + STRTAB_STE_1_SHCFG); used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR | STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI | @@ -1549,7 +1551,6 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid, } } -VISIBLE_IF_KUNIT void arm_smmu_make_abort_ste(struct arm_smmu_ste *target) { memset(target, 0, sizeof(*target)); @@ -1632,7 +1633,6 @@ void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, } EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste); -VISIBLE_IF_KUNIT void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, struct arm_smmu_master *master, struct arm_smmu_domain *smmu_domain, @@ -1655,6 +1655,8 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, FIELD_PREP(STRTAB_STE_1_EATS, ats_enabled ? 
STRTAB_STE_1_EATS_TRANS : 0)); + if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_S2FWB) + target->data[1] |= cpu_to_le64(STRTAB_STE_1_S2FWB); if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); @@ -2105,7 +2107,16 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, if (!master->ats_enabled) continue; - arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size, &cmd); + if (master_domain->nested_ats_flush) { + /* + * If a S2 used as a nesting parent is changed we have + * no option but to completely flush the ATC. + */ + arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd); + } else { + arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size, + &cmd); + } for (i = 0; i < master->num_streams; i++) { cmd.atc.sid = master->streams[i].id; @@ -2232,6 +2243,15 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size, } __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); + if (smmu_domain->nest_parent) { + /* + * When the S2 domain changes all the nested S1 ASIDs have to be + * flushed too. + */ + cmd.opcode = CMDQ_OP_TLBI_NH_ALL; + arm_smmu_cmdq_issue_cmd_with_sync(smmu_domain->smmu, &cmd); + } + /* * Unfortunately, this can't be leaf-only since we may have * zapped an entire table. @@ -2293,6 +2313,8 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) case IOMMU_CAP_CACHE_COHERENCY: /* Assume that a coherent TCU implies coherent TBUs */ return master->smmu->features & ARM_SMMU_FEAT_COHERENCY; + case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: + return arm_smmu_master_canwbs(master); case IOMMU_CAP_NOEXEC: case IOMMU_CAP_DEFERRED_FLUSH: return true; @@ -2303,6 +2325,26 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) } } +static bool arm_smmu_enforce_cache_coherency(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_master_domain *master_domain; + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + if (!arm_smmu_master_canwbs(master_domain->master)) { + ret = false; + break; + } + } + smmu_domain->enforce_cache_coherency = ret; + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + return ret; +} + struct arm_smmu_domain *arm_smmu_domain_alloc(void) { struct arm_smmu_domain *smmu_domain; @@ -2442,6 +2484,9 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, pgtbl_cfg.oas = smmu->oas; fmt = ARM_64_LPAE_S2; finalise_stage_fn = arm_smmu_domain_finalise_s2; + if ((smmu->features & ARM_SMMU_FEAT_S2FWB) && + (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)) + pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_S2FWB; break; default: return -EINVAL; @@ -2483,8 +2528,8 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) } } -static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master, - const struct arm_smmu_ste *target) +void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master, + const struct arm_smmu_ste *target) { int i, j; struct arm_smmu_device *smmu = master->smmu; @@ -2595,7 +2640,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master) static struct arm_smmu_master_domain * arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master *master, - ioasid_t ssid) + ioasid_t ssid, bool nested_ats_flush) { struct arm_smmu_master_domain *master_domain; @@ -2604,7 
+2649,8 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) { if (master_domain->master == master && - master_domain->ssid == ssid) + master_domain->ssid == ssid && + master_domain->nested_ats_flush == nested_ats_flush) return master_domain; } return NULL; @@ -2624,6 +2670,8 @@ to_smmu_domain_devices(struct iommu_domain *domain) if ((domain->type & __IOMMU_DOMAIN_PAGING) || domain->type == IOMMU_DOMAIN_SVA) return to_smmu_domain(domain); + if (domain->type == IOMMU_DOMAIN_NESTED) + return to_smmu_nested_domain(domain)->vsmmu->s2_parent; return NULL; } @@ -2633,13 +2681,18 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, { struct arm_smmu_domain *smmu_domain = to_smmu_domain_devices(domain); struct arm_smmu_master_domain *master_domain; + bool nested_ats_flush = false; unsigned long flags; if (!smmu_domain) return; + if (domain->type == IOMMU_DOMAIN_NESTED) + nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats; + spin_lock_irqsave(&smmu_domain->devices_lock, flags); - master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid); + master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid, + nested_ats_flush); if (master_domain) { list_del(&master_domain->devices_elm); kfree(master_domain); @@ -2649,16 +2702,6 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); } -struct arm_smmu_attach_state { - /* Inputs */ - struct iommu_domain *old_domain; - struct arm_smmu_master *master; - bool cd_needs_ats; - ioasid_t ssid; - /* Resulting state */ - bool ats_enabled; -}; - /* * Start the sequence to attach a domain to a master. The sequence contains three * steps: @@ -2679,8 +2722,8 @@ struct arm_smmu_attach_state { * new_domain can be a non-paging domain. In this case ATS will not be enabled, * and invalidations won't be tracked. */ -static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, - struct iommu_domain *new_domain) +int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, + struct iommu_domain *new_domain) { struct arm_smmu_master *master = state->master; struct arm_smmu_master_domain *master_domain; @@ -2706,7 +2749,8 @@ static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, * enabled if we have arm_smmu_domain, those always have page * tables. */ - state->ats_enabled = arm_smmu_ats_supported(master); + state->ats_enabled = !state->disable_ats && + arm_smmu_ats_supported(master); } if (smmu_domain) { @@ -2715,6 +2759,9 @@ static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, return -ENOMEM; master_domain->master = master; master_domain->ssid = state->ssid; + if (new_domain->type == IOMMU_DOMAIN_NESTED) + master_domain->nested_ats_flush = + to_smmu_nested_domain(new_domain)->enable_ats; /* * During prepare we want the current smmu_domain and new @@ -2731,6 +2778,14 @@ static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, * one of them. 
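 * * (Editor's hedged note, inferred from the surrounding code since part of this comment is elided by the hunk boundary: the master is placed on both the old and the new domain's devices list before any invalidation is issued, so a concurrent TLB or ATC flush against either domain still reaches this master and the STE/CD rewrite cannot lose maintenance operations.)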
*/ spin_lock_irqsave(&smmu_domain->devices_lock, flags); + if (smmu_domain->enforce_cache_coherency && + !arm_smmu_master_canwbs(master)) { + spin_unlock_irqrestore(&smmu_domain->devices_lock, + flags); + kfree(master_domain); + return -EINVAL; + } + if (state->ats_enabled) atomic_inc(&smmu_domain->nr_ats_masters); list_add(&master_domain->devices_elm, &smmu_domain->devices); @@ -2754,7 +2809,7 @@ static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, * completes synchronizing the PCI device's ATC and finishes manipulating the * smmu_domain->devices list. */ -static void arm_smmu_attach_commit(struct arm_smmu_attach_state *state) +void arm_smmu_attach_commit(struct arm_smmu_attach_state *state) { struct arm_smmu_master *master = state->master; @@ -2856,7 +2911,8 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) } static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t id) + struct device *dev, ioasid_t id, + struct iommu_domain *old) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_master *master = dev_iommu_priv_get(dev); @@ -2882,7 +2938,7 @@ static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain, */ arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); return arm_smmu_set_pasid(master, to_smmu_domain(domain), id, - &target_cd); + &target_cd, old); } static void arm_smmu_update_ste(struct arm_smmu_master *master, @@ -2912,16 +2968,13 @@ static void arm_smmu_update_ste(struct arm_smmu_master *master, int arm_smmu_set_pasid(struct arm_smmu_master *master, struct arm_smmu_domain *smmu_domain, ioasid_t pasid, - struct arm_smmu_cd *cd) + struct arm_smmu_cd *cd, struct iommu_domain *old) { struct iommu_domain *sid_domain = iommu_get_domain_for_dev(master->dev); struct arm_smmu_attach_state state = { .master = master, - /* - * For now the core code prevents calling this when a domain is - * already attached, no need to set old_domain. 
- */ .ssid = pasid, + .old_domain = old, }; struct arm_smmu_cd *cdptr; int ret; @@ -3079,24 +3132,37 @@ static struct iommu_domain arm_smmu_blocked_domain = { }; static struct iommu_domain * -arm_smmu_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { struct arm_smmu_master *master = dev_iommu_priv_get(dev); - const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING; + const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | + IOMMU_HWPT_ALLOC_PASID | + IOMMU_HWPT_ALLOC_NEST_PARENT; struct arm_smmu_domain *smmu_domain; int ret; if (flags & ~PAGING_FLAGS) return ERR_PTR(-EOPNOTSUPP); - if (parent || user_data) + if (user_data) return ERR_PTR(-EOPNOTSUPP); + if (flags & IOMMU_HWPT_ALLOC_PASID) + return arm_smmu_domain_alloc_paging(dev); + smmu_domain = arm_smmu_domain_alloc(); if (IS_ERR(smmu_domain)) return ERR_CAST(smmu_domain); + if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT) { + if (!(master->smmu->features & ARM_SMMU_FEAT_NESTING)) { + ret = -EOPNOTSUPP; + goto err_free; + } + smmu_domain->stage = ARM_SMMU_DOMAIN_S2; + smmu_domain->nest_parent = true; + } + smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags); @@ -3378,21 +3444,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } -static int arm_smmu_enable_nesting(struct iommu_domain *domain) -{ - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - int ret = 0; - - mutex_lock(&smmu_domain->init_mutex); - if (smmu_domain->smmu) - ret = -EPERM; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S2; - mutex_unlock(&smmu_domain->init_mutex); - - return ret; -} - static int arm_smmu_of_xlate(struct device *dev, const struct of_phandle_args *args) { @@ -3491,9 +3542,10 @@ static struct iommu_ops arm_smmu_ops = { .identity_domain = &arm_smmu_identity_domain, .blocked_domain = &arm_smmu_blocked_domain, .capable = arm_smmu_capable, + .hw_info = arm_smmu_hw_info, .domain_alloc_paging = arm_smmu_domain_alloc_paging, .domain_alloc_sva = arm_smmu_sva_domain_alloc, - .domain_alloc_user = arm_smmu_domain_alloc_user, + .domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, @@ -3504,17 +3556,19 @@ static struct iommu_ops arm_smmu_ops = { .dev_disable_feat = arm_smmu_dev_disable_feature, .page_response = arm_smmu_page_response, .def_domain_type = arm_smmu_def_domain_type, + .viommu_alloc = arm_vsmmu_alloc, + .user_pasid_table = 1, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, + .enforce_cache_coherency = arm_smmu_enforce_cache_coherency, .set_dev_pasid = arm_smmu_s1_set_dev_pasid, .map_pages = arm_smmu_map_pages, .unmap_pages = arm_smmu_unmap_pages, .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, - .enable_nesting = arm_smmu_enable_nesting, .free = arm_smmu_domain_free_paging, } }; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 1e9952ca989f..0107d3f333a1 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ 
b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -10,6 +10,7 @@ #include <linux/bitfield.h> #include <linux/iommu.h> +#include <linux/iommufd.h> #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/sizes.h> @@ -57,6 +58,7 @@ struct arm_smmu_device; #define IDR1_SIDSIZE GENMASK(5, 0) #define ARM_SMMU_IDR3 0xc +#define IDR3_FWB (1 << 8) #define IDR3_RIL (1 << 10) #define ARM_SMMU_IDR5 0x14 @@ -81,6 +83,8 @@ struct arm_smmu_device; #define IIDR_REVISION GENMASK(15, 12) #define IIDR_IMPLEMENTER GENMASK(11, 0) +#define ARM_SMMU_AIDR 0x1C + #define ARM_SMMU_CR0 0x20 #define CR0_ATSCHK (1 << 4) #define CR0_CMDQEN (1 << 3) @@ -241,6 +245,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid) #define STRTAB_STE_0_CFG_BYPASS 4 #define STRTAB_STE_0_CFG_S1_TRANS 5 #define STRTAB_STE_0_CFG_S2_TRANS 6 +#define STRTAB_STE_0_CFG_NESTED 7 #define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4) #define STRTAB_STE_0_S1FMT_LINEAR 0 @@ -261,6 +266,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid) #define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4) #define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6) +#define STRTAB_STE_1_S2FWB (1UL << 25) #define STRTAB_STE_1_S1STALLD (1UL << 27) #define STRTAB_STE_1_EATS GENMASK_ULL(29, 28) @@ -292,6 +298,15 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid) #define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4) +/* These bits can be controlled by userspace for STRTAB_STE_0_CFG_NESTED */ +#define STRTAB_STE_0_NESTING_ALLOWED \ + cpu_to_le64(STRTAB_STE_0_V | STRTAB_STE_0_CFG | STRTAB_STE_0_S1FMT | \ + STRTAB_STE_0_S1CTXPTR_MASK | STRTAB_STE_0_S1CDMAX) +#define STRTAB_STE_1_NESTING_ALLOWED \ + cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR | \ + STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH | \ + STRTAB_STE_1_S1STALLD | STRTAB_STE_1_EATS) + /* * Context descriptors. * @@ -511,8 +526,10 @@ struct arm_smmu_cmdq_ent { }; } cfgi; + #define CMDQ_OP_TLBI_NH_ALL 0x10 #define CMDQ_OP_TLBI_NH_ASID 0x11 #define CMDQ_OP_TLBI_NH_VA 0x12 + #define CMDQ_OP_TLBI_NH_VAA 0x13 #define CMDQ_OP_TLBI_EL2_ALL 0x20 #define CMDQ_OP_TLBI_EL2_ASID 0x21 #define CMDQ_OP_TLBI_EL2_VA 0x22 @@ -726,6 +743,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20) #define ARM_SMMU_FEAT_HA (1 << 21) #define ARM_SMMU_FEAT_HD (1 << 22) +#define ARM_SMMU_FEAT_S2FWB (1 << 23) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) @@ -811,10 +829,20 @@ struct arm_smmu_domain { /* List of struct arm_smmu_master_domain */ struct list_head devices; spinlock_t devices_lock; + bool enforce_cache_coherency : 1; + bool nest_parent : 1; struct mmu_notifier mmu_notifier; }; +struct arm_smmu_nested_domain { + struct iommu_domain domain; + struct arm_vsmmu *vsmmu; + bool enable_ats : 1; + + __le64 ste[2]; +}; + /* The following are exposed for testing purposes. 
*/ struct arm_smmu_entry_writer_ops; struct arm_smmu_entry_writer { @@ -827,21 +855,22 @@ struct arm_smmu_entry_writer_ops { void (*sync)(struct arm_smmu_entry_writer *writer); }; +void arm_smmu_make_abort_ste(struct arm_smmu_ste *target); +void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, + bool ats_enabled); + #if IS_ENABLED(CONFIG_KUNIT) void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits); void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur, const __le64 *target); void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits); -void arm_smmu_make_abort_ste(struct arm_smmu_ste *target); void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, struct arm_smmu_ste *target); void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, struct arm_smmu_master *master, bool ats_enabled, unsigned int s1dss); -void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, - struct arm_smmu_master *master, - struct arm_smmu_domain *smmu_domain, - bool ats_enabled); void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, struct arm_smmu_master *master, struct mm_struct *mm, u16 asid); @@ -851,6 +880,7 @@ struct arm_smmu_master_domain { struct list_head devices_elm; struct arm_smmu_master *master; ioasid_t ssid; + bool nested_ats_flush : 1; }; static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) @@ -858,6 +888,12 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) return container_of(dom, struct arm_smmu_domain, domain); } +static inline struct arm_smmu_nested_domain * +to_smmu_nested_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct arm_smmu_nested_domain, domain); +} + extern struct xarray arm_smmu_asid_xa; extern struct mutex arm_smmu_asid_lock; @@ -875,7 +911,7 @@ void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid, int arm_smmu_set_pasid(struct arm_smmu_master *master, struct arm_smmu_domain *smmu_domain, ioasid_t pasid, - struct arm_smmu_cd *cd); + struct arm_smmu_cd *cd, struct iommu_domain *old); void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid); void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, @@ -893,6 +929,33 @@ int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, int arm_smmu_cmdq_init(struct arm_smmu_device *smmu, struct arm_smmu_cmdq *cmdq); +static inline bool arm_smmu_master_canwbs(struct arm_smmu_master *master) +{ + return dev_iommu_fwspec_get(master->dev)->flags & + IOMMU_FWSPEC_PCI_RC_CANWBS; +} + +struct arm_smmu_attach_state { + /* Inputs */ + struct iommu_domain *old_domain; + struct arm_smmu_master *master; + bool cd_needs_ats; + bool disable_ats; + ioasid_t ssid; + /* Resulting state */ + bool ats_enabled; +}; + +int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, + struct iommu_domain *new_domain); +void arm_smmu_attach_commit(struct arm_smmu_attach_state *state); +void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master, + const struct arm_smmu_ste *target); + +int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, u64 *cmds, int n, + bool sync); + #ifdef CONFIG_ARM_SMMU_V3_SVA bool arm_smmu_sva_supported(struct arm_smmu_device *smmu); bool arm_smmu_master_sva_supported(struct arm_smmu_master *master); @@ -949,4 +1012,23 @@ tegra241_cmdqv_probe(struct arm_smmu_device *smmu) return ERR_PTR(-ENODEV); } #endif /* CONFIG_TEGRA241_CMDQV */ + +struct arm_vsmmu 
{ + struct iommufd_viommu core; + struct arm_smmu_device *smmu; + struct arm_smmu_domain *s2_parent; + u16 vmid; +}; + +#if IS_ENABLED(CONFIG_ARM_SMMU_V3_IOMMUFD) +void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type); +struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev, + struct iommu_domain *parent, + struct iommufd_ctx *ictx, + unsigned int viommu_type); +#else +#define arm_smmu_hw_info NULL +#define arm_vsmmu_alloc NULL +#endif /* CONFIG_ARM_SMMU_V3_IOMMUFD */ + #endif /* _ARM_SMMU_V3_H */ diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index fcd13d301fff..c8ec74f089f3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -509,7 +509,8 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq) snprintf(name, 16, "vcmdq%u", vcmdq->idx); - q->llq.max_n_shift = VCMDQ_LOG2SIZE_MAX; + /* Queue size, capped to ensure natural alignment */ + q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX); /* Use the common helper to init the VCMDQ, and then... */ ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0, @@ -800,7 +801,9 @@ out_fallback: return 0; } -struct dentry *cmdqv_debugfs_dir; +#ifdef CONFIG_IOMMU_DEBUGFS +static struct dentry *cmdqv_debugfs_dir; +#endif static struct arm_smmu_device * __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 8321962b3714..ade4684c14c9 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1437,6 +1437,17 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) goto out_free; } else { smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); + + /* + * Defer probe if the relevant SMMU instance hasn't finished + * probing yet. This is a fragile hack and we'd ideally + * avoid this race in the core code. Until that's ironed + * out, however, this is the most pragmatic option on the + * table. 
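+ * + * (Editor's hedged note: -EPROBE_DEFER asks the driver core to retry this probe later, once the referenced SMMU instance has bound; dev_err_probe() records the deferral reason for the devices_deferred debugfs file rather than logging an error, so a transiently unbound SMMU does not spam the console.)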
+ */ + if (!smmu) + return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, + "smmu dev has not bound yet\n")); } ret = -EINVAL; @@ -1558,21 +1569,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } -static int arm_smmu_enable_nesting(struct iommu_domain *domain) -{ - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - int ret = 0; - - mutex_lock(&smmu_domain->init_mutex); - if (smmu_domain->smmu) - ret = -EPERM; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - mutex_unlock(&smmu_domain->init_mutex); - - return ret; -} - static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks) { @@ -1656,7 +1652,6 @@ static struct iommu_ops arm_smmu_ops = { .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, - .enable_nesting = arm_smmu_enable_nesting, .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, .free = arm_smmu_domain_free, } diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index 88fd32a9323c..f2f538c70650 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -14,6 +14,7 @@ config INTEL_IOMMU depends on PCI_MSI && ACPI && X86 select IOMMU_API select IOMMU_IOVA + select IOMMU_IOPF select IOMMUFD_DRIVER if IOMMUFD select NEED_DMA_MAP_STATE select DMAR_TABLE @@ -50,7 +51,6 @@ config INTEL_IOMMU_SVM depends on X86_64 select MMU_NOTIFIER select IOMMU_SVA - select IOMMU_IOPF help Shared Virtual Memory (SVM) provides a facility for devices to access DMA resources through process address space by diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile index c8beb0281559..d3bb0798092d 100644 --- a/drivers/iommu/intel/Makefile +++ b/drivers/iommu/intel/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DMAR_TABLE) += dmar.o -obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o +obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o obj-$(CONFIG_DMAR_PERF) += perf.o obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index eaf862e8dea1..9f424acf474e 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1060,7 +1060,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) err = iommu->seq_id; goto error; } - sprintf(iommu->name, "dmar%d", iommu->seq_id); + snprintf(iommu->name, sizeof(iommu->name), "dmar%d", iommu->seq_id); err = map_iommu(iommu, drhd); if (err) { @@ -1895,19 +1895,6 @@ void dmar_msi_write(int irq, struct msi_msg *msg) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } -void dmar_msi_read(int irq, struct msi_msg *msg) -{ - struct intel_iommu *iommu = irq_get_handler_data(irq); - int reg = dmar_msi_reg(iommu, irq); - unsigned long flag; - - raw_spin_lock_irqsave(&iommu->register_lock, flag); - msg->data = readl(iommu->reg + reg + 4); - msg->address_lo = readl(iommu->reg + reg + 8); - msg->address_hi = readl(iommu->reg + reg + 12); - raw_spin_unlock_irqrestore(&iommu->register_lock, flag); -} - static int dmar_fault_do_one(struct intel_iommu *iommu, int type, u8 fault_reason, u32 pasid, u16 source_id, unsigned long long addr) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index e860bc9439a2..7d0acb74d5a5 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -352,89 +352,6 @@ static bool iommu_paging_structure_coherency(struct intel_iommu *iommu) 
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); } -static void domain_update_iommu_coherency(struct dmar_domain *domain) -{ - struct iommu_domain_info *info; - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - bool found = false; - unsigned long i; - - domain->iommu_coherency = true; - xa_for_each(&domain->iommu_array, i, info) { - found = true; - if (!iommu_paging_structure_coherency(info->iommu)) { - domain->iommu_coherency = false; - break; - } - } - if (found) - return; - - /* No hardware attached; use lowest common denominator */ - rcu_read_lock(); - for_each_active_iommu(iommu, drhd) { - if (!iommu_paging_structure_coherency(iommu)) { - domain->iommu_coherency = false; - break; - } - } - rcu_read_unlock(); -} - -static int domain_update_iommu_superpage(struct dmar_domain *domain, - struct intel_iommu *skip) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - int mask = 0x3; - - if (!intel_iommu_superpage) - return 0; - - /* set iommu_superpage to the smallest common denominator */ - rcu_read_lock(); - for_each_active_iommu(iommu, drhd) { - if (iommu != skip) { - if (domain && domain->use_first_level) { - if (!cap_fl1gp_support(iommu->cap)) - mask = 0x1; - } else { - mask &= cap_super_page_val(iommu->cap); - } - - if (!mask) - break; - } - } - rcu_read_unlock(); - - return fls(mask); -} - -static int domain_update_device_node(struct dmar_domain *domain) -{ - struct device_domain_info *info; - int nid = NUMA_NO_NODE; - unsigned long flags; - - spin_lock_irqsave(&domain->lock, flags); - list_for_each_entry(info, &domain->devices, link) { - /* - * There could possibly be multiple device numa nodes as devices - * within the same domain may sit behind different IOMMUs. There - * isn't perfect answer in such situation, so we select first - * come first served policy. - */ - nid = dev_to_node(info->dev); - if (nid != NUMA_NO_NODE) - break; - } - spin_unlock_irqrestore(&domain->lock, flags); - - return nid; -} - /* Return the super pagesize bitmap if supported. */ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain) { @@ -452,34 +369,6 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain) return bitmap; } -/* Some capabilities may be different across iommus */ -void domain_update_iommu_cap(struct dmar_domain *domain) -{ - domain_update_iommu_coherency(domain); - domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL); - - /* - * If RHSA is missing, we should default to the device numa domain - * as fall back. - */ - if (domain->nid == NUMA_NO_NODE) - domain->nid = domain_update_device_node(domain); - - /* - * First-level translation restricts the input-address to a - * canonical address (i.e., address bits 63:N have the same - * value as address bit [N-1], where N is 48-bits with 4-level - * paging and 57-bits with 5-level paging). Hence, skip bit - * [N-1]. 
- */ - if (domain->use_first_level) - domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); - else - domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); - - domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); -} - struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc) { @@ -707,14 +596,15 @@ static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, while (1) { offset = pfn_level_offset(pfn, level); pte = &parent[offset]; - if (!pte || (dma_pte_superpage(pte) || !dma_pte_present(pte))) { - pr_info("PTE not present at level %d\n", level); - break; - } pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); - if (level == 1) + if (!dma_pte_present(pte)) { + pr_info("page table not present at level %d\n", level - 1); + break; + } + + if (level == 1 || dma_pte_superpage(pte)) break; parent = phys_to_virt(dma_pte_addr(pte)); @@ -737,11 +627,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); /* root entry dump */ - rt_entry = &iommu->root_entry[bus]; - if (!rt_entry) { - pr_info("root table entry is not present\n"); + if (!iommu->root_entry) { + pr_info("root table is not present\n"); return; } + rt_entry = &iommu->root_entry[bus]; if (sm_supported(iommu)) pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n", @@ -752,7 +642,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, /* context entry dump */ ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); if (!ctx_entry) { - pr_info("context table entry is not present\n"); + pr_info("context table is not present\n"); return; } @@ -761,17 +651,23 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, /* legacy mode does not require PASID entries */ if (!sm_supported(iommu)) { + if (!context_present(ctx_entry)) { + pr_info("legacy mode page table is not present\n"); + return; + } level = agaw_to_level(ctx_entry->hi & 7); pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); goto pgtable_walk; } - /* get the pointer to pasid directory entry */ - dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); - if (!dir) { - pr_info("pasid directory entry is not present\n"); + if (!context_present(ctx_entry)) { + pr_info("pasid directory table is not present\n"); return; } + + /* get the pointer to pasid directory entry */ + dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); + /* For request-without-pasid, get the pasid from context entry */ if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID) pasid = IOMMU_NO_PASID; @@ -783,7 +679,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, /* get the pointer to the pasid table entry */ entries = get_pasid_table_from_pde(pde); if (!entries) { - pr_info("pasid table entry is not present\n"); + pr_info("pasid table is not present\n"); return; } index = pasid & PASID_PTE_MASK; @@ -791,6 +687,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, for (i = 0; i < ARRAY_SIZE(pte->val); i++) pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); + if (!pasid_pte_is_present(pte)) { + pr_info("scalable mode page table is not present\n"); + return; + } + if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) { level = pte->val[2] & BIT_ULL(2) ? 
5 : 4; pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK); @@ -1428,51 +1329,25 @@ static void free_dmar_iommu(struct intel_iommu *iommu) /* free context mapping */ free_context_table(iommu); -#ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu)) { - if (ecap_prs(iommu->ecap)) - intel_svm_finish_prq(iommu); - } -#endif + if (ecap_prs(iommu->ecap)) + intel_iommu_finish_prq(iommu); } /* * Check and return whether first level is used by default for * DMA translation. */ -static bool first_level_by_default(unsigned int type) +static bool first_level_by_default(struct intel_iommu *iommu) { /* Only SL is available in legacy mode */ - if (!scalable_mode_support()) + if (!sm_supported(iommu)) return false; /* Only level (either FL or SL) is available, just use it */ - if (intel_cap_flts_sanity() ^ intel_cap_slts_sanity()) - return intel_cap_flts_sanity(); - - /* Both levels are available, decide it based on domain type */ - return type != IOMMU_DOMAIN_UNMANAGED; -} + if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap)) + return ecap_flts(iommu->ecap); -static struct dmar_domain *alloc_domain(unsigned int type) -{ - struct dmar_domain *domain; - - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - - domain->nid = NUMA_NO_NODE; - if (first_level_by_default(type)) - domain->use_first_level = true; - INIT_LIST_HEAD(&domain->devices); - INIT_LIST_HEAD(&domain->dev_pasids); - INIT_LIST_HEAD(&domain->cache_tags); - spin_lock_init(&domain->lock); - spin_lock_init(&domain->cache_lock); - xa_init(&domain->iommu_array); - - return domain; + return true; } int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) @@ -1514,7 +1389,6 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) ret = xa_err(curr) ? 
: -EBUSY; goto err_clear; } - domain_update_iommu_cap(domain); spin_unlock(&iommu->lock); return 0; @@ -1540,26 +1414,11 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) clear_bit(info->did, iommu->domain_ids); xa_erase(&domain->iommu_array, iommu->seq_id); domain->nid = NUMA_NO_NODE; - domain_update_iommu_cap(domain); kfree(info); } spin_unlock(&iommu->lock); } -static int guestwidth_to_adjustwidth(int gaw) -{ - int agaw; - int r = (gaw - 12) % 9; - - if (r == 0) - agaw = gaw; - else - agaw = gaw + 9 - r; - if (agaw > 64) - agaw = 64; - return agaw; -} - static void domain_exit(struct dmar_domain *domain) { if (domain->pgd) { @@ -1601,7 +1460,7 @@ static void copied_context_tear_down(struct intel_iommu *iommu, if (did_old < cap_ndoms(iommu->cap)) { iommu->flush.flush_context(iommu, did_old, - (((u16)bus) << 8) | devfn, + PCI_DEVID(bus, devfn), DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); iommu->flush.flush_iotlb(iommu, did_old, 0, 0, @@ -1622,7 +1481,7 @@ static void context_present_cache_flush(struct intel_iommu *iommu, u16 did, { if (cap_caching_mode(iommu->cap)) { iommu->flush.flush_context(iommu, 0, - (((u16)bus) << 8) | devfn, + PCI_DEVID(bus, devfn), DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); @@ -1641,7 +1500,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int translation = CONTEXT_TT_MULTI_LEVEL; struct dma_pte *pgd = domain->pgd; struct context_entry *context; - int agaw, ret; + int ret; pr_debug("Set context mapping for %02x:%02x.%d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); @@ -1658,27 +1517,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, copied_context_tear_down(iommu, context, bus, devfn); context_clear_entry(context); - context_set_domain_id(context, did); - /* - * Skip top levels of page tables for iommu which has - * less agaw than default. Unnecessary for PT mode. 
- */ - for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { - ret = -ENOMEM; - pgd = phys_to_virt(dma_pte_addr(pgd)); - if (!dma_pte_present(pgd)) - goto out_unlock; - } - if (info && info->ats_supported) translation = CONTEXT_TT_DEV_IOTLB; else translation = CONTEXT_TT_MULTI_LEVEL; context_set_address_root(context, virt_to_phys(pgd)); - context_set_address_width(context, agaw); + context_set_address_width(context, domain->agaw); context_set_translation_type(context, translation); context_set_fault_enable(context); context_set_present(context); @@ -1905,26 +1752,52 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 intel_context_flush_present(info, context, did, true); } +int __domain_setup_first_level(struct intel_iommu *iommu, + struct device *dev, ioasid_t pasid, + u16 did, pgd_t *pgd, int flags, + struct iommu_domain *old) +{ + if (!old) + return intel_pasid_setup_first_level(iommu, dev, pgd, + pasid, did, flags); + return intel_pasid_replace_first_level(iommu, dev, pgd, pasid, did, + iommu_domain_did(old, iommu), + flags); +} + +static int domain_setup_second_level(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) +{ + if (!old) + return intel_pasid_setup_second_level(iommu, domain, + dev, pasid); + return intel_pasid_replace_second_level(iommu, domain, dev, + iommu_domain_did(old, iommu), + pasid); +} + +static int domain_setup_passthrough(struct intel_iommu *iommu, + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) +{ + if (!old) + return intel_pasid_setup_pass_through(iommu, dev, pasid); + return intel_pasid_replace_pass_through(iommu, dev, + iommu_domain_did(old, iommu), + pasid); +} + static int domain_setup_first_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, - u32 pasid) + u32 pasid, struct iommu_domain *old) { struct dma_pte *pgd = domain->pgd; - int agaw, level; - int flags = 0; + int level, flags = 0; - /* - * Skip top levels of page tables for iommu which has - * less agaw than default. Unnecessary for PT mode. 
- */ - for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { - pgd = phys_to_virt(dma_pte_addr(pgd)); - if (!dma_pte_present(pgd)) - return -ENOMEM; - } - - level = agaw_to_level(agaw); + level = agaw_to_level(domain->agaw); if (level != 4 && level != 5) return -EINVAL; @@ -1934,15 +1807,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu, if (domain->force_snooping) flags |= PASID_FLAG_PAGE_SNOOP; - return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, - domain_id_iommu(domain, iommu), - flags); -} - -static bool dev_is_real_dma_subdevice(struct device *dev) -{ - return dev && dev_is_pci(dev) && - pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev); + return __domain_setup_first_level(iommu, dev, pasid, + domain_id_iommu(domain, iommu), + (pgd_t *)pgd, flags, old); } static int dmar_domain_attach_device(struct dmar_domain *domain, @@ -1968,9 +1835,11 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, if (!sm_supported(iommu)) ret = domain_context_mapping(domain, dev); else if (domain->use_first_level) - ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID); + ret = domain_setup_first_level(iommu, domain, dev, + IOMMU_NO_PASID, NULL); else - ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID); + ret = domain_setup_second_level(iommu, domain, dev, + IOMMU_NO_PASID, NULL); if (ret) goto out_block_translation; @@ -2354,19 +2223,18 @@ static int __init init_dmars(void) iommu_flush_write_buffer(iommu); -#ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { + if (ecap_prs(iommu->ecap)) { /* * Call dmar_alloc_hwirq() with dmar_global_lock held, * could cause possible lock race condition. */ up_write(&dmar_global_lock); - ret = intel_svm_enable_prq(iommu); + ret = intel_iommu_enable_prq(iommu); down_write(&dmar_global_lock); if (ret) goto free_iommu; } -#endif + ret = dmar_set_interrupt(iommu); if (ret) goto free_iommu; @@ -2746,20 +2614,13 @@ int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg) static int intel_iommu_add(struct dmar_drhd_unit *dmaru) { - int sp, ret; struct intel_iommu *iommu = dmaru->iommu; + int ret; ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); if (ret) goto out; - sp = domain_update_iommu_superpage(NULL, iommu) - 1; - if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { - pr_warn("%s: Doesn't support large page.\n", - iommu->name); - return -ENXIO; - } - /* * Disable translation if already enabled prior to OS handover. */ @@ -2786,13 +2647,12 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) intel_iommu_init_qi(iommu); iommu_flush_write_buffer(iommu); -#ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { - ret = intel_svm_enable_prq(iommu); + if (ecap_prs(iommu->ecap)) { + ret = intel_iommu_enable_prq(iommu); if (ret) goto disable_iommu; } -#endif + ret = dmar_set_interrupt(iommu); if (ret) goto disable_iommu; @@ -3288,7 +3148,7 @@ int __init intel_iommu_init(void) * the virtual and physical IOMMU page-tables. 
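 * * (Editor's hedged note: with caching mode (CM=1), a virtual IOMMU may cache even not-present entries, so every unmap must be followed promptly by an invalidation; iommu_set_dma_strict() below disables deferred/batched flushing to keep the emulated and physical page tables in sync.)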
*/ if (cap_caching_mode(iommu->cap) && - !first_level_by_default(IOMMU_DOMAIN_DMA)) { + !first_level_by_default(iommu)) { pr_info_once("IOMMU batching disallowed due to virtualization\n"); iommu_set_dma_strict(); } @@ -3381,27 +3241,6 @@ void device_block_translation(struct device *dev) info->domain = NULL; } -static int md_domain_init(struct dmar_domain *domain, int guest_width) -{ - int adjust_width; - - /* calculate AGAW */ - domain->gaw = guest_width; - adjust_width = guestwidth_to_adjustwidth(guest_width); - domain->agaw = width_to_agaw(adjust_width); - - domain->iommu_coherency = false; - domain->iommu_superpage = 0; - domain->max_addr = 0; - - /* always allocate the top pgd */ - domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC); - if (!domain->pgd) - return -ENOMEM; - domain_flush_cache(domain, domain->pgd, PAGE_SIZE); - return 0; -} - static int blocking_domain_attach_dev(struct iommu_domain *domain, struct device *dev) { @@ -3488,43 +3327,9 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st return domain; } -static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) -{ - struct dmar_domain *dmar_domain; - struct iommu_domain *domain; - - switch (type) { - case IOMMU_DOMAIN_DMA: - case IOMMU_DOMAIN_UNMANAGED: - dmar_domain = alloc_domain(type); - if (!dmar_domain) { - pr_err("Can't allocate dmar_domain\n"); - return NULL; - } - if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { - pr_err("Domain initialization failed\n"); - domain_exit(dmar_domain); - return NULL; - } - - domain = &dmar_domain->domain; - domain->geometry.aperture_start = 0; - domain->geometry.aperture_end = - __DOMAIN_MAX_ADDR(dmar_domain->gaw); - domain->geometry.force_aperture = true; - - return domain; - default: - return NULL; - } - - return NULL; -} - static struct iommu_domain * -intel_iommu_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { struct device_domain_info *info = dev_iommu_priv_get(dev); bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; @@ -3532,24 +3337,31 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags, struct intel_iommu *iommu = info->iommu; struct dmar_domain *dmar_domain; struct iommu_domain *domain; - - /* Must be NESTING domain */ - if (parent) { - if (!nested_supported(iommu) || flags) - return ERR_PTR(-EOPNOTSUPP); - return intel_nested_domain_alloc(parent, user_data); - } + bool first_stage; if (flags & - (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING + | IOMMU_HWPT_FAULT_ID_VALID))) return ERR_PTR(-EOPNOTSUPP); if (nested_parent && !nested_supported(iommu)) return ERR_PTR(-EOPNOTSUPP); if (user_data || (dirty_tracking && !ssads_supported(iommu))) return ERR_PTR(-EOPNOTSUPP); - /* Do not use first stage for user domain translation. */ - dmar_domain = paging_domain_alloc(dev, false); + /* + * Always allocate the guest compatible page table unless + * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING + * is specified. 
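+ * + * (Editor's hedged summary of the branch below: nest-parent and dirty-tracking requests force second-stage tables and therefore additionally require scalable mode with second-level translation support; every other request takes whatever first_level_by_default() picks for this IOMMU.)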
+ */ + if (nested_parent || dirty_tracking) { + if (!sm_supported(iommu) || !ecap_slts(iommu->ecap)) + return ERR_PTR(-EOPNOTSUPP); + first_stage = false; + } else { + first_stage = first_level_by_default(iommu); + } + + dmar_domain = paging_domain_alloc(dev, first_stage); if (IS_ERR(dmar_domain)) return ERR_CAST(dmar_domain); domain = &dmar_domain->domain; @@ -3583,42 +3395,41 @@ static void intel_iommu_domain_free(struct iommu_domain *domain) domain_exit(dmar_domain); } -int prepare_domain_attach_device(struct iommu_domain *domain, - struct device *dev) +int paging_domain_compatible(struct iommu_domain *domain, struct device *dev) { struct device_domain_info *info = dev_iommu_priv_get(dev); struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu = info->iommu; int addr_width; + if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) + return -EPERM; + if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) return -EINVAL; if (domain->dirty_ops && !ssads_supported(iommu)) return -EINVAL; + if (dmar_domain->iommu_coherency != + iommu_paging_structure_coherency(iommu)) + return -EINVAL; + + if (dmar_domain->iommu_superpage != + iommu_superpage_capability(iommu, dmar_domain->use_first_level)) + return -EINVAL; + + if (dmar_domain->use_first_level && + (!sm_supported(iommu) || !ecap_flts(iommu->ecap))) + return -EINVAL; + /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) addr_width = cap_mgaw(iommu->cap); - if (dmar_domain->max_addr > (1LL << addr_width)) + if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw) return -EINVAL; - dmar_domain->gaw = addr_width; - - /* - * Knock out extra levels of page tables if necessary - */ - while (iommu->agaw < dmar_domain->agaw) { - struct dma_pte *pte; - - pte = dmar_domain->pgd; - if (dma_pte_present(pte)) { - dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte)); - iommu_free_page(pte); - } - dmar_domain->agaw--; - } if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) && context_copied(iommu, info->bus, info->devfn)) @@ -3634,7 +3445,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, device_block_translation(dev); - ret = prepare_domain_attach_device(domain, dev); + ret = paging_domain_compatible(domain, dev); if (ret) return ret; @@ -4252,8 +4063,8 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain, return 0; } -static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, - struct iommu_domain *domain) +void domain_remove_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) { struct device_domain_info *info = dev_iommu_priv_get(dev); struct dev_pasid_info *curr, *dev_pasid = NULL; @@ -4261,10 +4072,12 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, struct dmar_domain *dmar_domain; unsigned long flags; - if (domain->type == IOMMU_DOMAIN_IDENTITY) { - intel_pasid_tear_down_entry(iommu, dev, pasid, false); + if (!domain) + return; + + /* Identity domain has no metadata for pasid.
*/ + if (domain->type == IOMMU_DOMAIN_IDENTITY) return; - } dmar_domain = to_dmar_domain(domain); spin_lock_irqsave(&dmar_domain->lock, flags); @@ -4282,12 +4095,20 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, domain_detach_iommu(dmar_domain, iommu); intel_iommu_debugfs_remove_dev_pasid(dev_pasid); kfree(dev_pasid); - intel_pasid_tear_down_entry(iommu, dev, pasid, false); - intel_drain_pasid_prq(dev, pasid); } -static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) +static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + + intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); + domain_remove_dev_pasid(domain, dev, pasid); +} + +struct dev_pasid_info * +domain_add_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) { struct device_domain_info *info = dev_iommu_priv_get(dev); struct dmar_domain *dmar_domain = to_dmar_domain(domain); @@ -4296,22 +4117,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, unsigned long flags; int ret; - if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) - return -EOPNOTSUPP; - - if (domain->dirty_ops) - return -EINVAL; - - if (context_copied(iommu, info->bus, info->devfn)) - return -EBUSY; - - ret = prepare_domain_attach_device(domain, dev); - if (ret) - return ret; - dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL); if (!dev_pasid) - return -ENOMEM; + return ERR_PTR(-ENOMEM); ret = domain_attach_iommu(dmar_domain, iommu); if (ret) @@ -4321,31 +4129,67 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (ret) goto out_detach_iommu; - if (dmar_domain->use_first_level) - ret = domain_setup_first_level(iommu, dmar_domain, - dev, pasid); - else - ret = intel_pasid_setup_second_level(iommu, dmar_domain, - dev, pasid); - if (ret) - goto out_unassign_tag; - dev_pasid->dev = dev; dev_pasid->pasid = pasid; spin_lock_irqsave(&dmar_domain->lock, flags); list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); spin_unlock_irqrestore(&dmar_domain->lock, flags); - if (domain->type & __IOMMU_DOMAIN_PAGING) - intel_iommu_debugfs_create_dev_pasid(dev_pasid); - - return 0; -out_unassign_tag: - cache_tag_unassign_domain(dmar_domain, dev, pasid); + return dev_pasid; out_detach_iommu: domain_detach_iommu(dmar_domain, iommu); out_free: kfree(dev_pasid); + return ERR_PTR(ret); +} + +static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct intel_iommu *iommu = info->iommu; + struct dev_pasid_info *dev_pasid; + int ret; + + if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) + return -EINVAL; + + if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) + return -EOPNOTSUPP; + + if (domain->dirty_ops) + return -EINVAL; + + if (context_copied(iommu, info->bus, info->devfn)) + return -EBUSY; + + ret = paging_domain_compatible(domain, dev); + if (ret) + return ret; + + dev_pasid = domain_add_dev_pasid(domain, dev, pasid); + if (IS_ERR(dev_pasid)) + return PTR_ERR(dev_pasid); + + if (dmar_domain->use_first_level) + ret = domain_setup_first_level(iommu, dmar_domain, + dev, pasid, old); + else + ret = domain_setup_second_level(iommu, dmar_domain, + dev, pasid, old); + if (ret) + goto 
out_remove_dev_pasid; + + domain_remove_dev_pasid(old, dev, pasid); + + intel_iommu_debugfs_create_dev_pasid(dev_pasid); + + return 0; + +out_remove_dev_pasid: + domain_remove_dev_pasid(domain, dev, pasid); return ret; } @@ -4573,15 +4417,22 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device } static int identity_domain_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) { struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu = info->iommu; + int ret; if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) return -EOPNOTSUPP; - return intel_pasid_setup_pass_through(iommu, dev, pasid); + ret = domain_setup_passthrough(iommu, dev, pasid, old); + if (ret) + return ret; + + domain_remove_dev_pasid(old, dev, pasid); + return 0; } static struct iommu_domain identity_domain = { @@ -4592,15 +4443,31 @@ static struct iommu_domain identity_domain = { }, }; +static struct iommu_domain *intel_iommu_domain_alloc_paging(struct device *dev) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + struct dmar_domain *dmar_domain; + bool first_stage; + + first_stage = first_level_by_default(iommu); + dmar_domain = paging_domain_alloc(dev, first_stage); + if (IS_ERR(dmar_domain)) + return ERR_CAST(dmar_domain); + + return &dmar_domain->domain; +} + const struct iommu_ops intel_iommu_ops = { .blocked_domain = &blocking_domain, .release_domain = &blocking_domain, .identity_domain = &identity_domain, .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, - .domain_alloc = intel_iommu_domain_alloc, - .domain_alloc_user = intel_iommu_domain_alloc_user, + .domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags, .domain_alloc_sva = intel_svm_domain_alloc, + .domain_alloc_paging = intel_iommu_domain_alloc_paging, + .domain_alloc_nested = intel_iommu_domain_alloc_nested, .probe_device = intel_iommu_probe_device, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, @@ -4611,9 +4478,7 @@ const struct iommu_ops intel_iommu_ops = { .def_domain_type = device_def_domain_type, .remove_dev_pasid = intel_iommu_remove_dev_pasid, .pgsize_bitmap = SZ_4K, -#ifdef CONFIG_INTEL_IOMMU_SVM - .page_response = intel_svm_page_response, -#endif + .page_response = intel_iommu_page_response, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = intel_iommu_attach_device, .set_dev_pasid = intel_iommu_set_dev_pasid, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 1497f3112b12..6ea7bbe26b19 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -22,6 +22,7 @@ #include <linux/bitfield.h> #include <linux/xarray.h> #include <linux/perf_event.h> +#include <linux/pci.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -653,8 +654,6 @@ struct dmar_domain { struct { /* parent page table which the user domain is nested on */ struct dmar_domain *s2_domain; - /* user page table pointer (in GPA) */ - unsigned long s1_pgtbl; /* page table attributes */ struct iommu_hwpt_vtd_s1 s1_cfg; /* link to parent domain siblings */ @@ -720,7 +719,7 @@ struct intel_iommu { int msagaw; /* max sagaw of this iommu */ unsigned int irq, pr_irq, perf_irq; u16 segment; /* PCI segment# */ - unsigned char name[13]; /* Device Name */ + unsigned char name[16]; /* Device Name */ #ifdef CONFIG_INTEL_IOMMU unsigned long 
*domain_ids; /* bitmap of domains */ @@ -730,12 +729,10 @@ struct intel_iommu { struct iommu_flush flush; #endif -#ifdef CONFIG_INTEL_IOMMU_SVM struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ unsigned long prq_seq_number; struct completion prq_complete; -#endif struct iopf_queue *iopf_queue; unsigned char iopfq_name[16]; /* Synchronization between fault report and iommu device release. */ @@ -810,6 +807,13 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) return container_of(dom, struct dmar_domain, domain); } +/* + * Domain ID reserved for pasid entries programmed for first-level + * only and pass-through transfer modes. + */ +#define FLPT_DEFAULT_DID 1 +#define NUM_RESERVED_DID 2 + /* Retrieve the domain ID which has allocated to the domain */ static inline u16 domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) @@ -820,6 +824,21 @@ domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) return info->did; } +static inline u16 +iommu_domain_did(struct iommu_domain *domain, struct intel_iommu *iommu) +{ + if (domain->type == IOMMU_DOMAIN_SVA || + domain->type == IOMMU_DOMAIN_IDENTITY) + return FLPT_DEFAULT_DID; + return domain_id_iommu(to_dmar_domain(domain), iommu); +} + +static inline bool dev_is_real_dma_subdevice(struct device *dev) +{ + return dev && dev_is_pci(dev) && + pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev); +} + /* * 0: readable * 1: writable @@ -1230,15 +1249,26 @@ void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); void device_block_translation(struct device *dev); -int prepare_domain_attach_device(struct iommu_domain *domain, - struct device *dev); -void domain_update_iommu_cap(struct dmar_domain *domain); +int paging_domain_compatible(struct iommu_domain *domain, struct device *dev); + +struct dev_pasid_info * +domain_add_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid); +void domain_remove_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid); + +int __domain_setup_first_level(struct intel_iommu *iommu, + struct device *dev, ioasid_t pasid, + u16 did, pgd_t *pgd, int flags, + struct iommu_domain *old); int dmar_ir_support(void); void iommu_flush_write_buffer(struct intel_iommu *iommu); -struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, - const struct iommu_user_data *user_data); +struct iommu_domain * +intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, + u32 flags, + const struct iommu_user_data *user_data); struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid); enum cache_tag_type { @@ -1278,18 +1308,18 @@ void intel_context_flush_present(struct device_domain_info *info, struct context_entry *context, u16 did, bool affect_domains); +int intel_iommu_enable_prq(struct intel_iommu *iommu); +int intel_iommu_finish_prq(struct intel_iommu *iommu); +void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg); +void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid); + #ifdef CONFIG_INTEL_IOMMU_SVM void intel_svm_check(struct intel_iommu *iommu); -int intel_svm_enable_prq(struct intel_iommu *iommu); -int intel_svm_finish_prq(struct intel_iommu *iommu); -void intel_svm_page_response(struct device *dev, struct iopf_fault 
*evt, - struct iommu_page_response *msg); struct iommu_domain *intel_svm_domain_alloc(struct device *dev, struct mm_struct *mm); -void intel_drain_pasid_prq(struct device *dev, u32 pasid); #else static inline void intel_svm_check(struct intel_iommu *iommu) {} -static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {} static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev, struct mm_struct *mm) { diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 7a6d188e3bea..466c1412dd45 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -312,7 +312,7 @@ static int set_ioapic_sid(struct irte *irte, int apic) for (i = 0; i < MAX_IO_APICS; i++) { if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { - sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; + sid = PCI_DEVID(ir_ioapic[i].bus, ir_ioapic[i].devfn); break; } } @@ -337,7 +337,7 @@ static int set_hpet_sid(struct irte *irte, u8 id) for (i = 0; i < MAX_HPET_TBS; i++) { if (ir_hpet[i].iommu && ir_hpet[i].id == id) { - sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; + sid = PCI_DEVID(ir_hpet[i].bus, ir_hpet[i].devfn); break; } } diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c index 433c58944401..aba92c00b427 100644 --- a/drivers/iommu/intel/nested.c +++ b/drivers/iommu/intel/nested.c @@ -40,7 +40,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, * The s2_domain will be used in nested translation, hence needs * to ensure the s2_domain is compatible with this IOMMU. */ - ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain, dev); + ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev); if (ret) { dev_err_ratelimited(dev, "s2 domain is not compatible\n"); return ret; @@ -130,20 +130,77 @@ out: return ret; } +static int domain_setup_nested(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) +{ + if (!old) + return intel_pasid_setup_nested(iommu, dev, pasid, domain); + return intel_pasid_replace_nested(iommu, dev, pasid, + iommu_domain_did(old, iommu), + domain); +} + +static int intel_nested_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct intel_iommu *iommu = info->iommu; + struct dev_pasid_info *dev_pasid; + int ret; + + if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) + return -EOPNOTSUPP; + + if (context_copied(iommu, info->bus, info->devfn)) + return -EBUSY; + + ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev); + if (ret) + return ret; + + dev_pasid = domain_add_dev_pasid(domain, dev, pasid); + if (IS_ERR(dev_pasid)) + return PTR_ERR(dev_pasid); + + ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old); + if (ret) + goto out_remove_dev_pasid; + + domain_remove_dev_pasid(old, dev, pasid); + + return 0; + +out_remove_dev_pasid: + domain_remove_dev_pasid(domain, dev, pasid); + return ret; +} + static const struct iommu_domain_ops intel_nested_domain_ops = { .attach_dev = intel_nested_attach_dev, + .set_dev_pasid = intel_nested_set_dev_pasid, .free = intel_nested_domain_free, .cache_invalidate_user = intel_nested_cache_invalidate_user, }; -struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, - const struct iommu_user_data *user_data) +struct 
iommu_domain * +intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, + u32 flags, + const struct iommu_user_data *user_data) { + struct device_domain_info *info = dev_iommu_priv_get(dev); struct dmar_domain *s2_domain = to_dmar_domain(parent); + struct intel_iommu *iommu = info->iommu; struct iommu_hwpt_vtd_s1 vtd; struct dmar_domain *domain; int ret; + if (!nested_supported(iommu) || flags) + return ERR_PTR(-EOPNOTSUPP); + /* Must be nested domain */ if (user_data->type != IOMMU_HWPT_DATA_VTD_S1) return ERR_PTR(-EOPNOTSUPP); @@ -162,7 +219,6 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, domain->use_first_level = true; domain->s2_domain = s2_domain; - domain->s1_pgtbl = vtd.pgtbl_addr; domain->s1_cfg = vtd; domain->domain.ops = &intel_nested_domain_ops; domain->domain.type = IOMMU_DOMAIN_NESTED; diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 2e5fa0a23299..0f2a926d3bd5 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -220,7 +220,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu, if (pci_dev_is_disconnected(to_pci_dev(dev))) return; - sid = info->bus << 8 | info->devfn; + sid = PCI_DEVID(info->bus, info->devfn); qdep = info->ats_qdep; pfsid = info->pfsid; @@ -265,6 +265,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); devtlb_invalidation_with_pasid(iommu, dev, pasid); + intel_iommu_drain_pasid_prq(dev, pasid); } /* @@ -287,9 +288,68 @@ static void pasid_flush_caches(struct intel_iommu *iommu, } /* + * This function is supposed to be used after the caller updates the fields + * except for the SSADE and P bit of a pasid table entry. It does the + * following: + * - Flush cacheline if needed + * - Flush the caches per Table 28 "Guidance to Software for Invalidations" + * of VT-d spec 5.0. + */ +static void intel_pasid_flush_present(struct intel_iommu *iommu, + struct device *dev, + u32 pasid, u16 did, + struct pasid_entry *pte) +{ + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + /* + * VT-d spec 5.0 Table 28 gives guidance for cache invalidation: + * + * - PASID-selective-within-Domain PASID-cache invalidation + * - PASID-selective PASID-based IOTLB invalidation + * - If (pasid is RID_PASID) + * - Global Device-TLB invalidation to affected functions + * Else + * - PASID-based Device-TLB invalidation (with S=1 and + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions + */ + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + + devtlb_invalidation_with_pasid(iommu, dev, pasid); +} + +/* * Set up the scalable mode pasid table entry for first only * translation type.
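+ * + * (Editor's hedged note: as pasid_pte_config_first_level() below shows, a first-only entry mainly carries the FL page-table pointer, the 4- vs 5-level paging mode, the domain ID and the address width; PGTT is set to FL-only and the Present bit is set last.)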
*/ +static void pasid_pte_config_first_level(struct intel_iommu *iommu, + struct pasid_entry *pte, + pgd_t *pgd, u16 did, int flags) +{ + lockdep_assert_held(&iommu->lock); + + pasid_clear_entry(pte); + + /* Setup the first level page table pointer: */ + pasid_set_flptr(pte, (u64)__pa(pgd)); + + if (flags & PASID_FLAG_FL5LP) + pasid_set_flpm(pte, 1); + + if (flags & PASID_FLAG_PAGE_SNOOP) + pasid_set_pgsnp(pte); + + pasid_set_domain_id(pte, did); + pasid_set_address_width(pte, iommu->agaw); + pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + + /* Setup Present and PASID Granular Transfer Type: */ + pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY); + pasid_set_present(pte); +} + int intel_pasid_setup_first_level(struct intel_iommu *iommu, struct device *dev, pgd_t *pgd, u32 pasid, u16 did, int flags) @@ -320,53 +380,82 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, return -EBUSY; } - pasid_clear_entry(pte); + pasid_pte_config_first_level(iommu, pte, pgd, did, flags); - /* Setup the first level page table pointer: */ - pasid_set_flptr(pte, (u64)__pa(pgd)); + spin_unlock(&iommu->lock); - if (flags & PASID_FLAG_FL5LP) - pasid_set_flpm(pte, 1); + pasid_flush_caches(iommu, pte, pasid, did); - if (flags & PASID_FLAG_PAGE_SNOOP) - pasid_set_pgsnp(pte); + return 0; +} - pasid_set_domain_id(pte, did); - pasid_set_address_width(pte, iommu->agaw); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); +int intel_pasid_replace_first_level(struct intel_iommu *iommu, + struct device *dev, pgd_t *pgd, + u32 pasid, u16 did, u16 old_did, + int flags) +{ + struct pasid_entry *pte, new_pte; - /* Setup Present and PASID Granular Transfer Type: */ - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY); - pasid_set_present(pte); + if (!ecap_flts(iommu->ecap)) { + pr_err("No first level translation support on %s\n", + iommu->name); + return -EINVAL; + } + + if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) { + pr_err("No 5-level paging support for first-level on %s\n", + iommu->name); + return -EINVAL; + } + + pasid_pte_config_first_level(iommu, &new_pte, pgd, did, flags); + + spin_lock(&iommu->lock); + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + return -ENODEV; + } + + if (!pasid_pte_is_present(pte)) { + spin_unlock(&iommu->lock); + return -EINVAL; + } + + WARN_ON(old_did != pasid_get_domain_id(pte)); + + *pte = new_pte; spin_unlock(&iommu->lock); - pasid_flush_caches(iommu, pte, pasid, did); + intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); + intel_iommu_drain_pasid_prq(dev, pasid); return 0; } /* - * Skip top levels of page tables for iommu which has less agaw - * than default. Unnecessary for PT mode. + * Set up the scalable mode pasid entry for second only translation type. 
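+ * The page directory is always programmed with the domain's own AGAW now
+ * that iommu_skip_agaw() is gone; the helper below ends up invoked as:
+ *
+ *	pasid_pte_config_second_level(iommu, pte, virt_to_phys(domain->pgd),
+ *				      domain->agaw, did, domain->dirty_tracking);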
*/ -static int iommu_skip_agaw(struct dmar_domain *domain, - struct intel_iommu *iommu, - struct dma_pte **pgd) +static void pasid_pte_config_second_level(struct intel_iommu *iommu, + struct pasid_entry *pte, + u64 pgd_val, int agaw, u16 did, + bool dirty_tracking) { - int agaw; + lockdep_assert_held(&iommu->lock); - for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { - *pgd = phys_to_virt(dma_pte_addr(*pgd)); - if (!dma_pte_present(*pgd)) - return -EINVAL; - } + pasid_clear_entry(pte); + pasid_set_domain_id(pte, did); + pasid_set_slptr(pte, pgd_val); + pasid_set_address_width(pte, agaw); + pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY); + pasid_set_fault_enable(pte); + pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (dirty_tracking) + pasid_set_ssade(pte); - return agaw; + pasid_set_present(pte); } -/* - * Set up the scalable mode pasid entry for second only translation type. - */ int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid) @@ -374,7 +463,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct pasid_entry *pte; struct dma_pte *pgd; u64 pgd_val; - int agaw; u16 did; /* @@ -388,15 +476,58 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, } pgd = domain->pgd; - agaw = iommu_skip_agaw(domain, iommu, &pgd); - if (agaw < 0) { - dev_err(dev, "Invalid domain page table\n"); + pgd_val = virt_to_phys(pgd); + did = domain_id_iommu(domain, iommu); + + spin_lock(&iommu->lock); + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + return -ENODEV; + } + + if (pasid_pte_is_present(pte)) { + spin_unlock(&iommu->lock); + return -EBUSY; + } + + pasid_pte_config_second_level(iommu, pte, pgd_val, domain->agaw, + did, domain->dirty_tracking); + spin_unlock(&iommu->lock); + + pasid_flush_caches(iommu, pte, pasid, did); + + return 0; +} + +int intel_pasid_replace_second_level(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u16 old_did, + u32 pasid) +{ + struct pasid_entry *pte, new_pte; + struct dma_pte *pgd; + u64 pgd_val; + u16 did; + + /* + * If hardware advertises no support for second level + * translation, return directly. 
+ */ + if (!ecap_slts(iommu->ecap)) { + pr_err("No second level translation support on %s\n", + iommu->name); return -EINVAL; } + pgd = domain->pgd; pgd_val = virt_to_phys(pgd); did = domain_id_iommu(domain, iommu); + pasid_pte_config_second_level(iommu, &new_pte, pgd_val, + domain->agaw, did, + domain->dirty_tracking); + spin_lock(&iommu->lock); pte = intel_pasid_get_entry(dev, pasid); if (!pte) { @@ -404,25 +535,18 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, return -ENODEV; } - if (pasid_pte_is_present(pte)) { + if (!pasid_pte_is_present(pte)) { spin_unlock(&iommu->lock); - return -EBUSY; + return -EINVAL; } - pasid_clear_entry(pte); - pasid_set_domain_id(pte, did); - pasid_set_slptr(pte, pgd_val); - pasid_set_address_width(pte, agaw); - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY); - pasid_set_fault_enable(pte); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - if (domain->dirty_tracking) - pasid_set_ssade(pte); + WARN_ON(old_did != pasid_get_domain_id(pte)); - pasid_set_present(pte); + *pte = new_pte; spin_unlock(&iommu->lock); - pasid_flush_caches(iommu, pte, pasid, did); + intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); + intel_iommu_drain_pasid_prq(dev, pasid); return 0; } @@ -499,6 +623,20 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, /* * Set up the scalable mode pasid entry for passthrough translation type. */ +static void pasid_pte_config_pass_through(struct intel_iommu *iommu, + struct pasid_entry *pte, u16 did) +{ + lockdep_assert_held(&iommu->lock); + + pasid_clear_entry(pte); + pasid_set_domain_id(pte, did); + pasid_set_address_width(pte, iommu->agaw); + pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT); + pasid_set_fault_enable(pte); + pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + pasid_set_present(pte); +} + int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct device *dev, u32 pasid) { @@ -517,13 +655,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, return -EBUSY; } - pasid_clear_entry(pte); - pasid_set_domain_id(pte, did); - pasid_set_address_width(pte, iommu->agaw); - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT); - pasid_set_fault_enable(pte); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - pasid_set_present(pte); + pasid_pte_config_pass_through(iommu, pte, did); spin_unlock(&iommu->lock); pasid_flush_caches(iommu, pte, pasid, did); @@ -531,6 +663,38 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, return 0; } +int intel_pasid_replace_pass_through(struct intel_iommu *iommu, + struct device *dev, u16 old_did, + u32 pasid) +{ + struct pasid_entry *pte, new_pte; + u16 did = FLPT_DEFAULT_DID; + + pasid_pte_config_pass_through(iommu, &new_pte, did); + + spin_lock(&iommu->lock); + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + return -ENODEV; + } + + if (!pasid_pte_is_present(pte)) { + spin_unlock(&iommu->lock); + return -EINVAL; + } + + WARN_ON(old_did != pasid_get_domain_id(pte)); + + *pte = new_pte; + spin_unlock(&iommu->lock); + + intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); + intel_iommu_drain_pasid_prq(dev, pasid); + + return 0; +} + /* * Set the page snoop control for a pasid entry which has been set up. 
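+ * The required cacheline flush and PASID-cache/IOTLB/Device-TLB
+ * invalidation are performed by intel_pasid_flush_present().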
*/ @@ -551,24 +715,47 @@ void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu, did = pasid_get_domain_id(pte); spin_unlock(&iommu->lock); - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(pte, sizeof(*pte)); + intel_pasid_flush_present(iommu, dev, pasid, did, pte); +} - /* - * VT-d spec 3.4 table23 states guides for cache invalidation: - * - * - PASID-selective-within-Domain PASID-cache invalidation - * - PASID-selective PASID-based IOTLB invalidation - * - If (pasid is RID_PASID) - * - Global Device-TLB invalidation to affected functions - * Else - * - PASID-based Device-TLB invalidation (with S=1 and - * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions - */ - pasid_cache_invalidation_with_pasid(iommu, did, pasid); - qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); +static void pasid_pte_config_nestd(struct intel_iommu *iommu, + struct pasid_entry *pte, + struct iommu_hwpt_vtd_s1 *s1_cfg, + struct dmar_domain *s2_domain, + u16 did) +{ + struct dma_pte *pgd = s2_domain->pgd; - devtlb_invalidation_with_pasid(iommu, dev, pasid); + lockdep_assert_held(&iommu->lock); + + pasid_clear_entry(pte); + + if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL) + pasid_set_flpm(pte, 1); + + pasid_set_flptr(pte, s1_cfg->pgtbl_addr); + + if (s1_cfg->flags & IOMMU_VTD_S1_SRE) { + pasid_set_sre(pte); + if (s1_cfg->flags & IOMMU_VTD_S1_WPE) + pasid_set_wpe(pte); + } + + if (s1_cfg->flags & IOMMU_VTD_S1_EAFE) + pasid_set_eafe(pte); + + if (s2_domain->force_snooping) + pasid_set_pgsnp(pte); + + pasid_set_slptr(pte, virt_to_phys(pgd)); + pasid_set_fault_enable(pte); + pasid_set_domain_id(pte, did); + pasid_set_address_width(pte, s2_domain->agaw); + pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (s2_domain->dirty_tracking) + pasid_set_ssade(pte); + pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED); + pasid_set_present(pte); } /** @@ -586,10 +773,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, u32 pasid, struct dmar_domain *domain) { struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg; - pgd_t *s1_gpgd = (pgd_t *)(uintptr_t)domain->s1_pgtbl; struct dmar_domain *s2_domain = domain->s2_domain; u16 did = domain_id_iommu(domain, iommu); - struct dma_pte *pgd = s2_domain->pgd; struct pasid_entry *pte; /* Address width should match the address width supported by hardware */ @@ -632,37 +817,73 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, return -EBUSY; } - pasid_clear_entry(pte); + pasid_pte_config_nestd(iommu, pte, s1_cfg, s2_domain, did); + spin_unlock(&iommu->lock); - if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL) - pasid_set_flpm(pte, 1); + pasid_flush_caches(iommu, pte, pasid, did); - pasid_set_flptr(pte, (uintptr_t)s1_gpgd); + return 0; +} - if (s1_cfg->flags & IOMMU_VTD_S1_SRE) { - pasid_set_sre(pte); - if (s1_cfg->flags & IOMMU_VTD_S1_WPE) - pasid_set_wpe(pte); +int intel_pasid_replace_nested(struct intel_iommu *iommu, + struct device *dev, u32 pasid, + u16 old_did, struct dmar_domain *domain) +{ + struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg; + struct dmar_domain *s2_domain = domain->s2_domain; + u16 did = domain_id_iommu(domain, iommu); + struct pasid_entry *pte, new_pte; + + /* Address width should match the address width supported by hardware */ + switch (s1_cfg->addr_width) { + case ADDR_WIDTH_4LEVEL: + break; + case ADDR_WIDTH_5LEVEL: + if (!cap_fl5lp_support(iommu->cap)) { + dev_err_ratelimited(dev, + "5-level paging not supported\n"); + return -EINVAL; + } + break; + default: + dev_err_ratelimited(dev, 
"Invalid stage-1 address width %d\n", + s1_cfg->addr_width); + return -EINVAL; } - if (s1_cfg->flags & IOMMU_VTD_S1_EAFE) - pasid_set_eafe(pte); + if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) { + pr_err_ratelimited("No supervisor request support on %s\n", + iommu->name); + return -EINVAL; + } - if (s2_domain->force_snooping) - pasid_set_pgsnp(pte); + if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) { + pr_err_ratelimited("No extended access flag support on %s\n", + iommu->name); + return -EINVAL; + } - pasid_set_slptr(pte, virt_to_phys(pgd)); - pasid_set_fault_enable(pte); - pasid_set_domain_id(pte, did); - pasid_set_address_width(pte, s2_domain->agaw); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - if (s2_domain->dirty_tracking) - pasid_set_ssade(pte); - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED); - pasid_set_present(pte); + pasid_pte_config_nestd(iommu, &new_pte, s1_cfg, s2_domain, did); + + spin_lock(&iommu->lock); + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + return -ENODEV; + } + + if (!pasid_pte_is_present(pte)) { + spin_unlock(&iommu->lock); + return -EINVAL; + } + + WARN_ON(old_did != pasid_get_domain_id(pte)); + + *pte = new_pte; spin_unlock(&iommu->lock); - pasid_flush_caches(iommu, pte, pasid, did); + intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); + intel_iommu_drain_pasid_prq(dev, pasid); return 0; } diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index dde6d3ba5ae0..082f4fe20216 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -22,13 +22,6 @@ #define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1) #define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7)) -/* - * Domain ID reserved for pasid entries programmed for first-level - * only and pass-through transfer modes. 
- */ -#define FLPT_DEFAULT_DID 1 -#define NUM_RESERVED_DID 2 - #define PASID_FLAG_NESTED BIT(1) #define PASID_FLAG_PAGE_SNOOP BIT(2) @@ -303,6 +296,21 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct device *dev, u32 pasid); int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, u32 pasid, struct dmar_domain *domain); +int intel_pasid_replace_first_level(struct intel_iommu *iommu, + struct device *dev, pgd_t *pgd, + u32 pasid, u16 did, u16 old_did, + int flags); +int intel_pasid_replace_second_level(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u16 old_did, + u32 pasid); +int intel_pasid_replace_pass_through(struct intel_iommu *iommu, + struct device *dev, u16 old_did, + u32 pasid); +int intel_pasid_replace_nested(struct intel_iommu *iommu, + struct device *dev, u32 pasid, + u16 old_did, struct dmar_domain *domain); + void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore); diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c new file mode 100644 index 000000000000..c2d792db52c3 --- /dev/null +++ b/drivers/iommu/intel/prq.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015 Intel Corporation + * + * Originally split from drivers/iommu/intel/svm.c + */ + +#include <linux/pci.h> +#include <linux/pci-ats.h> + +#include "iommu.h" +#include "pasid.h" +#include "../iommu-pages.h" +#include "trace.h" + +/* Page request queue descriptor */ +struct page_req_dsc { + union { + struct { + u64 type:8; + u64 pasid_present:1; + u64 rsvd:7; + u64 rid:16; + u64 pasid:20; + u64 exe_req:1; + u64 pm_req:1; + u64 rsvd2:10; + }; + u64 qw_0; + }; + union { + struct { + u64 rd_req:1; + u64 wr_req:1; + u64 lpig:1; + u64 prg_index:9; + u64 addr:52; + }; + u64 qw_1; + }; + u64 qw_2; + u64 qw_3; +}; + +/** + * intel_iommu_drain_pasid_prq - Drain page requests and responses for a pasid + * @dev: target device + * @pasid: pasid for draining + * + * Drain all pending page requests and responses related to @pasid in both + * software and hardware. This is supposed to be called after the device + * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB + * and DevTLB have been invalidated. + * + * It waits until all pending page requests for @pasid in the page fault + * queue are completed by the prq handling thread. Then follow the steps + * described in VT-d spec CH7.10 to drain all page requests and page + * responses pending in the hardware. + */ +void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid) +{ + struct device_domain_info *info; + struct dmar_domain *domain; + struct intel_iommu *iommu; + struct qi_desc desc[3]; + int head, tail; + u16 sid, did; + + info = dev_iommu_priv_get(dev); + if (!info->pri_enabled) + return; + + iommu = info->iommu; + domain = info->domain; + sid = PCI_DEVID(info->bus, info->devfn); + did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID; + + /* + * Check and wait until all pending page requests in the queue are + * handled by the prq handling thread. 
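+	 * Any matching descriptor found between head and tail forces a
+	 * wait on prq_complete followed by a full re-scan, since the
+	 * handling thread may have made only partial progress.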
+ */ +prq_retry: + reinit_completion(&iommu->prq_complete); + tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; + head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; + while (head != tail) { + struct page_req_dsc *req; + + req = &iommu->prq[head / sizeof(*req)]; + if (!req->pasid_present || req->pasid != pasid) { + head = (head + sizeof(*req)) & PRQ_RING_MASK; + continue; + } + + wait_for_completion(&iommu->prq_complete); + goto prq_retry; + } + + iopf_queue_flush_dev(dev); + + /* + * Perform steps described in VT-d spec CH7.10 to drain page + * requests and responses in hardware. + */ + memset(desc, 0, sizeof(desc)); + desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) | + QI_IWD_FENCE | + QI_IWD_TYPE; + if (pasid == IOMMU_NO_PASID) { + qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]); + qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0, + MAX_AGAW_PFN_WIDTH, &desc[2]); + } else { + qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]); + qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep, + 0, MAX_AGAW_PFN_WIDTH, &desc[2]); + } +qi_retry: + reinit_completion(&iommu->prq_complete); + qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); + if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { + wait_for_completion(&iommu->prq_complete); + goto qi_retry; + } +} + +static bool is_canonical_address(u64 addr) +{ + int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1); + long saddr = (long)addr; + + return (((saddr << shift) >> shift) == saddr); +} + +static void handle_bad_prq_event(struct intel_iommu *iommu, + struct page_req_dsc *req, int result) +{ + struct qi_desc desc = { }; + + pr_err("%s: Invalid page request: %08llx %08llx\n", + iommu->name, ((unsigned long long *)req)[0], + ((unsigned long long *)req)[1]); + + if (!req->lpig) + return; + + desc.qw0 = QI_PGRP_PASID(req->pasid) | + QI_PGRP_DID(req->rid) | + QI_PGRP_PASID_P(req->pasid_present) | + QI_PGRP_RESP_CODE(result) | + QI_PGRP_RESP_TYPE; + desc.qw1 = QI_PGRP_IDX(req->prg_index) | + QI_PGRP_LPIG(req->lpig); + + qi_submit_sync(iommu, &desc, 1, 0); +} + +static int prq_to_iommu_prot(struct page_req_dsc *req) +{ + int prot = 0; + + if (req->rd_req) + prot |= IOMMU_FAULT_PERM_READ; + if (req->wr_req) + prot |= IOMMU_FAULT_PERM_WRITE; + if (req->exe_req) + prot |= IOMMU_FAULT_PERM_EXEC; + if (req->pm_req) + prot |= IOMMU_FAULT_PERM_PRIV; + + return prot; +} + +static void intel_prq_report(struct intel_iommu *iommu, struct device *dev, + struct page_req_dsc *desc) +{ + struct iopf_fault event = { }; + + /* Fill in event data for device specific processing */ + event.fault.type = IOMMU_FAULT_PAGE_REQ; + event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT; + event.fault.prm.pasid = desc->pasid; + event.fault.prm.grpid = desc->prg_index; + event.fault.prm.perm = prq_to_iommu_prot(desc); + + if (desc->lpig) + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + if (desc->pasid_present) { + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; + } + + iommu_report_device_fault(dev, &event); +} + +static irqreturn_t prq_event_thread(int irq, void *d) +{ + struct intel_iommu *iommu = d; + struct page_req_dsc *req; + int head, tail, handled; + struct device *dev; + u64 address; + + /* + * Clear PPR bit before reading head/tail registers, to ensure that + * we get a new interrupt if needed. 
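+	 * A request that arrives after the head/tail reads below sets
+	 * PPR again and re-raises the interrupt, so it cannot be missed.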
+ */ + writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); + + tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; + head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; + handled = (head != tail); + while (head != tail) { + req = &iommu->prq[head / sizeof(*req)]; + address = (u64)req->addr << VTD_PAGE_SHIFT; + + if (unlikely(!is_canonical_address(address))) { + pr_err("IOMMU: %s: Address is not canonical\n", + iommu->name); +bad_req: + handle_bad_prq_event(iommu, req, QI_RESP_INVALID); + goto prq_advance; + } + + if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) { + pr_err("IOMMU: %s: Page request in Privilege Mode\n", + iommu->name); + goto bad_req; + } + + if (unlikely(req->exe_req && req->rd_req)) { + pr_err("IOMMU: %s: Execution request not supported\n", + iommu->name); + goto bad_req; + } + + /* Drop Stop Marker message. No need for a response. */ + if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) + goto prq_advance; + + /* + * If prq is to be handled outside iommu driver via receiver of + * the fault notifiers, we skip the page response here. + */ + mutex_lock(&iommu->iopf_lock); + dev = device_rbtree_find(iommu, req->rid); + if (!dev) { + mutex_unlock(&iommu->iopf_lock); + goto bad_req; + } + + intel_prq_report(iommu, dev, req); + trace_prq_report(iommu, dev, req->qw_0, req->qw_1, + req->qw_2, req->qw_3, + iommu->prq_seq_number++); + mutex_unlock(&iommu->iopf_lock); +prq_advance: + head = (head + sizeof(*req)) & PRQ_RING_MASK; + } + + dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); + + /* + * Clear the page request overflow bit and wake up all threads that + * are waiting for the completion of this handling. + */ + if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { + pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", + iommu->name); + head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; + tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; + if (head == tail) { + iopf_queue_discard_partial(iommu->iopf_queue); + writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); + pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared", + iommu->name); + } + } + + if (!completion_done(&iommu->prq_complete)) + complete(&iommu->prq_complete); + + return IRQ_RETVAL(handled); +} + +int intel_iommu_enable_prq(struct intel_iommu *iommu) +{ + struct iopf_queue *iopfq; + int irq, ret; + + iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); + if (!iommu->prq) { + pr_warn("IOMMU: %s: Failed to allocate page request queue\n", + iommu->name); + return -ENOMEM; + } + + irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); + if (irq <= 0) { + pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", + iommu->name); + ret = -EINVAL; + goto free_prq; + } + iommu->pr_irq = irq; + + snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), + "dmar%d-iopfq", iommu->seq_id); + iopfq = iopf_queue_alloc(iommu->iopfq_name); + if (!iopfq) { + pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); + ret = -ENOMEM; + goto free_hwirq; + } + iommu->iopf_queue = iopfq; + + snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); + + ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT, + iommu->prq_name, iommu); + if (ret) { + pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", + iommu->name); + goto free_iopfq; + } + dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); + dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); + dmar_writeq(iommu->reg + 
DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); + + init_completion(&iommu->prq_complete); + + return 0; + +free_iopfq: + iopf_queue_free(iommu->iopf_queue); + iommu->iopf_queue = NULL; +free_hwirq: + dmar_free_hwirq(irq); + iommu->pr_irq = 0; +free_prq: + iommu_free_pages(iommu->prq, PRQ_ORDER); + iommu->prq = NULL; + + return ret; +} + +int intel_iommu_finish_prq(struct intel_iommu *iommu) +{ + dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); + dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); + dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); + + if (iommu->pr_irq) { + free_irq(iommu->pr_irq, iommu); + dmar_free_hwirq(iommu->pr_irq); + iommu->pr_irq = 0; + } + + if (iommu->iopf_queue) { + iopf_queue_free(iommu->iopf_queue); + iommu->iopf_queue = NULL; + } + + iommu_free_pages(iommu->prq, PRQ_ORDER); + iommu->prq = NULL; + + return 0; +} + +void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + u8 bus = info->bus, devfn = info->devfn; + struct iommu_fault_page_request *prm; + struct qi_desc desc; + bool pasid_present; + bool last_page; + u16 sid; + + prm = &evt->fault.prm; + sid = PCI_DEVID(bus, devfn); + pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + + desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | + QI_PGRP_PASID_P(pasid_present) | + QI_PGRP_RESP_CODE(msg->code) | + QI_PGRP_RESP_TYPE; + desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); + desc.qw2 = 0; + desc.qw3 = 0; + + qi_submit_sync(iommu, &desc, 1, 0); +} diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 078d1e32a24e..f5569347591f 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -25,92 +25,6 @@ #include "../iommu-pages.h" #include "trace.h" -static irqreturn_t prq_event_thread(int irq, void *d); - -int intel_svm_enable_prq(struct intel_iommu *iommu) -{ - struct iopf_queue *iopfq; - int irq, ret; - - iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); - if (!iommu->prq) { - pr_warn("IOMMU: %s: Failed to allocate page request queue\n", - iommu->name); - return -ENOMEM; - } - - irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); - if (irq <= 0) { - pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", - iommu->name); - ret = -EINVAL; - goto free_prq; - } - iommu->pr_irq = irq; - - snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), - "dmar%d-iopfq", iommu->seq_id); - iopfq = iopf_queue_alloc(iommu->iopfq_name); - if (!iopfq) { - pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); - ret = -ENOMEM; - goto free_hwirq; - } - iommu->iopf_queue = iopfq; - - snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); - - ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT, - iommu->prq_name, iommu); - if (ret) { - pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", - iommu->name); - goto free_iopfq; - } - dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); - dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); - dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); - - init_completion(&iommu->prq_complete); - - return 0; - -free_iopfq: - iopf_queue_free(iommu->iopf_queue); - iommu->iopf_queue = NULL; -free_hwirq: - dmar_free_hwirq(irq); - iommu->pr_irq = 0; -free_prq: - 
iommu_free_pages(iommu->prq, PRQ_ORDER); - iommu->prq = NULL; - - return ret; -} - -int intel_svm_finish_prq(struct intel_iommu *iommu) -{ - dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); - dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); - dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); - - if (iommu->pr_irq) { - free_irq(iommu->pr_irq, iommu); - dmar_free_hwirq(iommu->pr_irq); - iommu->pr_irq = 0; - } - - if (iommu->iopf_queue) { - iopf_queue_free(iommu->iopf_queue); - iommu->iopf_queue = NULL; - } - - iommu_free_pages(iommu->prq, PRQ_ORDER); - iommu->prq = NULL; - - return 0; -} - void intel_svm_check(struct intel_iommu *iommu) { if (!pasid_supported(iommu)) @@ -197,360 +111,37 @@ static const struct mmu_notifier_ops intel_mmuops = { }; static int intel_svm_set_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) { struct device_domain_info *info = dev_iommu_priv_get(dev); - struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu = info->iommu; struct mm_struct *mm = domain->mm; struct dev_pasid_info *dev_pasid; unsigned long sflags; - unsigned long flags; int ret = 0; - dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL); - if (!dev_pasid) - return -ENOMEM; - - dev_pasid->dev = dev; - dev_pasid->pasid = pasid; - - ret = cache_tag_assign_domain(to_dmar_domain(domain), dev, pasid); - if (ret) - goto free_dev_pasid; + dev_pasid = domain_add_dev_pasid(domain, dev, pasid); + if (IS_ERR(dev_pasid)) + return PTR_ERR(dev_pasid); /* Setup the pasid table: */ sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0; - ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid, - FLPT_DEFAULT_DID, sflags); + ret = __domain_setup_first_level(iommu, dev, pasid, + FLPT_DEFAULT_DID, mm->pgd, + sflags, old); if (ret) - goto unassign_tag; + goto out_remove_dev_pasid; - spin_lock_irqsave(&dmar_domain->lock, flags); - list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); - spin_unlock_irqrestore(&dmar_domain->lock, flags); + domain_remove_dev_pasid(old, dev, pasid); return 0; -unassign_tag: - cache_tag_unassign_domain(to_dmar_domain(domain), dev, pasid); -free_dev_pasid: - kfree(dev_pasid); - +out_remove_dev_pasid: + domain_remove_dev_pasid(domain, dev, pasid); return ret; } -/* Page request queue descriptor */ -struct page_req_dsc { - union { - struct { - u64 type:8; - u64 pasid_present:1; - u64 rsvd:7; - u64 rid:16; - u64 pasid:20; - u64 exe_req:1; - u64 pm_req:1; - u64 rsvd2:10; - }; - u64 qw_0; - }; - union { - struct { - u64 rd_req:1; - u64 wr_req:1; - u64 lpig:1; - u64 prg_index:9; - u64 addr:52; - }; - u64 qw_1; - }; - u64 qw_2; - u64 qw_3; -}; - -static bool is_canonical_address(u64 addr) -{ - int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1); - long saddr = (long) addr; - - return (((saddr << shift) >> shift) == saddr); -} - -/** - * intel_drain_pasid_prq - Drain page requests and responses for a pasid - * @dev: target device - * @pasid: pasid for draining - * - * Drain all pending page requests and responses related to @pasid in both - * software and hardware. This is supposed to be called after the device - * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB - * and DevTLB have been invalidated. - * - * It waits until all pending page requests for @pasid in the page fault - * queue are completed by the prq handling thread. 
Then follow the steps - * described in VT-d spec CH7.10 to drain all page requests and page - * responses pending in the hardware. - */ -void intel_drain_pasid_prq(struct device *dev, u32 pasid) -{ - struct device_domain_info *info; - struct dmar_domain *domain; - struct intel_iommu *iommu; - struct qi_desc desc[3]; - struct pci_dev *pdev; - int head, tail; - u16 sid, did; - int qdep; - - info = dev_iommu_priv_get(dev); - if (WARN_ON(!info || !dev_is_pci(dev))) - return; - - if (!info->pri_enabled) - return; - - iommu = info->iommu; - domain = info->domain; - pdev = to_pci_dev(dev); - sid = PCI_DEVID(info->bus, info->devfn); - did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID; - qdep = pci_ats_queue_depth(pdev); - - /* - * Check and wait until all pending page requests in the queue are - * handled by the prq handling thread. - */ -prq_retry: - reinit_completion(&iommu->prq_complete); - tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; - head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; - while (head != tail) { - struct page_req_dsc *req; - - req = &iommu->prq[head / sizeof(*req)]; - if (!req->pasid_present || req->pasid != pasid) { - head = (head + sizeof(*req)) & PRQ_RING_MASK; - continue; - } - - wait_for_completion(&iommu->prq_complete); - goto prq_retry; - } - - iopf_queue_flush_dev(dev); - - /* - * Perform steps described in VT-d spec CH7.10 to drain page - * requests and responses in hardware. - */ - memset(desc, 0, sizeof(desc)); - desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) | - QI_IWD_FENCE | - QI_IWD_TYPE; - desc[1].qw0 = QI_EIOTLB_PASID(pasid) | - QI_EIOTLB_DID(did) | - QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | - QI_EIOTLB_TYPE; - desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) | - QI_DEV_EIOTLB_SID(sid) | - QI_DEV_EIOTLB_QDEP(qdep) | - QI_DEIOTLB_TYPE | - QI_DEV_IOTLB_PFSID(info->pfsid); -qi_retry: - reinit_completion(&iommu->prq_complete); - qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); - if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { - wait_for_completion(&iommu->prq_complete); - goto qi_retry; - } -} - -static int prq_to_iommu_prot(struct page_req_dsc *req) -{ - int prot = 0; - - if (req->rd_req) - prot |= IOMMU_FAULT_PERM_READ; - if (req->wr_req) - prot |= IOMMU_FAULT_PERM_WRITE; - if (req->exe_req) - prot |= IOMMU_FAULT_PERM_EXEC; - if (req->pm_req) - prot |= IOMMU_FAULT_PERM_PRIV; - - return prot; -} - -static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, - struct page_req_dsc *desc) -{ - struct iopf_fault event = { }; - - /* Fill in event data for device specific processing */ - event.fault.type = IOMMU_FAULT_PAGE_REQ; - event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT; - event.fault.prm.pasid = desc->pasid; - event.fault.prm.grpid = desc->prg_index; - event.fault.prm.perm = prq_to_iommu_prot(desc); - - if (desc->lpig) - event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; - if (desc->pasid_present) { - event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; - event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; - } - - iommu_report_device_fault(dev, &event); -} - -static void handle_bad_prq_event(struct intel_iommu *iommu, - struct page_req_dsc *req, int result) -{ - struct qi_desc desc = { }; - - pr_err("%s: Invalid page request: %08llx %08llx\n", - iommu->name, ((unsigned long long *)req)[0], - ((unsigned long long *)req)[1]); - - if (!req->lpig) - return; - - desc.qw0 = QI_PGRP_PASID(req->pasid) | - QI_PGRP_DID(req->rid) | - QI_PGRP_PASID_P(req->pasid_present) | - 
QI_PGRP_RESP_CODE(result) | - QI_PGRP_RESP_TYPE; - desc.qw1 = QI_PGRP_IDX(req->prg_index) | - QI_PGRP_LPIG(req->lpig); - - qi_submit_sync(iommu, &desc, 1, 0); -} - -static irqreturn_t prq_event_thread(int irq, void *d) -{ - struct intel_iommu *iommu = d; - struct page_req_dsc *req; - int head, tail, handled; - struct device *dev; - u64 address; - - /* - * Clear PPR bit before reading head/tail registers, to ensure that - * we get a new interrupt if needed. - */ - writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); - - tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; - head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; - handled = (head != tail); - while (head != tail) { - req = &iommu->prq[head / sizeof(*req)]; - address = (u64)req->addr << VTD_PAGE_SHIFT; - - if (unlikely(!req->pasid_present)) { - pr_err("IOMMU: %s: Page request without PASID\n", - iommu->name); -bad_req: - handle_bad_prq_event(iommu, req, QI_RESP_INVALID); - goto prq_advance; - } - - if (unlikely(!is_canonical_address(address))) { - pr_err("IOMMU: %s: Address is not canonical\n", - iommu->name); - goto bad_req; - } - - if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) { - pr_err("IOMMU: %s: Page request in Privilege Mode\n", - iommu->name); - goto bad_req; - } - - if (unlikely(req->exe_req && req->rd_req)) { - pr_err("IOMMU: %s: Execution request not supported\n", - iommu->name); - goto bad_req; - } - - /* Drop Stop Marker message. No need for a response. */ - if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) - goto prq_advance; - - /* - * If prq is to be handled outside iommu driver via receiver of - * the fault notifiers, we skip the page response here. - */ - mutex_lock(&iommu->iopf_lock); - dev = device_rbtree_find(iommu, req->rid); - if (!dev) { - mutex_unlock(&iommu->iopf_lock); - goto bad_req; - } - - intel_svm_prq_report(iommu, dev, req); - trace_prq_report(iommu, dev, req->qw_0, req->qw_1, - req->qw_2, req->qw_3, - iommu->prq_seq_number++); - mutex_unlock(&iommu->iopf_lock); -prq_advance: - head = (head + sizeof(*req)) & PRQ_RING_MASK; - } - - dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); - - /* - * Clear the page request overflow bit and wake up all threads that - * are waiting for the completion of this handling. 
- */ - if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { - pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", - iommu->name); - head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; - tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; - if (head == tail) { - iopf_queue_discard_partial(iommu->iopf_queue); - writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); - pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared", - iommu->name); - } - } - - if (!completion_done(&iommu->prq_complete)) - complete(&iommu->prq_complete); - - return IRQ_RETVAL(handled); -} - -void intel_svm_page_response(struct device *dev, struct iopf_fault *evt, - struct iommu_page_response *msg) -{ - struct device_domain_info *info = dev_iommu_priv_get(dev); - struct intel_iommu *iommu = info->iommu; - u8 bus = info->bus, devfn = info->devfn; - struct iommu_fault_page_request *prm; - struct qi_desc desc; - bool pasid_present; - bool last_page; - u16 sid; - - prm = &evt->fault.prm; - sid = PCI_DEVID(bus, devfn); - pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; - last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; - - desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | - QI_PGRP_PASID_P(pasid_present) | - QI_PGRP_RESP_CODE(msg->code) | - QI_PGRP_RESP_TYPE; - desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); - desc.qw2 = 0; - desc.qw3 = 0; - - qi_submit_sync(iommu, &desc, 1, 0); -} - static void intel_svm_domain_free(struct iommu_domain *domain) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 06ffc683b28f..523355e91a2c 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -166,7 +166,6 @@ struct arm_v7s_io_pgtable { arm_v7s_iopte *pgd; struct kmem_cache *l2_tables; - spinlock_t split_lock; }; static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl); @@ -363,25 +362,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, return pte; } -static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) -{ - int prot = IOMMU_READ; - arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); - - if (!(attr & ARM_V7S_PTE_AP_RDONLY)) - prot |= IOMMU_WRITE; - if (!(attr & ARM_V7S_PTE_AP_UNPRIV)) - prot |= IOMMU_PRIV; - if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) - prot |= IOMMU_MMIO; - else if (pte & ARM_V7S_ATTR_C) - prot |= IOMMU_CACHE; - if (pte & ARM_V7S_ATTR_XN(lvl)) - prot |= IOMMU_NOEXEC; - - return prot; -} - static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl) { if (lvl == 1) { @@ -398,23 +378,6 @@ static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl) return pte; } -static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl) -{ - if (lvl == 1) { - pte &= ~ARM_V7S_CONT_SECTION; - } else if (lvl == 2) { - arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT); - arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK << - ARM_V7S_CONT_PAGE_TEX_SHIFT); - - pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE; - pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) | - (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) | - ARM_V7S_PTE_TYPE_PAGE; - } - return pte; -} - static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl) { if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl)) @@ -591,77 +554,6 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop) kfree(data); } -static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, - unsigned long iova, int idx, int lvl, - arm_v7s_iopte 
*ptep) -{ - struct io_pgtable *iop = &data->iop; - arm_v7s_iopte pte; - size_t size = ARM_V7S_BLOCK_SIZE(lvl); - int i; - - /* Check that we didn't lose a race to get the lock */ - pte = *ptep; - if (!arm_v7s_pte_is_cont(pte, lvl)) - return pte; - - ptep -= idx & (ARM_V7S_CONT_PAGES - 1); - pte = arm_v7s_cont_to_pte(pte, lvl); - for (i = 0; i < ARM_V7S_CONT_PAGES; i++) - ptep[i] = pte + i * size; - - __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); - - size *= ARM_V7S_CONT_PAGES; - io_pgtable_tlb_flush_walk(iop, iova, size, size); - return pte; -} - -static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, - struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size, - arm_v7s_iopte blk_pte, - arm_v7s_iopte *ptep) -{ - struct io_pgtable_cfg *cfg = &data->iop.cfg; - arm_v7s_iopte pte, *tablep; - int i, unmap_idx, num_entries, num_ptes; - - tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data); - if (!tablep) - return 0; /* Bytes unmapped */ - - num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg); - num_entries = size >> ARM_V7S_LVL_SHIFT(2); - unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg); - - pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg); - if (num_entries > 1) - pte = arm_v7s_pte_to_cont(pte, 2); - - for (i = 0; i < num_ptes; i += num_entries, pte += size) { - /* Unmap! */ - if (i == unmap_idx) - continue; - - __arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg); - } - - pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg); - if (pte != blk_pte) { - __arm_v7s_free_table(tablep, 2, data); - - if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) - return 0; - - tablep = iopte_deref(pte, 1, data); - return __arm_v7s_unmap(data, gather, iova, size, 2, tablep); - } - - io_pgtable_tlb_add_page(&data->iop, gather, iova, size); - return size; -} - static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, @@ -694,11 +586,8 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, * case in a lock for the sake of correctness and be done with it. */ if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) { - unsigned long flags; - - spin_lock_irqsave(&data->split_lock, flags); - pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep); - spin_unlock_irqrestore(&data->split_lock, flags); + WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed"); + return 0; } /* If the size matches this level, we're in the right place */ @@ -721,12 +610,8 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, } return size; } else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) { - /* - * Insert a table at the next level to map the old region, - * minus the part we want to unmap - */ - return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0], - ptep); + WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed"); + return 0; } /* Keep on walkin' */ @@ -811,8 +696,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (!data) return NULL; - spin_lock_init(&data->split_lock); - /* * ARM_MTK_TTBR_EXT extend the translation table base support larger * memory address. 
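
Both ARM io-pgtable formats now refuse to split a large leaf entry on unmap
(the matching LPAE change appears further down). A minimal sketch of the new
caller-visible contract, using v7s sizes and placeholder iova/paddr/mapped
variables:

	/* Map one 1MB section, then try to unmap a 4K page inside it. */
	ops->map_pages(ops, iova, paddr, SZ_1M, 1, IOMMU_READ, GFP_KERNEL, &mapped);
	unmapped = ops->unmap_pages(ops, iova + SZ_4K, SZ_4K, 1, NULL);
	/* Splitting is gone: this WARNs once and unmapped == 0. */

Callers must unmap with the same granularity they mapped with.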
@@ -936,8 +819,8 @@ static int __init arm_v7s_do_selftests(void)
 		.quirks = IO_PGTABLE_QUIRK_ARM_NS,
 		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 	};
-	unsigned int iova, size, iova_start;
-	unsigned int i, loopnr = 0;
+	unsigned int iova, size;
+	unsigned int i;
 	size_t mapped;
 
 	selftest_running = true;
@@ -985,26 +868,6 @@ static int __init arm_v7s_do_selftests(void)
 			return __FAIL(ops);
 
 		iova += SZ_16M;
-		loopnr++;
-	}
-
-	/* Partial unmap */
-	i = 1;
-	size = 1UL << __ffs(cfg.pgsize_bitmap);
-	while (i < loopnr) {
-		iova_start = i * SZ_16M;
-		if (ops->unmap_pages(ops, iova_start + size, size, 1, NULL) != size)
-			return __FAIL(ops);
-
-		/* Remap of partial unmap */
-		if (ops->map_pages(ops, iova_start + size, size, size, 1,
-				   IOMMU_READ, GFP_KERNEL, &mapped))
-			return __FAIL(ops);
-
-		if (ops->iova_to_phys(ops, iova_start + size + 42)
-		    != (size + 42))
-			return __FAIL(ops);
-		i++;
 	}
 
 	/* Full unmap */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0e67f1721a3d..6b9bb58a414f 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -106,6 +106,18 @@
 #define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
 #define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
 #define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
+/*
+ * For !FWB these encode to:
+ *	1111 = Normal outer write-back cacheable / Inner Write-Back Cacheable
+ *	       Permit S1 to override
+ *	0101 = Normal Non-cacheable / Inner Non-cacheable
+ *	0001 = Device / Device-nGnRE
+ * For S2FWB these encode to:
+ *	0110   Force Normal Write-Back
+ *	0101   Normal* is forced Normal-NC, Device unchanged
+ *	0001   Force Device-nGnRE
+ */
+#define ARM_LPAE_PTE_MEMATTR_FWB_WB	(((arm_lpae_iopte)0x6) << 2)
 #define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
 #define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
 #define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
@@ -199,6 +211,18 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
 }
 
+/*
+ * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
+ * a concatenated PGD, into the maximum number of entries that can be
+ * mapped in the same table page.
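+ *
+ * e.g. with 512 PTEs per table page, i == 510 leaves room for at most
+ * two entries: 512 - (510 & 511) == 2.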
+ */ +static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data) +{ + int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data); + + return ptes_per_table - (i & (ptes_per_table - 1)); +} + static bool selftest_running = false; static dma_addr_t __arm_lpae_dma_addr(void *pages) @@ -390,7 +414,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* If we can install a leaf entry at this level, then do so */ if (size == block_size) { - max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start; + max_entries = arm_lpae_max_entries(map_idx_start, data); num_entries = min_t(int, pgcount, max_entries); ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep); if (!ret) @@ -458,12 +482,16 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, */ if (data->iop.fmt == ARM_64_LPAE_S2 || data->iop.fmt == ARM_32_LPAE_S2) { - if (prot & IOMMU_MMIO) + if (prot & IOMMU_MMIO) { pte |= ARM_LPAE_PTE_MEMATTR_DEV; - else if (prot & IOMMU_CACHE) - pte |= ARM_LPAE_PTE_MEMATTR_OIWB; - else + } else if (prot & IOMMU_CACHE) { + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB) + pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB; + else + pte |= ARM_LPAE_PTE_MEMATTR_OIWB; + } else { pte |= ARM_LPAE_PTE_MEMATTR_NC; + } } else { if (prot & IOMMU_MMIO) pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV @@ -569,66 +597,6 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) kfree(data); } -static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, - struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size, - arm_lpae_iopte blk_pte, int lvl, - arm_lpae_iopte *ptep, size_t pgcount) -{ - struct io_pgtable_cfg *cfg = &data->iop.cfg; - arm_lpae_iopte pte, *tablep; - phys_addr_t blk_paddr; - size_t tablesz = ARM_LPAE_GRANULE(data); - size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); - int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data); - int i, unmap_idx_start = -1, num_entries = 0, max_entries; - - if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) - return 0; - - tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie); - if (!tablep) - return 0; /* Bytes unmapped */ - - if (size == split_sz) { - unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); - max_entries = ptes_per_table - unmap_idx_start; - num_entries = min_t(int, pgcount, max_entries); - } - - blk_paddr = iopte_to_paddr(blk_pte, data); - pte = iopte_prot(blk_pte); - - for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) { - /* Unmap! */ - if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries)) - continue; - - __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]); - } - - pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); - if (pte != blk_pte) { - __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie); - /* - * We may race against someone unmapping another part of this - * block, but anything else is invalid. We can't misinterpret - * a page entry here since we're never at the last level. 
-		 */
-		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
-			return 0;
-
-		tablep = iopte_deref(pte, data);
-	} else if (unmap_idx_start >= 0) {
-		for (i = 0; i < num_entries; i++)
-			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
-
-		return num_entries * size;
-	}
-
-	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
-}
-
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			       struct iommu_iotlb_gather *gather,
 			       unsigned long iova, size_t size, size_t pgcount,
@@ -650,7 +618,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 	/* If the size matches this level, we're in the right place */
 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
-		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
+		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
 		num_entries = min_t(int, pgcount, max_entries);
 
 		/* Find and handle non-leaf entries */
@@ -678,12 +646,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 		return i * size;
 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
-		/*
-		 * Insert a table at the next level to map the old region,
-		 * minus the part we want to unmap
-		 */
-		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
-						lvl + 1, ptep, pgcount);
+		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
+		return 0;
 	}
 
 	/* Keep on walkin' */
@@ -1035,8 +999,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	struct arm_lpae_io_pgtable *data;
 	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
-	/* The NS quirk doesn't apply at stage 2 */
-	if (cfg->quirks)
+	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -1347,19 +1310,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		iova += SZ_1G;
 	}
 
-	/* Partial unmap */
-	size = 1UL << __ffs(cfg->pgsize_bitmap);
-	if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
-		return __FAIL(ops, i);
-
-	/* Remap of partial unmap */
-	if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
-			   IOMMU_READ, GFP_KERNEL, &mapped))
-		return __FAIL(ops, i);
-
-	if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
-		return __FAIL(ops, i);
-
 	/* Full unmap */
 	iova = 0;
 	for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
@@ -1382,6 +1332,23 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		iova += SZ_1G;
 	}
 
+	/*
+	 * Map/unmap the largest supported page at the very top of the IAS;
+	 * this can trigger corner cases in the concatenated page tables.
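+	 * (For instance, a 40-bit IAS whose largest page size is SZ_1G
+	 * exercises iova == (1UL << 40) - SZ_1G; the actual values depend
+	 * on the cfg under test.)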
+ */ + mapped = 0; + size = 1UL << __fls(cfg->pgsize_bitmap); + iova = (1UL << cfg->ias) - size; + if (ops->map_pages(ops, iova, iova, size, 1, + IOMMU_READ | IOMMU_WRITE | + IOMMU_NOEXEC | IOMMU_CACHE, + GFP_KERNEL, &mapped)) + return __FAIL(ops, i); + if (mapped != size) + return __FAIL(ops, i); + if (ops->unmap_pages(ops, iova, size, 1, NULL) != size) + return __FAIL(ops, i); + free_io_pgtable_ops(ops); } diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index cbe378c34ba3..170022c09536 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -34,7 +34,7 @@ static void release_device(struct device *dev) kfree(dev); } -static struct class iommu_class = { +static const struct class iommu_class = { .name = "iommu", .dev_release = release_device, .dev_groups = dev_groups, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 83c8e617a2c5..9bc0c74cca3c 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -32,6 +32,7 @@ #include <trace/events/iommu.h> #include <linux/sched/mm.h> #include <linux/msi.h> +#include <uapi/linux/iommufd.h> #include "dma-iommu.h" #include "iommu-priv.h" @@ -90,15 +91,17 @@ static const char * const iommu_group_resv_type_string[] = { #define IOMMU_CMD_LINE_DMA_API BIT(0) #define IOMMU_CMD_LINE_STRICT BIT(1) +static int bus_iommu_probe(const struct bus_type *bus); static int iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data); static void iommu_release_device(struct device *dev); -static struct iommu_domain * -__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type); static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev); static int __iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group); +static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev, + unsigned int type, + unsigned int flags); enum { IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0, @@ -133,6 +136,8 @@ static struct group_device *iommu_group_alloc_device(struct iommu_group *group, struct device *dev); static void __iommu_group_free_device(struct iommu_group *group, struct group_device *grp_dev); +static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, + const struct iommu_ops *ops); #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ @@ -1141,10 +1146,6 @@ map_end: } } - - if (!list_empty(&mappings) && iommu_is_dma_domain(domain)) - iommu_flush_iotlb_all(domain); - out: iommu_put_resv_regions(dev, &mappings); @@ -1586,12 +1587,59 @@ struct iommu_group *fsl_mc_device_group(struct device *dev) } EXPORT_SYMBOL_GPL(fsl_mc_device_group); +static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev) +{ + const struct iommu_ops *ops = dev_iommu_ops(dev); + struct iommu_domain *domain; + + if (ops->identity_domain) + return ops->identity_domain; + + /* Older drivers create the identity domain via ops->domain_alloc() */ + if (!ops->domain_alloc) + return ERR_PTR(-EOPNOTSUPP); + + domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY); + if (IS_ERR(domain)) + return domain; + if (!domain) + return ERR_PTR(-ENOMEM); + + iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops); + return domain; +} + static struct iommu_domain * __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) { + struct device *dev = iommu_group_first_dev(group); + struct iommu_domain *dom; + if (group->default_domain && group->default_domain->type == req_type) 
return group->default_domain; - return __iommu_group_domain_alloc(group, req_type); + + /* + * When allocating the DMA API domain assume that the driver is going to + * use PASID and make sure the RID's domain is PASID compatible. + */ + if (req_type & __IOMMU_DOMAIN_PAGING) { + dom = __iommu_paging_domain_alloc_flags(dev, req_type, + dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0); + + /* + * If driver does not support PASID feature then + * try to allocate non-PASID domain + */ + if (PTR_ERR(dom) == -EOPNOTSUPP) + dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0); + + return dom; + } + + if (req_type == IOMMU_DOMAIN_IDENTITY) + return __iommu_alloc_identity_domain(dev); + + return ERR_PTR(-EINVAL); } /* @@ -1795,7 +1843,7 @@ static void iommu_group_do_probe_finalize(struct device *dev) ops->probe_finalize(dev); } -int bus_iommu_probe(const struct bus_type *bus) +static int bus_iommu_probe(const struct bus_type *bus) { struct iommu_group *group, *next; LIST_HEAD(group_list); @@ -1841,31 +1889,6 @@ int bus_iommu_probe(const struct bus_type *bus) } /** - * iommu_present() - make platform-specific assumptions about an IOMMU - * @bus: bus to check - * - * Do not use this function. You want device_iommu_mapped() instead. - * - * Return: true if some IOMMU is present and aware of devices on the given bus; - * in general it may not be the only IOMMU, and it may not have anything to do - * with whatever device you are ultimately interested in. - */ -bool iommu_present(const struct bus_type *bus) -{ - bool ret = false; - - for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { - if (iommu_buses[i] == bus) { - spin_lock(&iommu_device_lock); - ret = !list_empty(&iommu_device_list); - spin_unlock(&iommu_device_lock); - } - } - return ret; -} -EXPORT_SYMBOL_GPL(iommu_present); - -/** * device_iommu_capable() - check for a general IOMMU capability * @dev: device to which the capability would be relevant, if available * @cap: IOMMU capability @@ -1934,117 +1957,67 @@ void iommu_set_fault_handler(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_set_fault_handler); -static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops, - struct device *dev, - unsigned int type) +static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, + const struct iommu_ops *ops) { - struct iommu_domain *domain; - unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; - - if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) - return ops->identity_domain; - else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain) - return ops->blocked_domain; - else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) - domain = ops->domain_alloc_paging(dev); - else if (ops->domain_alloc) - domain = ops->domain_alloc(alloc_type); - else - return ERR_PTR(-EOPNOTSUPP); - - /* - * Many domain_alloc ops now return ERR_PTR, make things easier for the - * driver by accepting ERR_PTR from all domain_alloc ops instead of - * having two rules. 
- */ - if (IS_ERR(domain)) - return domain; - if (!domain) - return ERR_PTR(-ENOMEM); - domain->type = type; domain->owner = ops; + if (!domain->ops) + domain->ops = ops->default_domain_ops; + /* * If not already set, assume all sizes by default; the driver * may override this later */ if (!domain->pgsize_bitmap) domain->pgsize_bitmap = ops->pgsize_bitmap; - - if (!domain->ops) - domain->ops = ops->default_domain_ops; - - if (iommu_is_dma_domain(domain)) { - int rc; - - rc = iommu_get_dma_cookie(domain); - if (rc) { - iommu_domain_free(domain); - return ERR_PTR(rc); - } - } - return domain; } static struct iommu_domain * -__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) -{ - struct device *dev = iommu_group_first_dev(group); - - return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type); -} - -static int __iommu_domain_alloc_dev(struct device *dev, void *data) +__iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, + unsigned int flags) { - const struct iommu_ops **ops = data; + const struct iommu_ops *ops; + struct iommu_domain *domain; if (!dev_has_iommu(dev)) - return 0; - - if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev), - "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n", - dev_bus_name(dev))) - return -EBUSY; - - *ops = dev_iommu_ops(dev); - return 0; -} + return ERR_PTR(-ENODEV); -/* - * The iommu ops in bus has been retired. Do not use this interface in - * new drivers. - */ -struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) -{ - const struct iommu_ops *ops = NULL; - int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev); - struct iommu_domain *domain; + ops = dev_iommu_ops(dev); - if (err || !ops) - return NULL; + if (ops->domain_alloc_paging && !flags) + domain = ops->domain_alloc_paging(dev); + else if (ops->domain_alloc_paging_flags) + domain = ops->domain_alloc_paging_flags(dev, flags, NULL); + else if (ops->domain_alloc && !flags) + domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); + else + return ERR_PTR(-EOPNOTSUPP); - domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED); if (IS_ERR(domain)) - return NULL; + return domain; + if (!domain) + return ERR_PTR(-ENOMEM); + + iommu_domain_init(domain, type, ops); return domain; } -EXPORT_SYMBOL_GPL(iommu_domain_alloc); /** - * iommu_paging_domain_alloc() - Allocate a paging domain + * iommu_paging_domain_alloc_flags() - Allocate a paging domain * @dev: device for which the domain is allocated + * @flags: Bitmap of iommufd_hwpt_alloc_flags * * Allocate a paging domain which will be managed by a kernel driver. Return - * allocated domain if successful, or a ERR pointer for failure. + * allocated domain if successful, or an ERR pointer for failure. 
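
A hedged usage sketch of the newly exported wrapper (the caller is hypothetical; whether a given driver honors IOMMU_HWPT_ALLOC_PASID depends on its domain_alloc_paging_flags support):

static int example_driver_probe(struct device *dev)
{
	struct iommu_domain *domain;
	int rc;

	domain = iommu_paging_domain_alloc_flags(dev, IOMMU_HWPT_ALLOC_PASID);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	rc = iommu_attach_device(domain, dev);
	if (rc)
		iommu_domain_free(domain);
	return rc;
}
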
 */
-struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
-{
-	if (!dev_has_iommu(dev))
-		return ERR_PTR(-ENODEV);
-
-	return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED);
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+						     unsigned int flags)
+{
+	return __iommu_paging_domain_alloc_flags(dev,
+						 IOMMU_DOMAIN_UNMANAGED, flags);
 }
-EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc);
+EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
@@ -2216,8 +2189,8 @@ EXPORT_SYMBOL_GPL(iommu_attach_group);
 
 /**
  * iommu_group_replace_domain - replace the domain that a group is attached to
- * @new_domain: new IOMMU domain to replace with
  * @group: IOMMU group that will be attached to the new domain
+ * @new_domain: new IOMMU domain to replace with
  *
  * This API allows the group to switch domains without being forced to go to
  * the blocking domain in-between.
@@ -2586,6 +2559,20 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	return unmapped;
 }
 
+/**
+ * iommu_unmap() - Remove mappings from a range of IOVA
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @size: Length of the range starting from @iova
+ *
+ * iommu_unmap() will remove a translation created by iommu_map(). It cannot
+ * subdivide a mapping created by iommu_map(), so it should be called with IOVA
+ * ranges that match what was passed to iommu_map(). The range can aggregate
+ * contiguous iommu_map() calls so long as no individual range is split.
+ *
+ * Returns: Number of bytes of IOVA unmapped. iova + res will be the point at
+ * which unmapping stopped.
+ */
 size_t iommu_unmap(struct iommu_domain *domain,
 		   unsigned long iova, size_t size)
 {
@@ -2723,16 +2710,6 @@ static int __init iommu_init(void)
 }
 core_initcall(iommu_init);
 
-int iommu_enable_nesting(struct iommu_domain *domain)
-{
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-	if (!domain->ops->enable_nesting)
-		return -EINVAL;
-	return domain->ops->enable_nesting(domain);
-}
-EXPORT_SYMBOL_GPL(iommu_enable_nesting);
-
 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
 			     unsigned long quirk)
 {
@@ -2965,6 +2942,14 @@ static int iommu_setup_default_domain(struct iommu_group *group,
 	if (group->default_domain == dom)
 		return 0;
 
+	if (iommu_is_dma_domain(dom)) {
+		ret = iommu_get_dma_cookie(dom);
+		if (ret) {
+			iommu_domain_free(dom);
+			return ret;
+		}
+	}
+
 	/*
 	 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be
 	 * mapped before their device is attached, in order to guarantee
@@ -3152,22 +3137,25 @@ void iommu_device_unuse_default_domain(struct device *dev)
 
 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
 {
+	struct device *dev = iommu_group_first_dev(group);
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 	struct iommu_domain *domain;
 
 	if (group->blocking_domain)
 		return 0;
 
-	domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
-	if (IS_ERR(domain)) {
-		/*
-		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
-		 * create an empty domain instead.
-		 */
-		domain = __iommu_group_domain_alloc(group,
-						    IOMMU_DOMAIN_UNMANAGED);
-		if (IS_ERR(domain))
-			return PTR_ERR(domain);
+	if (ops->blocked_domain) {
+		group->blocking_domain = ops->blocked_domain;
+		return 0;
 	}
+
+	/*
+	 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an
+	 * empty PAGING domain instead.
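
To make the new kernel-doc contract concrete, a small illustrative sketch: iommu_unmap() must be called with ranges that match earlier iommu_map() calls.

static int example_map_then_unmap(struct iommu_domain *domain,
				  unsigned long iova, phys_addr_t paddr,
				  size_t size)
{
	int rc;

	rc = iommu_map(domain, iova, paddr, size,
		       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (rc)
		return rc;

	/* Unmapping a sub-range of the mapping above would be invalid */
	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;
	return 0;
}
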
+ */ + domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(domain)) + return PTR_ERR(domain); group->blocking_domain = domain; return 0; } @@ -3331,7 +3319,8 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain, int ret; for_each_group_device(group, device) { - ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); + ret = domain->ops->set_dev_pasid(domain, device->dev, + pasid, NULL); if (ret) goto err_revert; } diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig index 76656fe0470d..0a07f9449fd9 100644 --- a/drivers/iommu/iommufd/Kconfig +++ b/drivers/iommu/iommufd/Kconfig @@ -1,4 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only +config IOMMUFD_DRIVER_CORE + tristate + default (IOMMUFD_DRIVER || IOMMUFD) if IOMMUFD!=n + config IOMMUFD tristate "IOMMU Userspace API" select INTERVAL_TREE diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile index cf4605962bea..cb784da6cddc 100644 --- a/drivers/iommu/iommufd/Makefile +++ b/drivers/iommu/iommufd/Makefile @@ -7,9 +7,13 @@ iommufd-y := \ ioas.o \ main.o \ pages.o \ - vfio_compat.o + vfio_compat.o \ + viommu.o iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o obj-$(CONFIG_IOMMUFD) += iommufd.o obj-$(CONFIG_IOMMUFD_DRIVER) += iova_bitmap.o + +iommufd_driver-y := driver.o +obj-$(CONFIG_IOMMUFD_DRIVER_CORE) += iommufd_driver.o diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c new file mode 100644 index 000000000000..7b67fdf44134 --- /dev/null +++ b/drivers/iommu/iommufd/driver.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES + */ +#include "iommufd_private.h" + +struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, + size_t size, + enum iommufd_object_type type) +{ + struct iommufd_object *obj; + int rc; + + obj = kzalloc(size, GFP_KERNEL_ACCOUNT); + if (!obj) + return ERR_PTR(-ENOMEM); + obj->type = type; + /* Starts out bias'd by 1 until it is removed from the xarray */ + refcount_set(&obj->shortterm_users, 1); + refcount_set(&obj->users, 1); + + /* + * Reserve an ID in the xarray but do not publish the pointer yet since + * the caller hasn't initialized it yet. Once the pointer is published + * in the xarray and visible to other threads we can't reliably destroy + * it anymore, so the caller must complete all errorable operations + * before calling iommufd_object_finalize(). + */ + rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b, + GFP_KERNEL_ACCOUNT); + if (rc) + goto out_free; + return obj; +out_free: + kfree(obj); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_NS_GPL(_iommufd_object_alloc, IOMMUFD); + +/* Caller should xa_lock(&viommu->vdevs) to protect the return value */ +struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu, + unsigned long vdev_id) +{ + struct iommufd_vdevice *vdev; + + lockdep_assert_held(&viommu->vdevs.xa_lock); + + vdev = xa_load(&viommu->vdevs, vdev_id); + return vdev ? 
vdev->dev : NULL; +} +EXPORT_SYMBOL_NS_GPL(iommufd_viommu_find_dev, IOMMUFD); + +MODULE_DESCRIPTION("iommufd code shared with builtin modules"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c index e590973ce5cf..053b0e30f55a 100644 --- a/drivers/iommu/iommufd/fault.c +++ b/drivers/iommu/iommufd/fault.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> +#include <linux/pci-ats.h> #include <linux/poll.h> #include <uapi/linux/iommufd.h> @@ -27,8 +28,12 @@ static int iommufd_fault_iopf_enable(struct iommufd_device *idev) * resource between PF and VFs. There is no coordination for this * shared capability. This waits for a vPRI reset to recover. */ - if (dev_is_pci(dev) && to_pci_dev(dev)->is_virtfn) - return -EINVAL; + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + + if (pdev->is_virtfn && pci_pri_supported(pdev)) + return -EINVAL; + } mutex_lock(&idev->iopf_lock); /* Device iopf has already been on. */ diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index d06bf6e6c19f..ce03c3804651 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -57,7 +57,10 @@ void iommufd_hwpt_nested_destroy(struct iommufd_object *obj) container_of(obj, struct iommufd_hwpt_nested, common.obj); __iommufd_hwpt_destroy(&hwpt_nested->common); - refcount_dec(&hwpt_nested->parent->common.obj.users); + if (hwpt_nested->viommu) + refcount_dec(&hwpt_nested->viommu->obj.users); + else + refcount_dec(&hwpt_nested->parent->common.obj.users); } void iommufd_hwpt_nested_abort(struct iommufd_object *obj) @@ -107,7 +110,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, const struct iommu_user_data *user_data) { const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT | - IOMMU_HWPT_ALLOC_DIRTY_TRACKING; + IOMMU_HWPT_ALLOC_DIRTY_TRACKING | + IOMMU_HWPT_FAULT_ID_VALID; const struct iommu_ops *ops = dev_iommu_ops(idev->dev); struct iommufd_hwpt_paging *hwpt_paging; struct iommufd_hw_pagetable *hwpt; @@ -115,7 +119,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, lockdep_assert_held(&ioas->mutex); - if ((flags || user_data) && !ops->domain_alloc_user) + if ((flags || user_data) && !ops->domain_alloc_paging_flags) return ERR_PTR(-EOPNOTSUPP); if (flags & ~valid_flags) return ERR_PTR(-EOPNOTSUPP); @@ -135,9 +139,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, hwpt_paging->ioas = ioas; hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; - if (ops->domain_alloc_user) { - hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL, - user_data); + if (ops->domain_alloc_paging_flags) { + hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags, + user_data); if (IS_ERR(hwpt->domain)) { rc = PTR_ERR(hwpt->domain); hwpt->domain = NULL; @@ -223,7 +227,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, int rc; if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || - !user_data->len || !ops->domain_alloc_user) + !user_data->len || !ops->domain_alloc_nested) return ERR_PTR(-EOPNOTSUPP); if (parent->auto_domain || !parent->nest_parent || parent->common.domain->owner != ops) @@ -238,9 +242,9 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, refcount_inc(&parent->common.obj.users); hwpt_nested->parent = parent; - hwpt->domain = ops->domain_alloc_user(idev->dev, - flags & ~IOMMU_HWPT_FAULT_ID_VALID, - parent->common.domain, user_data); + 
hwpt->domain = ops->domain_alloc_nested(
+		idev->dev, parent->common.domain,
+		flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
 	if (IS_ERR(hwpt->domain)) {
 		rc = PTR_ERR(hwpt->domain);
 		hwpt->domain = NULL;
@@ -248,8 +252,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
 	}
 	hwpt->domain->owner = ops;
 
-	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
-			 !hwpt->domain->ops->cache_invalidate_user)) {
+	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
 		rc = -EINVAL;
 		goto out_abort;
 	}
@@ -260,6 +263,58 @@ out_abort:
 	return ERR_PTR(rc);
 }
 
+/**
+ * iommufd_viommu_alloc_hwpt_nested() - Get a hwpt_nested for a vIOMMU
+ * @viommu: vIOMMU object to associate the hwpt_nested/domain with
+ * @flags: Flags from userspace
+ * @user_data: user_data pointer. Must be valid
+ *
+ * Allocate a new IOMMU_DOMAIN_NESTED for a vIOMMU and return it as a NESTED
+ * hw_pagetable.
+ */
+static struct iommufd_hwpt_nested *
+iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
+				 const struct iommu_user_data *user_data)
+{
+	struct iommufd_hwpt_nested *hwpt_nested;
+	struct iommufd_hw_pagetable *hwpt;
+	int rc;
+
+	if (!user_data->len)
+		return ERR_PTR(-EOPNOTSUPP);
+	if (!viommu->ops || !viommu->ops->alloc_domain_nested)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	hwpt_nested = __iommufd_object_alloc(
+		viommu->ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
+	if (IS_ERR(hwpt_nested))
+		return ERR_CAST(hwpt_nested);
+	hwpt = &hwpt_nested->common;
+
+	hwpt_nested->viommu = viommu;
+	refcount_inc(&viommu->obj.users);
+	hwpt_nested->parent = viommu->hwpt;
+
+	hwpt->domain =
+		viommu->ops->alloc_domain_nested(viommu, flags, user_data);
+	if (IS_ERR(hwpt->domain)) {
+		rc = PTR_ERR(hwpt->domain);
+		hwpt->domain = NULL;
+		goto out_abort;
+	}
+	hwpt->domain->owner = viommu->iommu_dev->ops;
+
+	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
+		rc = -EINVAL;
+		goto out_abort;
+	}
+	return hwpt_nested;
+
+out_abort:
+	iommufd_object_abort_and_destroy(viommu->ictx, &hwpt->obj);
+	return ERR_PTR(rc);
+}
+
 int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
 {
 	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
@@ -316,6 +371,22 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
 			goto out_unlock;
 		}
 		hwpt = &hwpt_nested->common;
+	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
+		struct iommufd_hwpt_nested *hwpt_nested;
+		struct iommufd_viommu *viommu;
+
+		viommu = container_of(pt_obj, struct iommufd_viommu, obj);
+		if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
+			rc = -EINVAL;
+			goto out_unlock;
+		}
+		hwpt_nested = iommufd_viommu_alloc_hwpt_nested(
+			viommu, cmd->flags, &user_data);
+		if (IS_ERR(hwpt_nested)) {
+			rc = PTR_ERR(hwpt_nested);
+			goto out_unlock;
+		}
+		hwpt = &hwpt_nested->common;
 	} else {
 		rc = -EINVAL;
 		goto out_put_pt;
 	}
@@ -412,7 +483,7 @@ int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
 		.entry_len = cmd->entry_len,
 		.entry_num = cmd->entry_num,
 	};
-	struct iommufd_hw_pagetable *hwpt;
+	struct iommufd_object *pt_obj;
 	u32 done_num = 0;
 	int rc;
@@ -426,17 +497,40 @@ int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
 		goto out;
 	}
 
-	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
-	if (IS_ERR(hwpt)) {
-		rc = PTR_ERR(hwpt);
+	pt_obj = iommufd_get_object(ucmd->ictx, cmd->hwpt_id, IOMMUFD_OBJ_ANY);
+	if (IS_ERR(pt_obj)) {
+		rc = PTR_ERR(pt_obj);
 		goto out;
 	}
+	if (pt_obj->type == IOMMUFD_OBJ_HWPT_NESTED) {
+		struct iommufd_hw_pagetable *hwpt =
+			container_of(pt_obj, struct iommufd_hw_pagetable, obj);
+
+		if (!hwpt->domain->ops ||
!hwpt->domain->ops->cache_invalidate_user) { + rc = -EOPNOTSUPP; + goto out_put_pt; + } + rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain, + &data_array); + } else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) { + struct iommufd_viommu *viommu = + container_of(pt_obj, struct iommufd_viommu, obj); + + if (!viommu->ops || !viommu->ops->cache_invalidate) { + rc = -EOPNOTSUPP; + goto out_put_pt; + } + rc = viommu->ops->cache_invalidate(viommu, &data_array); + } else { + rc = -EINVAL; + goto out_put_pt; + } - rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain, - &data_array); done_num = data_array.entry_num; - iommufd_put_object(ucmd->ictx, &hwpt->obj); +out_put_pt: + iommufd_put_object(ucmd->ictx, pt_obj); out: cmd->entry_num = done_num; if (iommufd_ucmd_respond(ucmd, sizeof(*cmd))) diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 4bf7ccd39d46..8a790e597e12 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -107,9 +107,9 @@ static bool __alloc_iova_check_used(struct interval_tree_span_iter *span, * Does not return a 0 IOVA even if it is valid. */ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, - unsigned long uptr, unsigned long length) + unsigned long addr, unsigned long length) { - unsigned long page_offset = uptr % PAGE_SIZE; + unsigned long page_offset = addr % PAGE_SIZE; struct interval_tree_double_span_iter used_span; struct interval_tree_span_iter allowed_span; unsigned long max_alignment = PAGE_SIZE; @@ -122,15 +122,15 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, return -EOVERFLOW; /* - * Keep alignment present in the uptr when building the IOVA, this + * Keep alignment present in addr when building the IOVA, which * increases the chance we can map a THP. 
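
For reference, a hedged sketch of the driver-side half of the vIOMMU invalidation dispatch added in hw_pagetable.c above, written against the selftest data type this series defines; the per-entry lookup and flush step is elided:

static int example_viommu_cache_invalidate(struct iommufd_viommu *viommu,
					   struct iommu_user_data_array *array)
{
	struct iommu_viommu_invalidate_selftest inv;
	u32 i;
	int rc = 0;

	for (i = 0; i != array->entry_num; i++) {
		rc = iommu_copy_struct_from_user_array(
			&inv, array, IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
			i, cache_id);
		if (rc)
			break;
		/* ... look up inv.vdev_id and flush cache entry inv.cache_id ... */
	}
	array->entry_num = i;	/* report how many entries were consumed */
	return rc;
}
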
*/ - if (!uptr) + if (!addr) iova_alignment = roundup_pow_of_two(length); else iova_alignment = min_t(unsigned long, roundup_pow_of_two(length), - 1UL << __ffs64(uptr)); + 1UL << __ffs64(addr)); #ifdef CONFIG_TRANSPARENT_HUGEPAGE max_alignment = HPAGE_SIZE; @@ -248,6 +248,7 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt, int iommu_prot, unsigned int flags) { struct iopt_pages_list *elm; + unsigned long start; unsigned long iova; int rc = 0; @@ -267,9 +268,15 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt, /* Use the first entry to guess the ideal IOVA alignment */ elm = list_first_entry(pages_list, struct iopt_pages_list, next); - rc = iopt_alloc_iova( - iopt, dst_iova, - (uintptr_t)elm->pages->uptr + elm->start_byte, length); + switch (elm->pages->type) { + case IOPT_ADDRESS_USER: + start = elm->start_byte + (uintptr_t)elm->pages->uptr; + break; + case IOPT_ADDRESS_FILE: + start = elm->start_byte + elm->pages->start; + break; + } + rc = iopt_alloc_iova(iopt, dst_iova, start, length); if (rc) goto out_unlock; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && @@ -384,6 +391,34 @@ out_unlock_domains: return rc; } +static int iopt_map_common(struct iommufd_ctx *ictx, struct io_pagetable *iopt, + struct iopt_pages *pages, unsigned long *iova, + unsigned long length, unsigned long start_byte, + int iommu_prot, unsigned int flags) +{ + struct iopt_pages_list elm = {}; + LIST_HEAD(pages_list); + int rc; + + elm.pages = pages; + elm.start_byte = start_byte; + if (ictx->account_mode == IOPT_PAGES_ACCOUNT_MM && + elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER) + elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM; + elm.length = length; + list_add(&elm.next, &pages_list); + + rc = iopt_map_pages(iopt, &pages_list, length, iova, iommu_prot, flags); + if (rc) { + if (elm.area) + iopt_abort_area(elm.area); + if (elm.pages) + iopt_put_pages(elm.pages); + return rc; + } + return 0; +} + /** * iopt_map_user_pages() - Map a user VA to an iova in the io page table * @ictx: iommufd_ctx the iopt is part of @@ -408,29 +443,41 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, unsigned long length, int iommu_prot, unsigned int flags) { - struct iopt_pages_list elm = {}; - LIST_HEAD(pages_list); - int rc; + struct iopt_pages *pages; - elm.pages = iopt_alloc_pages(uptr, length, iommu_prot & IOMMU_WRITE); - if (IS_ERR(elm.pages)) - return PTR_ERR(elm.pages); - if (ictx->account_mode == IOPT_PAGES_ACCOUNT_MM && - elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER) - elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM; - elm.start_byte = uptr - elm.pages->uptr; - elm.length = length; - list_add(&elm.next, &pages_list); + pages = iopt_alloc_user_pages(uptr, length, iommu_prot & IOMMU_WRITE); + if (IS_ERR(pages)) + return PTR_ERR(pages); - rc = iopt_map_pages(iopt, &pages_list, length, iova, iommu_prot, flags); - if (rc) { - if (elm.area) - iopt_abort_area(elm.area); - if (elm.pages) - iopt_put_pages(elm.pages); - return rc; - } - return 0; + return iopt_map_common(ictx, iopt, pages, iova, length, + uptr - pages->uptr, iommu_prot, flags); +} + +/** + * iopt_map_file_pages() - Like iopt_map_user_pages, but map a file. + * @ictx: iommufd_ctx the iopt is part of + * @iopt: io_pagetable to act on + * @iova: If IOPT_ALLOC_IOVA is set this is unused on input and contains + * the chosen iova on output. 
Otherwise it is the iova to map to on input
+ * @file: file to map
+ * @start: map file starting at this byte offset
+ * @length: Number of bytes to map
+ * @iommu_prot: Combination of IOMMU_READ/WRITE/etc bits for the mapping
+ * @flags: IOPT_ALLOC_IOVA or zero
+ */
+int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
+			unsigned long *iova, struct file *file,
+			unsigned long start, unsigned long length,
+			int iommu_prot, unsigned int flags)
+{
+	struct iopt_pages *pages;
+
+	pages = iopt_alloc_file_pages(file, start, length,
+				      iommu_prot & IOMMU_WRITE);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+	return iopt_map_common(ictx, iopt, pages, iova, length,
+			       start - pages->start, iommu_prot, flags);
 }
 
 struct iova_bitmap_fn_arg {
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index c61d74471684..10c928a9a463 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -173,6 +173,12 @@ enum {
 	IOPT_PAGES_ACCOUNT_NONE = 0,
 	IOPT_PAGES_ACCOUNT_USER = 1,
 	IOPT_PAGES_ACCOUNT_MM = 2,
+	IOPT_PAGES_ACCOUNT_MODE_NUM = 3,
+};
+
+enum iopt_address_type {
+	IOPT_ADDRESS_USER = 0,
+	IOPT_ADDRESS_FILE = 1,
 };
 
 /*
@@ -195,7 +201,14 @@ struct iopt_pages {
 	struct task_struct *source_task;
 	struct mm_struct *source_mm;
 	struct user_struct *source_user;
-	void __user *uptr;
+	enum iopt_address_type type;
+	union {
+		void __user *uptr;	/* IOPT_ADDRESS_USER */
+		struct {		/* IOPT_ADDRESS_FILE */
+			struct file *file;
+			unsigned long start;
+		};
+	};
 	bool writable:1;
 	u8 account_mode;
@@ -206,8 +219,10 @@ struct iopt_pages {
 	struct rb_root_cached domains_itree;
 };
 
-struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
-				    bool writable);
+struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
+					 unsigned long length, bool writable);
+struct iopt_pages *iopt_alloc_file_pages(struct file *file, unsigned long start,
+					 unsigned long length, bool writable);
 void iopt_release_pages(struct kref *kref);
 static inline void iopt_put_pages(struct iopt_pages *pages)
 {
@@ -238,4 +253,9 @@ struct iopt_pages_access {
 	unsigned int users;
 };
 
+struct pfn_reader_user;
+
+int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages,
+			     bool inc, struct pfn_reader_user *user);
+
 #endif
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 2c4b2bb11e78..1542c5fd10a8 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
  */
+#include <linux/file.h>
 #include <linux/interval_tree.h>
 #include <linux/iommu.h>
 #include <linux/iommufd.h>
@@ -51,7 +52,10 @@ int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 	if (rc)
 		goto out_table;
+
+	down_read(&ucmd->ictx->ioas_creation_lock);
 	iommufd_object_finalize(ucmd->ictx, &ioas->obj);
+	up_read(&ucmd->ictx->ioas_creation_lock);
 	return 0;
 
 out_table:
@@ -197,6 +201,52 @@ static int conv_iommu_prot(u32 map_flags)
 	return iommu_prot;
 }
 
+int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_ioas_map_file *cmd = ucmd->cmd;
+	unsigned long iova = cmd->iova;
+	struct iommufd_ioas *ioas;
+	unsigned int flags = 0;
+	struct file *file;
+	int rc;
+
+	if (cmd->flags &
+	    ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
+	      IOMMU_IOAS_MAP_READABLE))
+		return -EOPNOTSUPP;
+
+	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
+		return -EOVERFLOW;
+
+	if (!(cmd->flags &
+	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
+		return -EINVAL;
+
+	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
+		flags = IOPT_ALLOC_IOVA;
+
+	file = fget(cmd->fd);
+	if (!file) {
+		rc = -EBADF;
+		goto out_put_ioas;
+	}
+
+	rc = iopt_map_file_pages(ucmd->ictx, &ioas->iopt, &iova, file,
+				 cmd->start, cmd->length,
+				 conv_iommu_prot(cmd->flags), flags);
+	if (rc)
+		goto out_put;
+
+	cmd->iova = iova;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+out_put:
+	fput(file);
+out_put_ioas:
+	iommufd_put_object(ucmd->ictx, &ioas->obj);
+	return rc;
+}
+
 int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
 {
 	struct iommu_ioas_map *cmd = ucmd->cmd;
@@ -327,6 +377,215 @@ out_put:
 	return rc;
 }
 
+static void iommufd_release_all_iova_rwsem(struct iommufd_ctx *ictx,
+					   struct xarray *ioas_list)
+{
+	struct iommufd_ioas *ioas;
+	unsigned long index;
+
+	xa_for_each(ioas_list, index, ioas) {
+		up_write(&ioas->iopt.iova_rwsem);
+		refcount_dec(&ioas->obj.users);
+	}
+	up_write(&ictx->ioas_creation_lock);
+	xa_destroy(ioas_list);
+}
+
+static int iommufd_take_all_iova_rwsem(struct iommufd_ctx *ictx,
+				       struct xarray *ioas_list)
+{
+	struct iommufd_object *obj;
+	unsigned long index;
+	int rc;
+
+	/*
+	 * This is very ugly. It is done instead of adding a lock around
+	 * pages->source_mm, which is a performance path for mdev: we just
+	 * obtain the write side of all the iova_rwsems, which also protects
+	 * the pages->source_*. Due to copies we can't know which IOAS could
+	 * read from the pages, so we just lock everything. This is the only
+	 * place locks are nested and they are uniformly taken in ID order.
+	 *
+	 * ioas_creation_lock prevents new IOAS from being installed in the
+	 * xarray while we do this, and also prevents more than one thread from
+	 * holding nested locks.
+	 */
+	down_write(&ictx->ioas_creation_lock);
+	xa_lock(&ictx->objects);
+	xa_for_each(&ictx->objects, index, obj) {
+		struct iommufd_ioas *ioas;
+
+		if (!obj || obj->type != IOMMUFD_OBJ_IOAS)
+			continue;
+
+		if (!refcount_inc_not_zero(&obj->users))
+			continue;
+
+		xa_unlock(&ictx->objects);
+
+		ioas = container_of(obj, struct iommufd_ioas, obj);
+		down_write_nest_lock(&ioas->iopt.iova_rwsem,
+				     &ictx->ioas_creation_lock);
+
+		rc = xa_err(xa_store(ioas_list, index, ioas, GFP_KERNEL));
+		if (rc) {
+			iommufd_release_all_iova_rwsem(ictx, ioas_list);
+			return rc;
+		}
+
+		xa_lock(&ictx->objects);
+	}
+	xa_unlock(&ictx->objects);
+	return 0;
+}
+
+static bool need_charge_update(struct iopt_pages *pages)
+{
+	switch (pages->account_mode) {
+	case IOPT_PAGES_ACCOUNT_NONE:
+		return false;
+	case IOPT_PAGES_ACCOUNT_MM:
+		return pages->source_mm != current->mm;
+	case IOPT_PAGES_ACCOUNT_USER:
+		/*
+		 * Update when mm changes because it also accounts
+		 * in mm->pinned_vm.
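
From userspace, the new ioctl would be driven roughly as follows (hedged sketch; the struct field names are taken from the uapi used in this hunk):

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int example_map_memfd(int iommufd, __u32 ioas_id, int memfd,
			     __u64 start, __u64 length, __u64 *iova)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.fd = memfd,
		.start = start,
		.length = length,
	};
	int rc = ioctl(iommufd, IOMMU_IOAS_MAP_FILE, &cmd);

	if (!rc)
		*iova = cmd.iova;	/* kernel-chosen IOVA */
	return rc;
}
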
+ */ + return (pages->source_user != current_user()) || + (pages->source_mm != current->mm); + } + return true; +} + +static int charge_current(unsigned long *npinned) +{ + struct iopt_pages tmp = { + .source_mm = current->mm, + .source_task = current->group_leader, + .source_user = current_user(), + }; + unsigned int account_mode; + int rc; + + for (account_mode = 0; account_mode != IOPT_PAGES_ACCOUNT_MODE_NUM; + account_mode++) { + if (!npinned[account_mode]) + continue; + + tmp.account_mode = account_mode; + rc = iopt_pages_update_pinned(&tmp, npinned[account_mode], true, + NULL); + if (rc) + goto err_undo; + } + return 0; + +err_undo: + while (account_mode != 0) { + account_mode--; + if (!npinned[account_mode]) + continue; + tmp.account_mode = account_mode; + iopt_pages_update_pinned(&tmp, npinned[account_mode], false, + NULL); + } + return rc; +} + +static void change_mm(struct iopt_pages *pages) +{ + struct task_struct *old_task = pages->source_task; + struct user_struct *old_user = pages->source_user; + struct mm_struct *old_mm = pages->source_mm; + + pages->source_mm = current->mm; + mmgrab(pages->source_mm); + mmdrop(old_mm); + + pages->source_task = current->group_leader; + get_task_struct(pages->source_task); + put_task_struct(old_task); + + pages->source_user = get_uid(current_user()); + free_uid(old_user); +} + +#define for_each_ioas_area(_xa, _index, _ioas, _area) \ + xa_for_each((_xa), (_index), (_ioas)) \ + for (_area = iopt_area_iter_first(&_ioas->iopt, 0, ULONG_MAX); \ + _area; \ + _area = iopt_area_iter_next(_area, 0, ULONG_MAX)) + +int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd) +{ + struct iommu_ioas_change_process *cmd = ucmd->cmd; + struct iommufd_ctx *ictx = ucmd->ictx; + unsigned long all_npinned[IOPT_PAGES_ACCOUNT_MODE_NUM] = {}; + struct iommufd_ioas *ioas; + struct iopt_area *area; + struct iopt_pages *pages; + struct xarray ioas_list; + unsigned long index; + int rc; + + if (cmd->__reserved) + return -EOPNOTSUPP; + + xa_init(&ioas_list); + rc = iommufd_take_all_iova_rwsem(ictx, &ioas_list); + if (rc) + return rc; + + for_each_ioas_area(&ioas_list, index, ioas, area) { + if (area->pages->type != IOPT_ADDRESS_FILE) { + rc = -EINVAL; + goto out; + } + } + + /* + * Count last_pinned pages, then clear it to avoid double counting + * if the same iopt_pages is visited multiple times in this loop. + * Since we are under all the locks, npinned == last_npinned, so we + * can easily restore last_npinned before we return. + */ + for_each_ioas_area(&ioas_list, index, ioas, area) { + pages = area->pages; + + if (need_charge_update(pages)) { + all_npinned[pages->account_mode] += pages->last_npinned; + pages->last_npinned = 0; + } + } + + rc = charge_current(all_npinned); + + if (rc) { + /* Charge failed. Fix last_npinned and bail. 
*/ + for_each_ioas_area(&ioas_list, index, ioas, area) + area->pages->last_npinned = area->pages->npinned; + goto out; + } + + for_each_ioas_area(&ioas_list, index, ioas, area) { + pages = area->pages; + + /* Uncharge the old one (which also restores last_npinned) */ + if (need_charge_update(pages)) { + int r = iopt_pages_update_pinned(pages, pages->npinned, + false, NULL); + + if (WARN_ON(r)) + rc = r; + } + change_mm(pages); + } + +out: + iommufd_release_all_iova_rwsem(ictx, &ioas_list); + return rc; +} + int iommufd_option_rlimit_mode(struct iommu_option *cmd, struct iommufd_ctx *ictx) { diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index f1d865e6fab6..b6d706cf2c66 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -5,8 +5,8 @@ #define __IOMMUFD_PRIVATE_H #include <linux/iommu.h> +#include <linux/iommufd.h> #include <linux/iova_bitmap.h> -#include <linux/refcount.h> #include <linux/rwsem.h> #include <linux/uaccess.h> #include <linux/xarray.h> @@ -24,6 +24,7 @@ struct iommufd_ctx { struct xarray objects; struct xarray groups; wait_queue_head_t destroy_wait; + struct rw_semaphore ioas_creation_lock; u8 account_mode; /* Compatibility with VFIO no iommu */ @@ -69,6 +70,10 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, unsigned long *iova, void __user *uptr, unsigned long length, int iommu_prot, unsigned int flags); +int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, + unsigned long *iova, struct file *file, + unsigned long start, unsigned long length, + int iommu_prot, unsigned int flags); int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list, unsigned long length, unsigned long *dst_iova, int iommu_prot, unsigned int flags); @@ -122,29 +127,6 @@ static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd, return 0; } -enum iommufd_object_type { - IOMMUFD_OBJ_NONE, - IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE, - IOMMUFD_OBJ_DEVICE, - IOMMUFD_OBJ_HWPT_PAGING, - IOMMUFD_OBJ_HWPT_NESTED, - IOMMUFD_OBJ_IOAS, - IOMMUFD_OBJ_ACCESS, - IOMMUFD_OBJ_FAULT, -#ifdef CONFIG_IOMMUFD_TEST - IOMMUFD_OBJ_SELFTEST, -#endif - IOMMUFD_OBJ_MAX, -}; - -/* Base struct for all objects with a userspace ID handle. 
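
A hedged userspace-style sketch of the companion ioctl: after an iommufd is handed to another process, IOMMU_IOAS_CHANGE_PROCESS re-accounts the pinned memory to the caller.

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int example_change_process(int iommufd)
{
	struct iommu_ioas_change_process cmd = {
		.size = sizeof(cmd),
	};

	/* Only valid when every mapped area is file-backed (memfd) */
	return ioctl(iommufd, IOMMU_IOAS_CHANGE_PROCESS, &cmd);
}
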
*/ -struct iommufd_object { - refcount_t shortterm_users; - refcount_t users; - enum iommufd_object_type type; - unsigned int id; -}; - static inline bool iommufd_lock_obj(struct iommufd_object *obj) { if (!refcount_inc_not_zero(&obj->users)) @@ -225,10 +207,6 @@ iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx, iommufd_object_remove(ictx, obj, obj->id, 0); } -struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, - size_t size, - enum iommufd_object_type type); - #define __iommufd_object_alloc(ictx, ptr, type, obj) \ container_of(_iommufd_object_alloc( \ ictx, \ @@ -276,6 +254,8 @@ void iommufd_ioas_destroy(struct iommufd_object *obj); int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd); int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd); int iommufd_ioas_map(struct iommufd_ucmd *ucmd); +int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd); +int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd); int iommufd_ioas_copy(struct iommufd_ucmd *ucmd); int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd); int iommufd_ioas_option(struct iommufd_ucmd *ucmd); @@ -312,6 +292,7 @@ struct iommufd_hwpt_paging { struct iommufd_hwpt_nested { struct iommufd_hw_pagetable common; struct iommufd_hwpt_paging *parent; + struct iommufd_viommu *viommu; }; static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt) @@ -528,6 +509,27 @@ static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev, return iommu_group_replace_domain(idev->igroup->group, hwpt->domain); } +static inline struct iommufd_viommu * +iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_VIOMMU), + struct iommufd_viommu, obj); +} + +int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd); +void iommufd_viommu_destroy(struct iommufd_object *obj); +int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd); +void iommufd_vdevice_destroy(struct iommufd_object *obj); + +struct iommufd_vdevice { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct iommufd_viommu *viommu; + struct device *dev; + u64 id; /* per-vIOMMU virtual ID */ +}; + #ifdef CONFIG_IOMMUFD_TEST int iommufd_test(struct iommufd_ucmd *ucmd); void iommufd_selftest_destroy(struct iommufd_object *obj); diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index f4bc23a92f9a..a6b7a163f636 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -23,6 +23,7 @@ enum { IOMMU_TEST_OP_DIRTY, IOMMU_TEST_OP_MD_CHECK_IOTLB, IOMMU_TEST_OP_TRIGGER_IOPF, + IOMMU_TEST_OP_DEV_CHECK_CACHE, }; enum { @@ -54,6 +55,11 @@ enum { MOCK_NESTED_DOMAIN_IOTLB_NUM = 4, }; +enum { + MOCK_DEV_CACHE_ID_MAX = 3, + MOCK_DEV_CACHE_NUM = 4, +}; + struct iommu_test_cmd { __u32 size; __u32 op; @@ -135,6 +141,10 @@ struct iommu_test_cmd { __u32 perm; __u64 addr; } trigger_iopf; + struct { + __u32 id; + __u32 cache; + } check_dev_cache; }; __u32 last; }; @@ -152,6 +162,7 @@ struct iommu_test_hw_info { /* Should not be equal to any defined value in enum iommu_hwpt_data_type */ #define IOMMU_HWPT_DATA_SELFTEST 0xdead #define IOMMU_TEST_IOTLB_DEFAULT 0xbadbeef +#define IOMMU_TEST_DEV_CACHE_DEFAULT 0xbaddad /** * struct iommu_hwpt_selftest @@ -180,4 +191,25 @@ struct iommu_hwpt_invalidate_selftest { __u32 iotlb_id; }; +#define IOMMU_VIOMMU_TYPE_SELFTEST 0xdeadbeef + +/* Should not be equal to any defined value in enum iommu_viommu_invalidate_data_type */ +#define IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST 
0xdeadbeef +#define IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID 0xdadbeef + +/** + * struct iommu_viommu_invalidate_selftest - Invalidation data for Mock VIOMMU + * (IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) + * @flags: Invalidate flags + * @cache_id: Invalidate cache entry index + * + * If IOMMU_TEST_INVALIDATE_ALL is set in @flags, @cache_id will be ignored + */ +struct iommu_viommu_invalidate_selftest { +#define IOMMU_TEST_INVALIDATE_FLAG_ALL (1 << 0) + __u32 flags; + __u32 vdev_id; + __u32 cache_id; +}; + #endif diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index b5f5d27ee963..0a96cc8f27da 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -29,38 +29,6 @@ struct iommufd_object_ops { static const struct iommufd_object_ops iommufd_object_ops[]; static struct miscdevice vfio_misc_dev; -struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, - size_t size, - enum iommufd_object_type type) -{ - struct iommufd_object *obj; - int rc; - - obj = kzalloc(size, GFP_KERNEL_ACCOUNT); - if (!obj) - return ERR_PTR(-ENOMEM); - obj->type = type; - /* Starts out bias'd by 1 until it is removed from the xarray */ - refcount_set(&obj->shortterm_users, 1); - refcount_set(&obj->users, 1); - - /* - * Reserve an ID in the xarray but do not publish the pointer yet since - * the caller hasn't initialized it yet. Once the pointer is published - * in the xarray and visible to other threads we can't reliably destroy - * it anymore, so the caller must complete all errorable operations - * before calling iommufd_object_finalize(). - */ - rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, - xa_limit_31b, GFP_KERNEL_ACCOUNT); - if (rc) - goto out_free; - return obj; -out_free: - kfree(obj); - return ERR_PTR(rc); -} - /* * Allow concurrent access to the object. 
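
A minimal sketch of the allocate/abort/finalize lifecycle these comments describe; the init helper and the use of IOMMUFD_OBJ_NONE are hypothetical:

static int example_obj_init(struct iommufd_object *obj); /* hypothetical */

static struct iommufd_object *example_obj_create(struct iommufd_ctx *ictx)
{
	struct iommufd_object *obj;
	int rc;

	obj = _iommufd_object_alloc(ictx, sizeof(*obj), IOMMUFD_OBJ_NONE);
	if (IS_ERR(obj))
		return obj;

	rc = example_obj_init(obj);
	if (rc) {
		/* Un-reserve the xarray ID; the object was never visible */
		iommufd_object_abort(ictx, obj);
		return ERR_PTR(rc);
	}
	/* Publish; no errorable operations are allowed after this */
	iommufd_object_finalize(ictx, obj);
	return obj;
}
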
* @@ -73,20 +41,26 @@ out_free: void iommufd_object_finalize(struct iommufd_ctx *ictx, struct iommufd_object *obj) { + XA_STATE(xas, &ictx->objects, obj->id); void *old; - old = xa_store(&ictx->objects, obj->id, obj, GFP_KERNEL); - /* obj->id was returned from xa_alloc() so the xa_store() cannot fail */ - WARN_ON(old); + xa_lock(&ictx->objects); + old = xas_store(&xas, obj); + xa_unlock(&ictx->objects); + /* obj->id was returned from xa_alloc() so the xas_store() cannot fail */ + WARN_ON(old != XA_ZERO_ENTRY); } /* Undo _iommufd_object_alloc() if iommufd_object_finalize() was not called */ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj) { + XA_STATE(xas, &ictx->objects, obj->id); void *old; - old = xa_erase(&ictx->objects, obj->id); - WARN_ON(old); + xa_lock(&ictx->objects); + old = xas_store(&xas, NULL); + xa_unlock(&ictx->objects); + WARN_ON(old != XA_ZERO_ENTRY); kfree(obj); } @@ -248,6 +222,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp) pr_info_once("IOMMUFD is providing /dev/vfio/vfio, not VFIO.\n"); } + init_rwsem(&ictx->ioas_creation_lock); xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT); xa_init(&ictx->groups); ictx->file = filp; @@ -333,6 +308,8 @@ union ucmd_buffer { struct iommu_ioas_unmap unmap; struct iommu_option option; struct iommu_vfio_ioas vfio_ioas; + struct iommu_viommu_alloc viommu; + struct iommu_vdevice_alloc vdev; #ifdef CONFIG_IOMMUFD_TEST struct iommu_test_cmd test; #endif @@ -372,18 +349,26 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { struct iommu_ioas_alloc, out_ioas_id), IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas, struct iommu_ioas_allow_iovas, allowed_iovas), + IOCTL_OP(IOMMU_IOAS_CHANGE_PROCESS, iommufd_ioas_change_process, + struct iommu_ioas_change_process, __reserved), IOCTL_OP(IOMMU_IOAS_COPY, iommufd_ioas_copy, struct iommu_ioas_copy, src_iova), IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges, struct iommu_ioas_iova_ranges, out_iova_alignment), IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, iova), + IOCTL_OP(IOMMU_IOAS_MAP_FILE, iommufd_ioas_map_file, + struct iommu_ioas_map_file, iova), IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap, length), IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64), IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas, __reserved), + IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl, + struct iommu_viommu_alloc, out_viommu_id), + IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl, + struct iommu_vdevice_alloc, virt_id), #ifdef CONFIG_IOMMUFD_TEST IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last), #endif @@ -519,6 +504,12 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { [IOMMUFD_OBJ_FAULT] = { .destroy = iommufd_fault_destroy, }, + [IOMMUFD_OBJ_VIOMMU] = { + .destroy = iommufd_viommu_destroy, + }, + [IOMMUFD_OBJ_VDEVICE] = { + .destroy = iommufd_vdevice_destroy, + }, #ifdef CONFIG_IOMMUFD_TEST [IOMMUFD_OBJ_SELFTEST] = { .destroy = iommufd_selftest_destroy, diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 93d806c9c073..3427749bc5ce 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -45,6 +45,7 @@ * last_iova + 1 can overflow. An iopt_pages index will always be much less than * ULONG_MAX so last_index + 1 cannot overflow. 
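
The rework above leans on the xarray reserved-entry protocol; a hedged standalone sketch of that protocol (names hypothetical):

static int example_reserve_then_publish(struct xarray *xa, void *ptr, u32 *id)
{
	int rc;

	/* Reserve an ID; readers see NULL until the real pointer lands */
	rc = xa_alloc(xa, id, XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL);
	if (rc)
		return rc;

	xa_lock(xa);
	/* __xa_store() replaces the reserved zero entry in place */
	__xa_store(xa, *id, ptr, GFP_KERNEL);
	xa_unlock(xa);
	return 0;
}
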
*/ +#include <linux/file.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/iommufd.h> @@ -346,27 +347,41 @@ static void batch_destroy(struct pfn_batch *batch, void *backup) kfree(batch->pfns); } -/* true if the pfn was added, false otherwise */ -static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn) +static bool batch_add_pfn_num(struct pfn_batch *batch, unsigned long pfn, + u32 nr) { const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns)); - - if (batch->end && - pfn == batch->pfns[batch->end - 1] + batch->npfns[batch->end - 1] && - batch->npfns[batch->end - 1] != MAX_NPFNS) { - batch->npfns[batch->end - 1]++; - batch->total_pfns++; - return true; - } - if (batch->end == batch->array_size) + unsigned int end = batch->end; + + if (end && pfn == batch->pfns[end - 1] + batch->npfns[end - 1] && + nr <= MAX_NPFNS - batch->npfns[end - 1]) { + batch->npfns[end - 1] += nr; + } else if (end < batch->array_size) { + batch->pfns[end] = pfn; + batch->npfns[end] = nr; + batch->end++; + } else { return false; - batch->total_pfns++; - batch->pfns[batch->end] = pfn; - batch->npfns[batch->end] = 1; - batch->end++; + } + + batch->total_pfns += nr; return true; } +static void batch_remove_pfn_num(struct pfn_batch *batch, unsigned long nr) +{ + batch->npfns[batch->end - 1] -= nr; + if (batch->npfns[batch->end - 1] == 0) + batch->end--; + batch->total_pfns -= nr; +} + +/* true if the pfn was added, false otherwise */ +static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn) +{ + return batch_add_pfn_num(batch, pfn, 1); +} + /* * Fill the batch with pfns from the domain. When the batch is full, or it * reaches last_index, the function will return. The caller should use @@ -622,6 +637,41 @@ static void batch_from_pages(struct pfn_batch *batch, struct page **pages, break; } +static int batch_from_folios(struct pfn_batch *batch, struct folio ***folios_p, + unsigned long *offset_p, unsigned long npages) +{ + int rc = 0; + struct folio **folios = *folios_p; + unsigned long offset = *offset_p; + + while (npages) { + struct folio *folio = *folios; + unsigned long nr = folio_nr_pages(folio) - offset; + unsigned long pfn = page_to_pfn(folio_page(folio, offset)); + + nr = min(nr, npages); + npages -= nr; + + if (!batch_add_pfn_num(batch, pfn, nr)) + break; + if (nr > 1) { + rc = folio_add_pins(folio, nr - 1); + if (rc) { + batch_remove_pfn_num(batch, nr); + goto out; + } + } + + folios++; + offset = 0; + } + +out: + *folios_p = folios; + *offset_p = offset; + return rc; +} + static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, unsigned int first_page_off, size_t npages) { @@ -703,19 +753,32 @@ struct pfn_reader_user { * neither */ int locked; + + /* The following are only valid if file != NULL. */ + struct file *file; + struct folio **ufolios; + size_t ufolios_len; + unsigned long ufolios_offset; + struct folio **ufolios_next; }; static void pfn_reader_user_init(struct pfn_reader_user *user, struct iopt_pages *pages) { user->upages = NULL; + user->upages_len = 0; user->upages_start = 0; user->upages_end = 0; user->locked = -1; - user->gup_flags = FOLL_LONGTERM; if (pages->writable) user->gup_flags |= FOLL_WRITE; + + user->file = (pages->type == IOPT_ADDRESS_FILE) ? 
pages->file : NULL; + user->ufolios = NULL; + user->ufolios_len = 0; + user->ufolios_next = NULL; + user->ufolios_offset = 0; } static void pfn_reader_user_destroy(struct pfn_reader_user *user, @@ -724,13 +787,67 @@ static void pfn_reader_user_destroy(struct pfn_reader_user *user, if (user->locked != -1) { if (user->locked) mmap_read_unlock(pages->source_mm); - if (pages->source_mm != current->mm) + if (!user->file && pages->source_mm != current->mm) mmput(pages->source_mm); user->locked = -1; } kfree(user->upages); user->upages = NULL; + kfree(user->ufolios); + user->ufolios = NULL; +} + +static long pin_memfd_pages(struct pfn_reader_user *user, unsigned long start, + unsigned long npages) +{ + unsigned long i; + unsigned long offset; + unsigned long npages_out = 0; + struct page **upages = user->upages; + unsigned long end = start + (npages << PAGE_SHIFT) - 1; + long nfolios = user->ufolios_len / sizeof(*user->ufolios); + + /* + * todo: memfd_pin_folios should return the last pinned offset so + * we can compute npages pinned, and avoid looping over folios here + * if upages == NULL. + */ + nfolios = memfd_pin_folios(user->file, start, end, user->ufolios, + nfolios, &offset); + if (nfolios <= 0) + return nfolios; + + offset >>= PAGE_SHIFT; + user->ufolios_next = user->ufolios; + user->ufolios_offset = offset; + + for (i = 0; i < nfolios; i++) { + struct folio *folio = user->ufolios[i]; + unsigned long nr = folio_nr_pages(folio); + unsigned long npin = min(nr - offset, npages); + + npages -= npin; + npages_out += npin; + + if (upages) { + if (npin == 1) { + *upages++ = folio_page(folio, offset); + } else { + int rc = folio_add_pins(folio, npin - 1); + + if (rc) + return rc; + + while (npin--) + *upages++ = folio_page(folio, offset++); + } + } + + offset = 0; + } + + return npages_out; } static int pfn_reader_user_pin(struct pfn_reader_user *user, @@ -739,7 +856,9 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user, unsigned long last_index) { bool remote_mm = pages->source_mm != current->mm; - unsigned long npages; + unsigned long npages = last_index - start_index + 1; + unsigned long start; + unsigned long unum; uintptr_t uptr; long rc; @@ -747,40 +866,50 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user, WARN_ON(last_index < start_index)) return -EINVAL; - if (!user->upages) { + if (!user->file && !user->upages) { /* All undone in pfn_reader_destroy() */ - user->upages_len = - (last_index - start_index + 1) * sizeof(*user->upages); + user->upages_len = npages * sizeof(*user->upages); user->upages = temp_kmalloc(&user->upages_len, NULL, 0); if (!user->upages) return -ENOMEM; } + if (user->file && !user->ufolios) { + user->ufolios_len = npages * sizeof(*user->ufolios); + user->ufolios = temp_kmalloc(&user->ufolios_len, NULL, 0); + if (!user->ufolios) + return -ENOMEM; + } + if (user->locked == -1) { /* * The majority of usages will run the map task within the mm * providing the pages, so we can optimize into * get_user_pages_fast() */ - if (remote_mm) { + if (!user->file && remote_mm) { if (!mmget_not_zero(pages->source_mm)) return -EFAULT; } user->locked = 0; } - npages = min_t(unsigned long, last_index - start_index + 1, - user->upages_len / sizeof(*user->upages)); - + unum = user->file ? 
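
For orientation, a hedged sketch of the memfd_pin_folios() calling convention used by pin_memfd_pages() above: the byte range is inclusive, and the returned offset is the byte offset into the first folio.

static long example_pin_one_page(struct file *memfd, loff_t pos)
{
	struct folio *folio;
	pgoff_t offset;
	long nr;

	nr = memfd_pin_folios(memfd, pos, pos + PAGE_SIZE - 1,
			      &folio, 1, &offset);
	if (nr <= 0)
		return nr ? nr : -EFAULT;

	/* The page of interest is folio_page(folio, offset >> PAGE_SHIFT) */
	unpin_folios(&folio, 1);
	return 0;
}
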
user->ufolios_len / sizeof(*user->ufolios) : + user->upages_len / sizeof(*user->upages); + npages = min_t(unsigned long, npages, unum); if (iommufd_should_fail()) return -EFAULT; - uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); - if (!remote_mm) + if (user->file) { + start = pages->start + (start_index * PAGE_SIZE); + rc = pin_memfd_pages(user, start, npages); + } else if (!remote_mm) { + uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); rc = pin_user_pages_fast(uptr, npages, user->gup_flags, user->upages); - else { + } else { + uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); if (!user->locked) { mmap_read_lock(pages->source_mm); user->locked = 1; @@ -838,7 +967,8 @@ static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, mmap_read_unlock(pages->source_mm); user->locked = 0; /* If we had the lock then we also have a get */ - } else if ((!user || !user->upages) && + + } else if ((!user || (!user->upages && !user->ufolios)) && pages->source_mm != current->mm) { if (!mmget_not_zero(pages->source_mm)) return -EINVAL; @@ -855,8 +985,8 @@ static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, return rc; } -static int do_update_pinned(struct iopt_pages *pages, unsigned long npages, - bool inc, struct pfn_reader_user *user) +int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages, + bool inc, struct pfn_reader_user *user) { int rc = 0; @@ -890,8 +1020,8 @@ static void update_unpinned(struct iopt_pages *pages) return; if (pages->npinned == pages->last_npinned) return; - do_update_pinned(pages, pages->last_npinned - pages->npinned, false, - NULL); + iopt_pages_update_pinned(pages, pages->last_npinned - pages->npinned, + false, NULL); } /* @@ -921,7 +1051,7 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user, npages = pages->npinned - pages->last_npinned; inc = true; } - return do_update_pinned(pages, npages, inc, user); + return iopt_pages_update_pinned(pages, npages, inc, user); } /* @@ -978,6 +1108,8 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns) { struct interval_tree_double_span_iter *span = &pfns->span; unsigned long start_index = pfns->batch_end_index; + struct pfn_reader_user *user = &pfns->user; + unsigned long npages; struct iopt_area *area; int rc; @@ -1015,11 +1147,17 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns) return rc; } - batch_from_pages(&pfns->batch, - pfns->user.upages + - (start_index - pfns->user.upages_start), - pfns->user.upages_end - start_index); - return 0; + npages = user->upages_end - start_index; + start_index -= user->upages_start; + rc = 0; + + if (!user->file) + batch_from_pages(&pfns->batch, user->upages + start_index, + npages); + else + rc = batch_from_folios(&pfns->batch, &user->ufolios_next, + &user->ufolios_offset, npages); + return rc; } static bool pfn_reader_done(struct pfn_reader *pfns) @@ -1092,16 +1230,25 @@ static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, static void pfn_reader_release_pins(struct pfn_reader *pfns) { struct iopt_pages *pages = pfns->pages; + struct pfn_reader_user *user = &pfns->user; - if (pfns->user.upages_end > pfns->batch_end_index) { - size_t npages = pfns->user.upages_end - pfns->batch_end_index; - + if (user->upages_end > pfns->batch_end_index) { /* Any pages not transferred to the batch are just unpinned */ - unpin_user_pages(pfns->user.upages + (pfns->batch_end_index - - pfns->user.upages_start), - npages); + + unsigned long npages = user->upages_end - 
pfns->batch_end_index; + unsigned long start_index = pfns->batch_end_index - + user->upages_start; + + if (!user->file) { + unpin_user_pages(user->upages + start_index, npages); + } else { + long n = user->ufolios_len / sizeof(*user->ufolios); + + unpin_folios(user->ufolios_next, + user->ufolios + n - user->ufolios_next); + } iopt_pages_sub_npinned(pages, npages); - pfns->user.upages_end = pfns->batch_end_index; + user->upages_end = pfns->batch_end_index; } if (pfns->batch_start_index != pfns->batch_end_index) { pfn_reader_unpin(pfns); @@ -1139,11 +1286,11 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, return 0; } -struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, - bool writable) +static struct iopt_pages *iopt_alloc_pages(unsigned long start_byte, + unsigned long length, + bool writable) { struct iopt_pages *pages; - unsigned long end; /* * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP @@ -1152,9 +1299,6 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, if (length > SIZE_MAX - PAGE_SIZE || length == 0) return ERR_PTR(-EINVAL); - if (check_add_overflow((unsigned long)uptr, length, &end)) - return ERR_PTR(-EOVERFLOW); - pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); if (!pages) return ERR_PTR(-ENOMEM); @@ -1164,8 +1308,7 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, mutex_init(&pages->mutex); pages->source_mm = current->mm; mmgrab(pages->source_mm); - pages->uptr = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); - pages->npages = DIV_ROUND_UP(length + (uptr - pages->uptr), PAGE_SIZE); + pages->npages = DIV_ROUND_UP(length + start_byte, PAGE_SIZE); pages->access_itree = RB_ROOT_CACHED; pages->domains_itree = RB_ROOT_CACHED; pages->writable = writable; @@ -1179,6 +1322,45 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, return pages; } +struct iopt_pages *iopt_alloc_user_pages(void __user *uptr, + unsigned long length, bool writable) +{ + struct iopt_pages *pages; + unsigned long end; + void __user *uptr_down = + (void __user *) ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); + + if (check_add_overflow((unsigned long)uptr, length, &end)) + return ERR_PTR(-EOVERFLOW); + + pages = iopt_alloc_pages(uptr - uptr_down, length, writable); + if (IS_ERR(pages)) + return pages; + pages->uptr = uptr_down; + pages->type = IOPT_ADDRESS_USER; + return pages; +} + +struct iopt_pages *iopt_alloc_file_pages(struct file *file, unsigned long start, + unsigned long length, bool writable) + +{ + struct iopt_pages *pages; + unsigned long start_down = ALIGN_DOWN(start, PAGE_SIZE); + unsigned long end; + + if (length && check_add_overflow(start, length - 1, &end)) + return ERR_PTR(-EOVERFLOW); + + pages = iopt_alloc_pages(start - start_down, length, writable); + if (IS_ERR(pages)) + return pages; + pages->file = get_file(file); + pages->start = start_down; + pages->type = IOPT_ADDRESS_FILE; + return pages; +} + void iopt_release_pages(struct kref *kref) { struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref); @@ -1191,6 +1373,8 @@ void iopt_release_pages(struct kref *kref) mutex_destroy(&pages->mutex); put_task_struct(pages->source_task); free_uid(pages->source_user); + if (pages->type == IOPT_ADDRESS_FILE) + fput(pages->file); kfree(pages); } @@ -1630,11 +1814,11 @@ static int iopt_pages_fill_from_domain(struct iopt_pages *pages, return 0; } -static int iopt_pages_fill_from_mm(struct iopt_pages *pages, - struct 
pfn_reader_user *user, - unsigned long start_index, - unsigned long last_index, - struct page **out_pages) +static int iopt_pages_fill(struct iopt_pages *pages, + struct pfn_reader_user *user, + unsigned long start_index, + unsigned long last_index, + struct page **out_pages) { unsigned long cur_index = start_index; int rc; @@ -1708,8 +1892,8 @@ int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, /* hole */ cur_pages = out_pages + (span.start_hole - start_index); - rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole, - span.last_hole, cur_pages); + rc = iopt_pages_fill(pages, &user, span.start_hole, + span.last_hole, cur_pages); if (rc) goto out_clean_xa; rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, @@ -1789,6 +1973,10 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, struct page *page = NULL; int rc; + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(pages->type != IOPT_ADDRESS_USER)) + return -EINVAL; + if (!mmget_not_zero(pages->source_mm)) return iopt_pages_rw_slow(pages, index, index, offset, data, length, flags); @@ -1844,6 +2032,15 @@ int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) return -EPERM; + if (pages->type == IOPT_ADDRESS_FILE) + return iopt_pages_rw_slow(pages, start_index, last_index, + start_byte % PAGE_SIZE, data, length, + flags); + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(pages->type != IOPT_ADDRESS_USER)) + return -EINVAL; + if (!(flags & IOMMUFD_ACCESS_RW_KTHREAD) && change_mm) { if (start_index == last_index) return iopt_pages_rw_page(pages, start_index, diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 540437be168a..a0de6d6d4e68 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -126,12 +126,35 @@ struct mock_iommu_domain { struct xarray pfns; }; +static inline struct mock_iommu_domain * +to_mock_domain(struct iommu_domain *domain) +{ + return container_of(domain, struct mock_iommu_domain, domain); +} + struct mock_iommu_domain_nested { struct iommu_domain domain; + struct mock_viommu *mock_viommu; struct mock_iommu_domain *parent; u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM]; }; +static inline struct mock_iommu_domain_nested * +to_mock_nested(struct iommu_domain *domain) +{ + return container_of(domain, struct mock_iommu_domain_nested, domain); +} + +struct mock_viommu { + struct iommufd_viommu core; + struct mock_iommu_domain *s2_parent; +}; + +static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu) +{ + return container_of(viommu, struct mock_viommu, core); +} + enum selftest_obj_type { TYPE_IDEV, }; @@ -140,8 +163,14 @@ struct mock_dev { struct device dev; unsigned long flags; int id; + u32 cache[MOCK_DEV_CACHE_NUM]; }; +static inline struct mock_dev *to_mock_dev(struct device *dev) +{ + return container_of(dev, struct mock_dev, dev); +} + struct selftest_obj { struct iommufd_object obj; enum selftest_obj_type type; @@ -155,10 +184,15 @@ struct selftest_obj { }; }; +static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj) +{ + return container_of(obj, struct selftest_obj, obj); +} + static int mock_domain_nop_attach(struct iommu_domain *domain, struct device *dev) { - struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + struct mock_dev *mdev = to_mock_dev(dev); if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) return -EINVAL; @@ -193,8 +227,7 
@@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, bool enable) { - struct mock_iommu_domain *mock = - container_of(domain, struct mock_iommu_domain, domain); + struct mock_iommu_domain *mock = to_mock_domain(domain); unsigned long flags = mock->flags; if (enable && !domain->dirty_ops) @@ -243,8 +276,7 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, unsigned long flags, struct iommu_dirty_bitmap *dirty) { - struct mock_iommu_domain *mock = - container_of(domain, struct mock_iommu_domain, domain); + struct mock_iommu_domain *mock = to_mock_domain(domain); unsigned long end = iova + size; void *ent; @@ -281,7 +313,7 @@ static const struct iommu_dirty_ops dirty_ops = { static struct iommu_domain *mock_domain_alloc_paging(struct device *dev) { - struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + struct mock_dev *mdev = to_mock_dev(dev); struct mock_iommu_domain *mock; mock = kzalloc(sizeof(*mock), GFP_KERNEL); @@ -298,76 +330,81 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev) return &mock->domain; } -static struct iommu_domain * -__mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent, - const struct iommu_hwpt_selftest *user_cfg) +static struct mock_iommu_domain_nested * +__mock_domain_alloc_nested(const struct iommu_user_data *user_data) { struct mock_iommu_domain_nested *mock_nested; - int i; + struct iommu_hwpt_selftest user_cfg; + int rc, i; + + if (user_data->type != IOMMU_HWPT_DATA_SELFTEST) + return ERR_PTR(-EOPNOTSUPP); + + rc = iommu_copy_struct_from_user(&user_cfg, user_data, + IOMMU_HWPT_DATA_SELFTEST, iotlb); + if (rc) + return ERR_PTR(rc); mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL); if (!mock_nested) return ERR_PTR(-ENOMEM); - mock_nested->parent = mock_parent; mock_nested->domain.ops = &domain_nested_ops; mock_nested->domain.type = IOMMU_DOMAIN_NESTED; for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++) - mock_nested->iotlb[i] = user_cfg->iotlb; - return &mock_nested->domain; + mock_nested->iotlb[i] = user_cfg.iotlb; + return mock_nested; } static struct iommu_domain * -mock_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, + u32 flags, const struct iommu_user_data *user_data) { + struct mock_iommu_domain_nested *mock_nested; struct mock_iommu_domain *mock_parent; - struct iommu_hwpt_selftest user_cfg; - int rc; - /* must be mock_domain */ - if (!parent) { - struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); - bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; - bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY; - struct iommu_domain *domain; - - if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT | - IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) - return ERR_PTR(-EOPNOTSUPP); - if (user_data || (has_dirty_flag && no_dirty_ops)) - return ERR_PTR(-EOPNOTSUPP); - domain = mock_domain_alloc_paging(dev); - if (!domain) - return ERR_PTR(-ENOMEM); - if (has_dirty_flag) - container_of(domain, struct mock_iommu_domain, domain) - ->domain.dirty_ops = &dirty_ops; - return domain; - } - - /* must be mock_domain_nested */ - if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags) + if (flags) return ERR_PTR(-EOPNOTSUPP); if (!parent || parent->ops != mock_ops.default_domain_ops) return ERR_PTR(-EINVAL); - mock_parent = 
container_of(parent, struct mock_iommu_domain, domain); + mock_parent = to_mock_domain(parent); if (!mock_parent) return ERR_PTR(-EINVAL); - rc = iommu_copy_struct_from_user(&user_cfg, user_data, - IOMMU_HWPT_DATA_SELFTEST, iotlb); - if (rc) - return ERR_PTR(rc); + mock_nested = __mock_domain_alloc_nested(user_data); + if (IS_ERR(mock_nested)) + return ERR_CAST(mock_nested); + mock_nested->parent = mock_parent; + return &mock_nested->domain; +} - return __mock_domain_alloc_nested(mock_parent, &user_cfg); +static struct iommu_domain * +mock_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) +{ + bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; + const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | + IOMMU_HWPT_ALLOC_NEST_PARENT; + bool no_dirty_ops = to_mock_dev(dev)->flags & + MOCK_FLAGS_DEVICE_NO_DIRTY; + struct iommu_domain *domain; + + if (user_data) + return ERR_PTR(-EOPNOTSUPP); + if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops)) + return ERR_PTR(-EOPNOTSUPP); + + domain = mock_domain_alloc_paging(dev); + if (!domain) + return ERR_PTR(-ENOMEM); + if (has_dirty_flag) + domain->dirty_ops = &dirty_ops; + return domain; } static void mock_domain_free(struct iommu_domain *domain) { - struct mock_iommu_domain *mock = - container_of(domain, struct mock_iommu_domain, domain); + struct mock_iommu_domain *mock = to_mock_domain(domain); WARN_ON(!xa_empty(&mock->pfns)); kfree(mock); @@ -378,8 +415,7 @@ static int mock_domain_map_pages(struct iommu_domain *domain, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) { - struct mock_iommu_domain *mock = - container_of(domain, struct mock_iommu_domain, domain); + struct mock_iommu_domain *mock = to_mock_domain(domain); unsigned long flags = MOCK_PFN_START_IOVA; unsigned long start_iova = iova; @@ -430,8 +466,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, size_t pgcount, struct iommu_iotlb_gather *iotlb_gather) { - struct mock_iommu_domain *mock = - container_of(domain, struct mock_iommu_domain, domain); + struct mock_iommu_domain *mock = to_mock_domain(domain); bool first = true; size_t ret = 0; void *ent; @@ -479,8 +514,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - struct mock_iommu_domain *mock = - container_of(domain, struct mock_iommu_domain, domain); + struct mock_iommu_domain *mock = to_mock_domain(domain); void *ent; WARN_ON(iova % MOCK_IO_PAGE_SIZE); @@ -491,7 +525,7 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) { - struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + struct mock_dev *mdev = to_mock_dev(dev); switch (cap) { case IOMMU_CAP_CACHE_COHERENCY: @@ -507,14 +541,17 @@ static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) static struct iopf_queue *mock_iommu_iopf_queue; -static struct iommu_device mock_iommu_device = { -}; +static struct mock_iommu_device { + struct iommu_device iommu_dev; + struct completion complete; + refcount_t users; +} mock_iommu; static struct iommu_device *mock_probe_device(struct device *dev) { if (dev->bus != &iommufd_mock_bus_type.bus) return ERR_PTR(-ENODEV); - return &mock_iommu_device; + return &mock_iommu.iommu_dev; } static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt, @@ -540,6 +577,132 @@ static int 
mock_dev_disable_feat(struct device *dev, enum iommu_dev_features fea return 0; } +static void mock_viommu_destroy(struct iommufd_viommu *viommu) +{ + struct mock_iommu_device *mock_iommu = container_of( + viommu->iommu_dev, struct mock_iommu_device, iommu_dev); + + if (refcount_dec_and_test(&mock_iommu->users)) + complete(&mock_iommu->complete); + + /* iommufd core frees mock_viommu and viommu */ +} + +static struct iommu_domain * +mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, + const struct iommu_user_data *user_data) +{ + struct mock_viommu *mock_viommu = to_mock_viommu(viommu); + struct mock_iommu_domain_nested *mock_nested; + + if (flags & ~IOMMU_HWPT_FAULT_ID_VALID) + return ERR_PTR(-EOPNOTSUPP); + + mock_nested = __mock_domain_alloc_nested(user_data); + if (IS_ERR(mock_nested)) + return ERR_CAST(mock_nested); + mock_nested->mock_viommu = mock_viommu; + mock_nested->parent = mock_viommu->s2_parent; + return &mock_nested->domain; +} + +static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu, + struct iommu_user_data_array *array) +{ + struct iommu_viommu_invalidate_selftest *cmds; + struct iommu_viommu_invalidate_selftest *cur; + struct iommu_viommu_invalidate_selftest *end; + int rc; + + /* A zero-length array is allowed to validate the array type */ + if (array->entry_num == 0 && + array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) { + array->entry_num = 0; + return 0; + } + + cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + cur = cmds; + end = cmds + array->entry_num; + + static_assert(sizeof(*cmds) == 3 * sizeof(u32)); + rc = iommu_copy_struct_from_full_user_array( + cmds, sizeof(*cmds), array, + IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST); + if (rc) + goto out; + + while (cur != end) { + struct mock_dev *mdev; + struct device *dev; + int i; + + if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) { + rc = -EOPNOTSUPP; + goto out; + } + + if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) { + rc = -EINVAL; + goto out; + } + + xa_lock(&viommu->vdevs); + dev = iommufd_viommu_find_dev(viommu, + (unsigned long)cur->vdev_id); + if (!dev) { + xa_unlock(&viommu->vdevs); + rc = -EINVAL; + goto out; + } + mdev = container_of(dev, struct mock_dev, dev); + + if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) { + /* Invalidate all cache entries and ignore cache_id */ + for (i = 0; i < MOCK_DEV_CACHE_NUM; i++) + mdev->cache[i] = 0; + } else { + mdev->cache[cur->cache_id] = 0; + } + xa_unlock(&viommu->vdevs); + + cur++; + } +out: + array->entry_num = cur - cmds; + kfree(cmds); + return rc; +} + +static struct iommufd_viommu_ops mock_viommu_ops = { + .destroy = mock_viommu_destroy, + .alloc_domain_nested = mock_viommu_alloc_domain_nested, + .cache_invalidate = mock_viommu_cache_invalidate, +}; + +static struct iommufd_viommu *mock_viommu_alloc(struct device *dev, + struct iommu_domain *domain, + struct iommufd_ctx *ictx, + unsigned int viommu_type) +{ + struct mock_iommu_device *mock_iommu = + iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev); + struct mock_viommu *mock_viommu; + + if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST) + return ERR_PTR(-EOPNOTSUPP); + + mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core, + &mock_viommu_ops); + if (IS_ERR(mock_viommu)) + return ERR_CAST(mock_viommu); + + refcount_inc(&mock_iommu->users); + return &mock_viommu->core; +} + static const struct iommu_ops mock_ops = { /* * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type() @@ -551,7 +714,8 @@ static 
const struct iommu_ops mock_ops = { .pgsize_bitmap = MOCK_IO_PAGE_SIZE, .hw_info = mock_domain_hw_info, .domain_alloc_paging = mock_domain_alloc_paging, - .domain_alloc_user = mock_domain_alloc_user, + .domain_alloc_paging_flags = mock_domain_alloc_paging_flags, + .domain_alloc_nested = mock_domain_alloc_nested, .capable = mock_domain_capable, .device_group = generic_device_group, .probe_device = mock_probe_device, @@ -559,6 +723,7 @@ static const struct iommu_ops mock_ops = { .dev_enable_feat = mock_dev_enable_feat, .dev_disable_feat = mock_dev_disable_feat, .user_pasid_table = true, + .viommu_alloc = mock_viommu_alloc, .default_domain_ops = &(struct iommu_domain_ops){ .free = mock_domain_free, @@ -571,18 +736,14 @@ static const struct iommu_ops mock_ops = { static void mock_domain_free_nested(struct iommu_domain *domain) { - struct mock_iommu_domain_nested *mock_nested = - container_of(domain, struct mock_iommu_domain_nested, domain); - - kfree(mock_nested); + kfree(to_mock_nested(domain)); } static int mock_domain_cache_invalidate_user(struct iommu_domain *domain, struct iommu_user_data_array *array) { - struct mock_iommu_domain_nested *mock_nested = - container_of(domain, struct mock_iommu_domain_nested, domain); + struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain); struct iommu_hwpt_invalidate_selftest inv; u32 processed = 0; int i = 0, j; @@ -657,7 +818,7 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, iommufd_put_object(ucmd->ictx, &hwpt->obj); return ERR_PTR(-EINVAL); } - *mock = container_of(hwpt->domain, struct mock_iommu_domain, domain); + *mock = to_mock_domain(hwpt->domain); return hwpt; } @@ -675,14 +836,13 @@ get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id, iommufd_put_object(ucmd->ictx, &hwpt->obj); return ERR_PTR(-EINVAL); } - *mock_nested = container_of(hwpt->domain, - struct mock_iommu_domain_nested, domain); + *mock_nested = to_mock_nested(hwpt->domain); return hwpt; } static void mock_dev_release(struct device *dev) { - struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + struct mock_dev *mdev = to_mock_dev(dev); ida_free(&mock_dev_ida, mdev->id); kfree(mdev); @@ -691,7 +851,7 @@ static void mock_dev_release(struct device *dev) static struct mock_dev *mock_dev_create(unsigned long dev_flags) { struct mock_dev *mdev; - int rc; + int rc, i; if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA)) @@ -705,6 +865,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags) mdev->flags = dev_flags; mdev->dev.release = mock_dev_release; mdev->dev.bus = &iommufd_mock_bus_type.bus; + for (i = 0; i < MOCK_DEV_CACHE_NUM; i++) + mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT; rc = ida_alloc(&mock_dev_ida, GFP_KERNEL); if (rc < 0) @@ -813,7 +975,7 @@ static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd, if (IS_ERR(dev_obj)) return PTR_ERR(dev_obj); - sobj = container_of(dev_obj, struct selftest_obj, obj); + sobj = to_selftest_obj(dev_obj); if (sobj->type != TYPE_IDEV) { rc = -EINVAL; goto out_dev_obj; @@ -951,8 +1113,7 @@ static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, if (IS_ERR(hwpt)) return PTR_ERR(hwpt); - mock_nested = container_of(hwpt->domain, - struct mock_iommu_domain_nested, domain); + mock_nested = to_mock_nested(hwpt->domain); if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX || mock_nested->iotlb[iotlb_id] != iotlb) @@ -961,6 +1122,24 @@ static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, return rc; } +static int 
iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id, + unsigned int cache_id, u32 cache) +{ + struct iommufd_device *idev; + struct mock_dev *mdev; + int rc = 0; + + idev = iommufd_get_device(ucmd, idev_id); + if (IS_ERR(idev)) + return PTR_ERR(idev); + mdev = container_of(idev->dev, struct mock_dev, dev); + + if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache) + rc = -EINVAL; + iommufd_put_object(ucmd->ictx, &idev->obj); + return rc; +} + struct selftest_access { struct iommufd_access *access; struct file *file; @@ -1431,7 +1610,7 @@ static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd, void iommufd_selftest_destroy(struct iommufd_object *obj) { - struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); + struct selftest_obj *sobj = to_selftest_obj(obj); switch (sobj->type) { case TYPE_IDEV: @@ -1470,6 +1649,10 @@ int iommufd_test(struct iommufd_ucmd *ucmd) return iommufd_test_md_check_iotlb(ucmd, cmd->id, cmd->check_iotlb.id, cmd->check_iotlb.iotlb); + case IOMMU_TEST_OP_DEV_CHECK_CACHE: + return iommufd_test_dev_check_cache(ucmd, cmd->id, + cmd->check_dev_cache.id, + cmd->check_dev_cache.cache); case IOMMU_TEST_OP_CREATE_ACCESS: return iommufd_test_create_access(ucmd, cmd->id, cmd->create_access.flags); @@ -1536,24 +1719,27 @@ int __init iommufd_test_init(void) if (rc) goto err_platform; - rc = iommu_device_sysfs_add(&mock_iommu_device, + rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev, &selftest_iommu_dev->dev, NULL, "%s", dev_name(&selftest_iommu_dev->dev)); if (rc) goto err_bus; - rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops, + rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops, &iommufd_mock_bus_type.bus, &iommufd_mock_bus_type.nb); if (rc) goto err_sysfs; + refcount_set(&mock_iommu.users, 1); + init_completion(&mock_iommu.complete); + mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq"); return 0; err_sysfs: - iommu_device_sysfs_remove(&mock_iommu_device); + iommu_device_sysfs_remove(&mock_iommu.iommu_dev); err_bus: bus_unregister(&iommufd_mock_bus_type.bus); err_platform: @@ -1563,6 +1749,22 @@ err_dbgfs: return rc; } +static void iommufd_test_wait_for_users(void) +{ + if (refcount_dec_and_test(&mock_iommu.users)) + return; + /* + * Time out waiting for iommu device user count to become 0. + * + * Note that this is just making an example here, since the selftest is + * built into the iommufd module, i.e. it only unplugs the iommu device + * when unloading the module. So, it is expected that this WARN_ON will + * not trigger, as long as any iommufd FDs are open. 
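The shutdown path above pairs a refcount with a completion so that module exit can sleep until the last vIOMMU reference is gone; the point of the comment is that module unload cannot race with open iommufd FDs, so the timeout path is not expected to fire. A minimal self-contained sketch of the same pattern, with all names invented for illustration:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>

struct guarded_dev {
	refcount_t users;		/* holds 1 for the owner */
	struct completion gone;
};

static void guarded_dev_init(struct guarded_dev *gd)
{
	refcount_set(&gd->users, 1);
	init_completion(&gd->gone);
}

static void guarded_dev_get(struct guarded_dev *gd)
{
	refcount_inc(&gd->users);	/* e.g. on vIOMMU allocation */
}

static void guarded_dev_put(struct guarded_dev *gd)
{
	/* The last put wakes the waiter in guarded_dev_unplug() */
	if (refcount_dec_and_test(&gd->users))
		complete(&gd->gone);
}

static void guarded_dev_unplug(struct guarded_dev *gd)
{
	/* Drop the owner's reference; wait only if users remain */
	if (refcount_dec_and_test(&gd->users))
		return;
	WARN_ON(!wait_for_completion_timeout(&gd->gone,
					     msecs_to_jiffies(10000)));
}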
+ */ + WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete, + msecs_to_jiffies(10000))); +} + void iommufd_test_exit(void) { if (mock_iommu_iopf_queue) { @@ -1570,8 +1772,9 @@ void iommufd_test_exit(void) mock_iommu_iopf_queue = NULL; } - iommu_device_sysfs_remove(&mock_iommu_device); - iommu_device_unregister_bus(&mock_iommu_device, + iommufd_test_wait_for_users(); + iommu_device_sysfs_remove(&mock_iommu.iommu_dev); + iommu_device_unregister_bus(&mock_iommu.iommu_dev, &iommufd_mock_bus_type.bus, &iommufd_mock_bus_type.nb); bus_unregister(&iommufd_mock_bus_type.bus); diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c index a3ad5f0b6c59..514aacd64009 100644 --- a/drivers/iommu/iommufd/vfio_compat.c +++ b/drivers/iommu/iommufd/vfio_compat.c @@ -291,12 +291,7 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx, case VFIO_DMA_CC_IOMMU: return iommufd_vfio_cc_iommu(ictx); - /* - * This is obsolete, and to be removed from VFIO. It was an incomplete - * idea that got merged. - * https://lore.kernel.org/kvm/0-v1-0093c9b0e345+19-vfio_no_nesting_jgg@nvidia.com/ - */ - case VFIO_TYPE1_NESTING_IOMMU: + case __VFIO_RESERVED_TYPE1_NESTING_IOMMU: return 0; /* diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c new file mode 100644 index 000000000000..69b88e8c7c26 --- /dev/null +++ b/drivers/iommu/iommufd/viommu.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES + */ +#include "iommufd_private.h" + +void iommufd_viommu_destroy(struct iommufd_object *obj) +{ + struct iommufd_viommu *viommu = + container_of(obj, struct iommufd_viommu, obj); + + if (viommu->ops && viommu->ops->destroy) + viommu->ops->destroy(viommu); + refcount_dec(&viommu->hwpt->common.obj.users); + xa_destroy(&viommu->vdevs); +} + +int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd) +{ + struct iommu_viommu_alloc *cmd = ucmd->cmd; + struct iommufd_hwpt_paging *hwpt_paging; + struct iommufd_viommu *viommu; + struct iommufd_device *idev; + const struct iommu_ops *ops; + int rc; + + if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT) + return -EOPNOTSUPP; + + idev = iommufd_get_device(ucmd, cmd->dev_id); + if (IS_ERR(idev)) + return PTR_ERR(idev); + + ops = dev_iommu_ops(idev->dev); + if (!ops->viommu_alloc) { + rc = -EOPNOTSUPP; + goto out_put_idev; + } + + hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt_paging)) { + rc = PTR_ERR(hwpt_paging); + goto out_put_idev; + } + + if (!hwpt_paging->nest_parent) { + rc = -EINVAL; + goto out_put_hwpt; + } + + viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain, + ucmd->ictx, cmd->type); + if (IS_ERR(viommu)) { + rc = PTR_ERR(viommu); + goto out_put_hwpt; + } + + xa_init(&viommu->vdevs); + viommu->type = cmd->type; + viommu->ictx = ucmd->ictx; + viommu->hwpt = hwpt_paging; + refcount_inc(&viommu->hwpt->common.obj.users); + /* + * It is the most likely case that a physical IOMMU is unpluggable. A + * pluggable IOMMU instance (if exists) is responsible for refcounting + * on its own. 
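Seen from userspace, the object allocated here is created with IOMMU_VIOMMU_ALLOC and then populated via IOMMU_VDEVICE_ALLOC (implemented below). A hedged usage sketch against the uAPI structs in include/uapi/linux/iommufd.h; the fd and object IDs are assumed to exist, the driver-specific viommu type is passed in by the caller, and error handling is reduced to -1:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Returns the new vDEVICE object id, or -1 on failure. */
static int viommu_bind_vdev(int iommufd, __u32 dev_id,
			    __u32 nesting_parent_hwpt_id,
			    __u32 viommu_type, __u64 virt_id)
{
	struct iommu_viommu_alloc valloc = {
		.size = sizeof(valloc),
		.type = viommu_type,	/* driver-specific, e.g. the selftest type */
		.dev_id = dev_id,
		.hwpt_id = nesting_parent_hwpt_id,	/* must be a nesting parent */
	};
	struct iommu_vdevice_alloc vdev = {
		.size = sizeof(vdev),
		.dev_id = dev_id,
		.virt_id = virt_id,	/* e.g. a vRID or vSID */
	};

	if (ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &valloc))
		return -1;
	vdev.viommu_id = valloc.out_viommu_id;
	if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &vdev))
		return -1;
	return (int)vdev.out_vdevice_id;
}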
+ */ + viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev); + + cmd->out_viommu_id = viommu->obj.id; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_abort; + iommufd_object_finalize(ucmd->ictx, &viommu->obj); + goto out_put_hwpt; + +out_abort: + iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj); +out_put_hwpt: + iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj); +out_put_idev: + iommufd_put_object(ucmd->ictx, &idev->obj); + return rc; +} + +void iommufd_vdevice_destroy(struct iommufd_object *obj) +{ + struct iommufd_vdevice *vdev = + container_of(obj, struct iommufd_vdevice, obj); + struct iommufd_viommu *viommu = vdev->viommu; + + /* xa_cmpxchg is okay to fail if alloc failed xa_cmpxchg previously */ + xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL); + refcount_dec(&viommu->obj.users); + put_device(vdev->dev); +} + +int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd) +{ + struct iommu_vdevice_alloc *cmd = ucmd->cmd; + struct iommufd_vdevice *vdev, *curr; + struct iommufd_viommu *viommu; + struct iommufd_device *idev; + u64 virt_id = cmd->virt_id; + int rc = 0; + + /* virt_id indexes an xarray */ + if (virt_id > ULONG_MAX) + return -EINVAL; + + viommu = iommufd_get_viommu(ucmd, cmd->viommu_id); + if (IS_ERR(viommu)) + return PTR_ERR(viommu); + + idev = iommufd_get_device(ucmd, cmd->dev_id); + if (IS_ERR(idev)) { + rc = PTR_ERR(idev); + goto out_put_viommu; + } + + if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) { + rc = -EINVAL; + goto out_put_idev; + } + + vdev = iommufd_object_alloc(ucmd->ictx, vdev, IOMMUFD_OBJ_VDEVICE); + if (IS_ERR(vdev)) { + rc = PTR_ERR(vdev); + goto out_put_idev; + } + + vdev->id = virt_id; + vdev->dev = idev->dev; + get_device(idev->dev); + vdev->viommu = viommu; + refcount_inc(&viommu->obj.users); + + curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL); + if (curr) { + rc = xa_err(curr) ?: -EEXIST; + goto out_abort; + } + + cmd->out_vdevice_id = vdev->obj.id; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_abort; + iommufd_object_finalize(ucmd->ictx, &vdev->obj); + goto out_put_idev; + +out_abort: + iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj); +out_put_idev: + iommufd_put_object(ucmd->ictx, &idev->obj); +out_put_viommu: + iommufd_put_object(ucmd->ictx, &viommu->obj); + return rc; +} diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 16c6adff3eb7..18f839721813 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -6,6 +6,7 @@ */ #include <linux/iova.h> +#include <linux/kmemleak.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/smp.h> @@ -506,7 +507,7 @@ __adjust_overlap_range(struct iova *iova, * reserve_iova - reserves an iova in the given range * @iovad: - iova domain pointer * @pfn_lo: - lower page frame address - * @pfn_hi:- higher pfn adderss + * @pfn_hi:- higher pfn address * This function allocates reserves the address range from pfn_lo to pfn_hi so * that this address is not dished out as part of alloc_iova. */ @@ -673,6 +674,11 @@ static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache) { struct iova_magazine *mag = rcache->depot; + /* + * As the mag->next pointer is moved to rcache->depot and reset via + * the mag->size assignment, mark it as a transient false positive. 
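For context, the depot is a singly linked list threaded through a union inside the magazine, so the mag->size store below wipes the in-object copy of mag->next that kmemleak last scanned. An abbreviated sketch of the surrounding definitions and the matching push path, paraphrased from the same file (field types approximate, other members omitted):

struct iova_magazine {
	union {
		unsigned long size;		/* valid while the magazine is in use */
		struct iova_magazine *next;	/* valid while chained in the depot */
	};
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_rcache {
	struct iova_magazine *depot;	/* head of the magazine chain */
	unsigned long depot_size;
	/* ... remaining members omitted ... */
};

static void iova_depot_push(struct iova_rcache *rcache,
			    struct iova_magazine *mag)
{
	mag->next = rcache->depot;	/* reuses the storage of mag->size */
	rcache->depot = mag;
	rcache->depot_size++;
}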
+ */ + kmemleak_transient_leak(mag->next); rcache->depot = mag->next; mag->size = IOVA_MAG_SIZE; rcache->depot_size--; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 6a2707fe7a78..c45313c43b9e 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -1599,7 +1599,7 @@ static const unsigned int mt8186_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK static const struct mtk_iommu_plat_data mt8186_data_mm = { .m4u_plat = M4U_MT8186, .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | - WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, + WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN, .larbid_remap = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20}, {MTK_INVALID_LARBID, 14, 16}, {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}}, diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index c9528065a59a..3f72aef8bd5b 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1230,25 +1230,24 @@ static int omap_iommu_probe(struct platform_device *pdev) if (err) return err; - err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev); - if (err) - goto out_sysfs; obj->has_iommu_driver = true; } + err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev); + if (err) + goto out_sysfs; + pm_runtime_enable(obj->dev); omap_iommu_debugfs_add(obj); dev_info(&pdev->dev, "%s registered\n", obj->name); - /* Re-probe bus to probe device attached to this IOMMU */ - bus_iommu_probe(&platform_bus_type); - return 0; out_sysfs: - iommu_device_sysfs_remove(&obj->iommu); + if (obj->has_iommu_driver) + iommu_device_sysfs_remove(&obj->iommu); return err; } @@ -1256,10 +1255,10 @@ static void omap_iommu_remove(struct platform_device *pdev) { struct omap_iommu *obj = platform_get_drvdata(pdev); - if (obj->has_iommu_driver) { + if (obj->has_iommu_driver) iommu_device_sysfs_remove(&obj->iommu); - iommu_device_unregister(&obj->iommu); - } + + iommu_device_unregister(&obj->iommu); omap_iommu_debugfs_remove(obj); @@ -1723,12 +1722,19 @@ static void omap_iommu_release_device(struct device *dev) } +static int omap_iommu_of_xlate(struct device *dev, const struct of_phandle_args *args) +{ + /* TODO: collect args->np to save re-parsing in probe above */ + return 0; +} + static const struct iommu_ops omap_iommu_ops = { .identity_domain = &omap_iommu_identity_domain, .domain_alloc_paging = omap_iommu_domain_alloc_paging, .probe_device = omap_iommu_probe_device, .release_device = omap_iommu_release_device, .device_group = generic_single_device_group, + .of_xlate = omap_iommu_of_xlate, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = omap_iommu_attach_dev, diff --git a/drivers/iommu/riscv/Kconfig b/drivers/iommu/riscv/Kconfig new file mode 100644 index 000000000000..c071816f59a6 --- /dev/null +++ b/drivers/iommu/riscv/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# RISC-V IOMMU support + +config RISCV_IOMMU + bool "RISC-V IOMMU Support" + depends on RISCV && 64BIT + default y + select IOMMU_API + help + Support for implementations of the RISC-V IOMMU architecture that + complements the RISC-V MMU capabilities, providing similar address + translation and protection functions for accesses from I/O devices. + + Say Y here if your SoC includes an IOMMU device implementing + the RISC-V IOMMU architecture. 
+ +config RISCV_IOMMU_PCI + def_bool y if RISCV_IOMMU && PCI_MSI + help + Support for the PCIe implementation of RISC-V IOMMU architecture. diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile new file mode 100644 index 000000000000..f54c9ed17d41 --- /dev/null +++ b/drivers/iommu/riscv/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o +obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o diff --git a/drivers/iommu/riscv/iommu-bits.h b/drivers/iommu/riscv/iommu-bits.h new file mode 100644 index 000000000000..98daf0e1a306 --- /dev/null +++ b/drivers/iommu/riscv/iommu-bits.h @@ -0,0 +1,784 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2022-2024 Rivos Inc. + * Copyright © 2023 FORTH-ICS/CARV + * Copyright © 2023 RISC-V IOMMU Task Group + * + * RISC-V IOMMU - Register Layout and Data Structures. + * + * Based on the 'RISC-V IOMMU Architecture Specification', Version 1.0 + * Published at https://github.com/riscv-non-isa/riscv-iommu + * + */ + +#ifndef _RISCV_IOMMU_BITS_H_ +#define _RISCV_IOMMU_BITS_H_ + +#include <linux/types.h> +#include <linux/bitfield.h> +#include <linux/bits.h> + +/* + * Chapter 5: Memory Mapped register interface + */ + +/* Common field positions */ +#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10) +#define RISCV_IOMMU_QUEUE_LOG2SZ_FIELD GENMASK_ULL(4, 0) +#define RISCV_IOMMU_QUEUE_INDEX_FIELD GENMASK_ULL(31, 0) +#define RISCV_IOMMU_QUEUE_ENABLE BIT(0) +#define RISCV_IOMMU_QUEUE_INTR_ENABLE BIT(1) +#define RISCV_IOMMU_QUEUE_MEM_FAULT BIT(8) +#define RISCV_IOMMU_QUEUE_OVERFLOW BIT(9) +#define RISCV_IOMMU_QUEUE_ACTIVE BIT(16) +#define RISCV_IOMMU_QUEUE_BUSY BIT(17) + +#define RISCV_IOMMU_ATP_PPN_FIELD GENMASK_ULL(43, 0) +#define RISCV_IOMMU_ATP_MODE_FIELD GENMASK_ULL(63, 60) + +/* 5.3 IOMMU Capabilities (64bits) */ +#define RISCV_IOMMU_REG_CAPABILITIES 0x0000 +#define RISCV_IOMMU_CAPABILITIES_VERSION GENMASK_ULL(7, 0) +#define RISCV_IOMMU_CAPABILITIES_SV32 BIT_ULL(8) +#define RISCV_IOMMU_CAPABILITIES_SV39 BIT_ULL(9) +#define RISCV_IOMMU_CAPABILITIES_SV48 BIT_ULL(10) +#define RISCV_IOMMU_CAPABILITIES_SV57 BIT_ULL(11) +#define RISCV_IOMMU_CAPABILITIES_SVPBMT BIT_ULL(15) +#define RISCV_IOMMU_CAPABILITIES_SV32X4 BIT_ULL(16) +#define RISCV_IOMMU_CAPABILITIES_SV39X4 BIT_ULL(17) +#define RISCV_IOMMU_CAPABILITIES_SV48X4 BIT_ULL(18) +#define RISCV_IOMMU_CAPABILITIES_SV57X4 BIT_ULL(19) +#define RISCV_IOMMU_CAPABILITIES_AMO_MRIF BIT_ULL(21) +#define RISCV_IOMMU_CAPABILITIES_MSI_FLAT BIT_ULL(22) +#define RISCV_IOMMU_CAPABILITIES_MSI_MRIF BIT_ULL(23) +#define RISCV_IOMMU_CAPABILITIES_AMO_HWAD BIT_ULL(24) +#define RISCV_IOMMU_CAPABILITIES_ATS BIT_ULL(25) +#define RISCV_IOMMU_CAPABILITIES_T2GPA BIT_ULL(26) +#define RISCV_IOMMU_CAPABILITIES_END BIT_ULL(27) +#define RISCV_IOMMU_CAPABILITIES_IGS GENMASK_ULL(29, 28) +#define RISCV_IOMMU_CAPABILITIES_HPM BIT_ULL(30) +#define RISCV_IOMMU_CAPABILITIES_DBG BIT_ULL(31) +#define RISCV_IOMMU_CAPABILITIES_PAS GENMASK_ULL(37, 32) +#define RISCV_IOMMU_CAPABILITIES_PD8 BIT_ULL(38) +#define RISCV_IOMMU_CAPABILITIES_PD17 BIT_ULL(39) +#define RISCV_IOMMU_CAPABILITIES_PD20 BIT_ULL(40) + +/** + * enum riscv_iommu_igs_settings - Interrupt Generation Support Settings + * @RISCV_IOMMU_CAPABILITIES_IGS_MSI: IOMMU supports only MSI generation + * @RISCV_IOMMU_CAPABILITIES_IGS_WSI: IOMMU supports only Wired-Signaled interrupt + * @RISCV_IOMMU_CAPABILITIES_IGS_BOTH: IOMMU supports both MSI and WSI generation + * @RISCV_IOMMU_CAPABILITIES_IGS_RSRV: Reserved for 
standard use + */ +enum riscv_iommu_igs_settings { + RISCV_IOMMU_CAPABILITIES_IGS_MSI = 0, + RISCV_IOMMU_CAPABILITIES_IGS_WSI = 1, + RISCV_IOMMU_CAPABILITIES_IGS_BOTH = 2, + RISCV_IOMMU_CAPABILITIES_IGS_RSRV = 3 +}; + +/* 5.4 Features control register (32bits) */ +#define RISCV_IOMMU_REG_FCTL 0x0008 +#define RISCV_IOMMU_FCTL_BE BIT(0) +#define RISCV_IOMMU_FCTL_WSI BIT(1) +#define RISCV_IOMMU_FCTL_GXL BIT(2) + +/* 5.5 Device-directory-table pointer (64bits) */ +#define RISCV_IOMMU_REG_DDTP 0x0010 +#define RISCV_IOMMU_DDTP_IOMMU_MODE GENMASK_ULL(3, 0) +#define RISCV_IOMMU_DDTP_BUSY BIT_ULL(4) +#define RISCV_IOMMU_DDTP_PPN RISCV_IOMMU_PPN_FIELD + +/** + * enum riscv_iommu_ddtp_modes - IOMMU translation modes + * @RISCV_IOMMU_DDTP_IOMMU_MODE_OFF: No inbound transactions allowed + * @RISCV_IOMMU_DDTP_IOMMU_MODE_BARE: Pass-through mode + * @RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL: One-level DDT + * @RISCV_IOMMU_DDTP_IOMMU_MODE_2LVL: Two-level DDT + * @RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL: Three-level DDT + * @RISCV_IOMMU_DDTP_IOMMU_MODE_MAX: Max value allowed by specification + */ +enum riscv_iommu_ddtp_modes { + RISCV_IOMMU_DDTP_IOMMU_MODE_OFF = 0, + RISCV_IOMMU_DDTP_IOMMU_MODE_BARE = 1, + RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL = 2, + RISCV_IOMMU_DDTP_IOMMU_MODE_2LVL = 3, + RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL = 4, + RISCV_IOMMU_DDTP_IOMMU_MODE_MAX = 4 +}; + +/* 5.6 Command Queue Base (64bits) */ +#define RISCV_IOMMU_REG_CQB 0x0018 +#define RISCV_IOMMU_CQB_ENTRIES RISCV_IOMMU_QUEUE_LOG2SZ_FIELD +#define RISCV_IOMMU_CQB_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.7 Command Queue head (32bits) */ +#define RISCV_IOMMU_REG_CQH 0x0020 +#define RISCV_IOMMU_CQH_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD + +/* 5.8 Command Queue tail (32bits) */ +#define RISCV_IOMMU_REG_CQT 0x0024 +#define RISCV_IOMMU_CQT_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD + +/* 5.9 Fault Queue Base (64bits) */ +#define RISCV_IOMMU_REG_FQB 0x0028 +#define RISCV_IOMMU_FQB_ENTRIES RISCV_IOMMU_QUEUE_LOG2SZ_FIELD +#define RISCV_IOMMU_FQB_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.10 Fault Queue Head (32bits) */ +#define RISCV_IOMMU_REG_FQH 0x0030 +#define RISCV_IOMMU_FQH_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD + +/* 5.11 Fault Queue tail (32bits) */ +#define RISCV_IOMMU_REG_FQT 0x0034 +#define RISCV_IOMMU_FQT_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD + +/* 5.12 Page Request Queue base (64bits) */ +#define RISCV_IOMMU_REG_PQB 0x0038 +#define RISCV_IOMMU_PQB_ENTRIES RISCV_IOMMU_QUEUE_LOG2SZ_FIELD +#define RISCV_IOMMU_PQB_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.13 Page Request Queue head (32bits) */ +#define RISCV_IOMMU_REG_PQH 0x0040 +#define RISCV_IOMMU_PQH_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD + +/* 5.14 Page Request Queue tail (32bits) */ +#define RISCV_IOMMU_REG_PQT 0x0044 +#define RISCV_IOMMU_PQT_INDEX_MASK RISCV_IOMMU_QUEUE_INDEX_FIELD + +/* 5.15 Command Queue CSR (32bits) */ +#define RISCV_IOMMU_REG_CQCSR 0x0048 +#define RISCV_IOMMU_CQCSR_CQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_CQCSR_CIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_CQCSR_CQMF RISCV_IOMMU_QUEUE_MEM_FAULT +#define RISCV_IOMMU_CQCSR_CMD_TO BIT(9) +#define RISCV_IOMMU_CQCSR_CMD_ILL BIT(10) +#define RISCV_IOMMU_CQCSR_FENCE_W_IP BIT(11) +#define RISCV_IOMMU_CQCSR_CQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_CQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +/* 5.16 Fault Queue CSR (32bits) */ +#define RISCV_IOMMU_REG_FQCSR 0x004C +#define RISCV_IOMMU_FQCSR_FQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_FQCSR_FIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_FQCSR_FQMF RISCV_IOMMU_QUEUE_MEM_FAULT +#define 
RISCV_IOMMU_FQCSR_FQOF RISCV_IOMMU_QUEUE_OVERFLOW +#define RISCV_IOMMU_FQCSR_FQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_FQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +/* 5.17 Page Request Queue CSR (32bits) */ +#define RISCV_IOMMU_REG_PQCSR 0x0050 +#define RISCV_IOMMU_PQCSR_PQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_PQCSR_PIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_PQCSR_PQMF RISCV_IOMMU_QUEUE_MEM_FAULT +#define RISCV_IOMMU_PQCSR_PQOF RISCV_IOMMU_QUEUE_OVERFLOW +#define RISCV_IOMMU_PQCSR_PQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_PQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +/* 5.18 Interrupt Pending Status (32bits) */ +#define RISCV_IOMMU_REG_IPSR 0x0054 + +#define RISCV_IOMMU_INTR_CQ 0 +#define RISCV_IOMMU_INTR_FQ 1 +#define RISCV_IOMMU_INTR_PM 2 +#define RISCV_IOMMU_INTR_PQ 3 +#define RISCV_IOMMU_INTR_COUNT 4 + +#define RISCV_IOMMU_IPSR_CIP BIT(RISCV_IOMMU_INTR_CQ) +#define RISCV_IOMMU_IPSR_FIP BIT(RISCV_IOMMU_INTR_FQ) +#define RISCV_IOMMU_IPSR_PMIP BIT(RISCV_IOMMU_INTR_PM) +#define RISCV_IOMMU_IPSR_PIP BIT(RISCV_IOMMU_INTR_PQ) + +/* 5.19 Performance monitoring counter overflow status (32bits) */ +#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058 +#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0) +#define RISCV_IOMMU_IOCOUNTOVF_HPM GENMASK_ULL(31, 1) + +/* 5.20 Performance monitoring counter inhibits (32bits) */ +#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C +#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0) +#define RISCV_IOMMU_IOCOUNTINH_HPM GENMASK(31, 1) + +/* 5.21 Performance monitoring cycles counter (64bits) */ +#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060 +#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0) +#define RISCV_IOMMU_IOHPMCYCLES_OF BIT_ULL(63) + +/* 5.22 Performance monitoring event counters (31 * 64bits) */ +#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068 +#define RISCV_IOMMU_REG_IOHPMCTR(_n) (RISCV_IOMMU_REG_IOHPMCTR_BASE + ((_n) * 0x8)) + +/* 5.23 Performance monitoring event selectors (31 * 64bits) */ +#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160 +#define RISCV_IOMMU_REG_IOHPMEVT(_n) (RISCV_IOMMU_REG_IOHPMEVT_BASE + ((_n) * 0x8)) +#define RISCV_IOMMU_IOHPMEVT_EVENTID GENMASK_ULL(14, 0) +#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15) +#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16) +#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36) +#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60) +#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61) +#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62) +#define RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63) + +/* Number of defined performance-monitoring event selectors */ +#define RISCV_IOMMU_IOHPMEVT_CNT 31 + +/** + * enum riscv_iommu_hpmevent_id - Performance-monitoring event identifier + * + * @RISCV_IOMMU_HPMEVENT_INVALID: Invalid event, do not count + * @RISCV_IOMMU_HPMEVENT_URQ: Untranslated requests + * @RISCV_IOMMU_HPMEVENT_TRQ: Translated requests + * @RISCV_IOMMU_HPMEVENT_ATS_RQ: ATS translation requests + * @RISCV_IOMMU_HPMEVENT_TLB_MISS: TLB misses + * @RISCV_IOMMU_HPMEVENT_DD_WALK: Device directory walks + * @RISCV_IOMMU_HPMEVENT_PD_WALK: Process directory walks + * @RISCV_IOMMU_HPMEVENT_S_VS_WALKS: First-stage page table walks + * @RISCV_IOMMU_HPMEVENT_G_WALKS: Second-stage page table walks + * @RISCV_IOMMU_HPMEVENT_MAX: Value to denote maximum Event IDs + */ +enum riscv_iommu_hpmevent_id { + RISCV_IOMMU_HPMEVENT_INVALID = 0, + RISCV_IOMMU_HPMEVENT_URQ = 1, + RISCV_IOMMU_HPMEVENT_TRQ = 2, + RISCV_IOMMU_HPMEVENT_ATS_RQ = 3, + RISCV_IOMMU_HPMEVENT_TLB_MISS = 4, + RISCV_IOMMU_HPMEVENT_DD_WALK = 5, + RISCV_IOMMU_HPMEVENT_PD_WALK = 6, + 
RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7, + RISCV_IOMMU_HPMEVENT_G_WALKS = 8, + RISCV_IOMMU_HPMEVENT_MAX = 9 +}; + +/* 5.24 Translation request IOVA (64bits) */ +#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258 +#define RISCV_IOMMU_TR_REQ_IOVA_VPN GENMASK_ULL(63, 12) + +/* 5.25 Translation request control (64bits) */ +#define RISCV_IOMMU_REG_TR_REQ_CTL 0x0260 +#define RISCV_IOMMU_TR_REQ_CTL_GO_BUSY BIT_ULL(0) +#define RISCV_IOMMU_TR_REQ_CTL_PRIV BIT_ULL(1) +#define RISCV_IOMMU_TR_REQ_CTL_EXE BIT_ULL(2) +#define RISCV_IOMMU_TR_REQ_CTL_NW BIT_ULL(3) +#define RISCV_IOMMU_TR_REQ_CTL_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_TR_REQ_CTL_PV BIT_ULL(32) +#define RISCV_IOMMU_TR_REQ_CTL_DID GENMASK_ULL(63, 40) + +/* 5.26 Translation request response (64bits) */ +#define RISCV_IOMMU_REG_TR_RESPONSE 0x0268 +#define RISCV_IOMMU_TR_RESPONSE_FAULT BIT_ULL(0) +#define RISCV_IOMMU_TR_RESPONSE_PBMT GENMASK_ULL(8, 7) +#define RISCV_IOMMU_TR_RESPONSE_SZ BIT_ULL(9) +#define RISCV_IOMMU_TR_RESPONSE_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.27 Interrupt cause to vector (64bits) */ +#define RISCV_IOMMU_REG_ICVEC 0x02F8 +#define RISCV_IOMMU_ICVEC_CIV GENMASK_ULL(3, 0) +#define RISCV_IOMMU_ICVEC_FIV GENMASK_ULL(7, 4) +#define RISCV_IOMMU_ICVEC_PMIV GENMASK_ULL(11, 8) +#define RISCV_IOMMU_ICVEC_PIV GENMASK_ULL(15, 12) + +/* 5.28 MSI Configuration table (32 * 64bits) */ +#define RISCV_IOMMU_REG_MSI_CFG_TBL 0x0300 +#define RISCV_IOMMU_REG_MSI_CFG_TBL_ADDR(_n) \ + (RISCV_IOMMU_REG_MSI_CFG_TBL + ((_n) * 0x10)) +#define RISCV_IOMMU_MSI_CFG_TBL_ADDR GENMASK_ULL(55, 2) +#define RISCV_IOMMU_REG_MSI_CFG_TBL_DATA(_n) \ + (RISCV_IOMMU_REG_MSI_CFG_TBL + ((_n) * 0x10) + 0x08) +#define RISCV_IOMMU_MSI_CFG_TBL_DATA GENMASK_ULL(31, 0) +#define RISCV_IOMMU_REG_MSI_CFG_TBL_CTRL(_n) \ + (RISCV_IOMMU_REG_MSI_CFG_TBL + ((_n) * 0x10) + 0x0C) +#define RISCV_IOMMU_MSI_CFG_TBL_CTRL_M BIT_ULL(0) + +#define RISCV_IOMMU_REG_SIZE 0x1000 + +/* + * Chapter 2: Data structures + */ + +/* + * Device Directory Table macros for non-leaf nodes + */ +#define RISCV_IOMMU_DDTE_V BIT_ULL(0) +#define RISCV_IOMMU_DDTE_PPN RISCV_IOMMU_PPN_FIELD + +/** + * struct riscv_iommu_dc - Device Context + * @tc: Translation Control + * @iohgatp: I/O Hypervisor guest address translation and protection + * (Second stage context) + * @ta: Translation Attributes + * @fsc: First stage context + * @msiptp: MSI page table pointer + * @msi_addr_mask: MSI address mask + * @msi_addr_pattern: MSI address pattern + * @_reserved: Reserved for future use, padding + * + * This structure is used for leaf nodes on the Device Directory Table, + * in case RISCV_IOMMU_CAPABILITIES_MSI_FLAT is not set, the bottom 4 fields + * are not present and are skipped with pointer arithmetic to avoid + * casting, check out riscv_iommu_get_dc(). 
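In practice the skip amounts to treating the device context as a 32-byte record whenever the MSI fields are absent; a sketch of that arithmetic against the struct defined just below (riscv_iommu_get_dc() itself lives in iommu.c and is assumed here, not shown):

#include <linux/stddef.h>

/* Assumed helper mirroring the comment above: the base-format device
 * context ends immediately before the MSI page table pointer. */
static inline size_t riscv_iommu_dc_len(u64 caps)
{
	if (caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT)
		return sizeof(struct riscv_iommu_dc);	/* 8 x u64 */
	return offsetof(struct riscv_iommu_dc, msiptp);	/* 4 x u64 */
}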
+ * See section 2.1 for more details + */ +struct riscv_iommu_dc { + u64 tc; + u64 iohgatp; + u64 ta; + u64 fsc; + u64 msiptp; + u64 msi_addr_mask; + u64 msi_addr_pattern; + u64 _reserved; +}; + +/* Translation control fields */ +#define RISCV_IOMMU_DC_TC_V BIT_ULL(0) +#define RISCV_IOMMU_DC_TC_EN_ATS BIT_ULL(1) +#define RISCV_IOMMU_DC_TC_EN_PRI BIT_ULL(2) +#define RISCV_IOMMU_DC_TC_T2GPA BIT_ULL(3) +#define RISCV_IOMMU_DC_TC_DTF BIT_ULL(4) +#define RISCV_IOMMU_DC_TC_PDTV BIT_ULL(5) +#define RISCV_IOMMU_DC_TC_PRPR BIT_ULL(6) +#define RISCV_IOMMU_DC_TC_GADE BIT_ULL(7) +#define RISCV_IOMMU_DC_TC_SADE BIT_ULL(8) +#define RISCV_IOMMU_DC_TC_DPE BIT_ULL(9) +#define RISCV_IOMMU_DC_TC_SBE BIT_ULL(10) +#define RISCV_IOMMU_DC_TC_SXL BIT_ULL(11) + +/* Second-stage (aka G-stage) context fields */ +#define RISCV_IOMMU_DC_IOHGATP_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_DC_IOHGATP_GSCID GENMASK_ULL(59, 44) +#define RISCV_IOMMU_DC_IOHGATP_MODE RISCV_IOMMU_ATP_MODE_FIELD + +/** + * enum riscv_iommu_dc_iohgatp_modes - Guest address translation/protection modes + * @RISCV_IOMMU_DC_IOHGATP_MODE_BARE: No translation/protection + * @RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4: Sv32x4 (2-bit extension of Sv32), when fctl.GXL == 1 + * @RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4: Sv39x4 (2-bit extension of Sv39), when fctl.GXL == 0 + * @RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4: Sv48x4 (2-bit extension of Sv48), when fctl.GXL == 0 + * @RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4: Sv57x4 (2-bit extension of Sv57), when fctl.GXL == 0 + */ +enum riscv_iommu_dc_iohgatp_modes { + RISCV_IOMMU_DC_IOHGATP_MODE_BARE = 0, + RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4 = 8, + RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4 = 8, + RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4 = 9, + RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4 = 10 +}; + +/* Translation attributes fields */ +#define RISCV_IOMMU_DC_TA_PSCID GENMASK_ULL(31, 12) + +/* First-stage context fields */ +#define RISCV_IOMMU_DC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_DC_FSC_MODE RISCV_IOMMU_ATP_MODE_FIELD + +/** + * enum riscv_iommu_dc_fsc_atp_modes - First stage address translation/protection modes + * @RISCV_IOMMU_DC_FSC_MODE_BARE: No translation/protection + * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32: Sv32, when dc.tc.SXL == 1 + * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: Sv39, when dc.tc.SXL == 0 + * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: Sv48, when dc.tc.SXL == 0 + * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: Sv57, when dc.tc.SXL == 0 + * @RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8: 1lvl PDT, 8bit process ids + * @RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17: 2lvl PDT, 17bit process ids + * @RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20: 3lvl PDT, 20bit process ids + * + * FSC holds IOSATP when RISCV_IOMMU_DC_TC_PDTV is 0 and PDTP otherwise. + * IOSATP controls the first stage address translation (same as the satp register on + * the RISC-V MMU), and PDTP holds the process directory table, used to select a + * first stage page table based on a process id (for devices that support multiple + * process ids). 
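To make the encoding concrete, a small illustration (not part of the patch) of composing an fsc value as IOSATP, i.e. with dc.tc.PDTV clear, for an Sv39 first-stage table whose root sits at the given physical page number:

static inline u64 riscv_iommu_fsc_iosatp_sv39(unsigned long root_pfn)
{
	return FIELD_PREP(RISCV_IOMMU_DC_FSC_MODE,
			  RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39) |
	       FIELD_PREP(RISCV_IOMMU_DC_FSC_PPN, root_pfn);
}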
+ */ +enum riscv_iommu_dc_fsc_atp_modes { + RISCV_IOMMU_DC_FSC_MODE_BARE = 0, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32 = 8, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 = 8, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48 = 9, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57 = 10, + RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8 = 1, + RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17 = 2, + RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20 = 3 +}; + +/* MSI page table pointer */ +#define RISCV_IOMMU_DC_MSIPTP_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_DC_MSIPTP_MODE RISCV_IOMMU_ATP_MODE_FIELD +#define RISCV_IOMMU_DC_MSIPTP_MODE_OFF 0 +#define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1 + +/* MSI address mask */ +#define RISCV_IOMMU_DC_MSI_ADDR_MASK GENMASK_ULL(51, 0) + +/* MSI address pattern */ +#define RISCV_IOMMU_DC_MSI_PATTERN GENMASK_ULL(51, 0) + +/** + * struct riscv_iommu_pc - Process Context + * @ta: Translation Attributes + * @fsc: First stage context + * + * This structure is used for leaf nodes on the Process Directory Table + * See section 2.3 for more details + */ +struct riscv_iommu_pc { + u64 ta; + u64 fsc; +}; + +/* Translation attributes fields */ +#define RISCV_IOMMU_PC_TA_V BIT_ULL(0) +#define RISCV_IOMMU_PC_TA_ENS BIT_ULL(1) +#define RISCV_IOMMU_PC_TA_SUM BIT_ULL(2) +#define RISCV_IOMMU_PC_TA_PSCID GENMASK_ULL(31, 12) + +/* First stage context fields */ +#define RISCV_IOMMU_PC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_PC_FSC_MODE RISCV_IOMMU_ATP_MODE_FIELD + +/* + * Chapter 3: In-memory queue interface + */ + +/** + * struct riscv_iommu_command - Generic IOMMU command structure + * @dword0: Includes the opcode and the function identifier + * @dword1: Opcode specific data + * + * The commands are interpreted as two 64bit fields, where the first + * 7bits of the first field are the opcode which also defines the + * command's format, followed by a 3bit field that specifies the + * function invoked by that command, and the rest is opcode-specific. + * This is a generic struct which will be populated differently + * according to each command. For more infos on the commands and + * the command queue check section 3.1. + */ +struct riscv_iommu_command { + u64 dword0; + u64 dword1; +}; + +/* Fields on dword0, common for all commands */ +#define RISCV_IOMMU_CMD_OPCODE GENMASK_ULL(6, 0) +#define RISCV_IOMMU_CMD_FUNC GENMASK_ULL(9, 7) + +/* 3.1.1 IOMMU Page-table cache invalidation */ +/* Fields on dword0 */ +#define RISCV_IOMMU_CMD_IOTINVAL_OPCODE 1 +#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA 0 +#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA 1 +#define RISCV_IOMMU_CMD_IOTINVAL_AV BIT_ULL(10) +#define RISCV_IOMMU_CMD_IOTINVAL_PSCID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_CMD_IOTINVAL_PSCV BIT_ULL(32) +#define RISCV_IOMMU_CMD_IOTINVAL_GV BIT_ULL(33) +#define RISCV_IOMMU_CMD_IOTINVAL_GSCID GENMASK_ULL(59, 44) +/* dword1[61:10] is the 4K-aligned page address */ +#define RISCV_IOMMU_CMD_IOTINVAL_ADDR GENMASK_ULL(61, 10) + +/* 3.1.2 IOMMU Command Queue Fences */ +/* Fields on dword0 */ +#define RISCV_IOMMU_CMD_IOFENCE_OPCODE 2 +#define RISCV_IOMMU_CMD_IOFENCE_FUNC_C 0 +#define RISCV_IOMMU_CMD_IOFENCE_AV BIT_ULL(10) +#define RISCV_IOMMU_CMD_IOFENCE_WSI BIT_ULL(11) +#define RISCV_IOMMU_CMD_IOFENCE_PR BIT_ULL(12) +#define RISCV_IOMMU_CMD_IOFENCE_PW BIT_ULL(13) +#define RISCV_IOMMU_CMD_IOFENCE_DATA GENMASK_ULL(63, 32) +/* dword1 is the address, word-size aligned and shifted to the right by two bits. 
*/ + +/* 3.1.3 IOMMU Directory cache invalidation */ +/* Fields on dword0 */ +#define RISCV_IOMMU_CMD_IODIR_OPCODE 3 +#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT 0 +#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT 1 +#define RISCV_IOMMU_CMD_IODIR_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_CMD_IODIR_DV BIT_ULL(33) +#define RISCV_IOMMU_CMD_IODIR_DID GENMASK_ULL(63, 40) +/* dword1 is reserved for standard use */ + +/* 3.1.4 IOMMU PCIe ATS */ +/* Fields on dword0 */ +#define RISCV_IOMMU_CMD_ATS_OPCODE 4 +#define RISCV_IOMMU_CMD_ATS_FUNC_INVAL 0 +#define RISCV_IOMMU_CMD_ATS_FUNC_PRGR 1 +#define RISCV_IOMMU_CMD_ATS_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_CMD_ATS_PV BIT_ULL(32) +#define RISCV_IOMMU_CMD_ATS_DSV BIT_ULL(33) +#define RISCV_IOMMU_CMD_ATS_RID GENMASK_ULL(55, 40) +#define RISCV_IOMMU_CMD_ATS_DSEG GENMASK_ULL(63, 56) +/* dword1 is the ATS payload, two different payload types for INVAL and PRGR */ + +/* ATS.INVAL payload*/ +#define RISCV_IOMMU_CMD_ATS_INVAL_G BIT_ULL(0) +/* Bits 1 - 10 are zeroed */ +#define RISCV_IOMMU_CMD_ATS_INVAL_S BIT_ULL(11) +#define RISCV_IOMMU_CMD_ATS_INVAL_UADDR GENMASK_ULL(63, 12) + +/* ATS.PRGR payload */ +/* Bits 0 - 31 are zeroed */ +#define RISCV_IOMMU_CMD_ATS_PRGR_PRG_INDEX GENMASK_ULL(40, 32) +/* Bits 41 - 43 are zeroed */ +#define RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE GENMASK_ULL(47, 44) +#define RISCV_IOMMU_CMD_ATS_PRGR_DST_ID GENMASK_ULL(63, 48) + +/** + * struct riscv_iommu_fq_record - Fault/Event Queue Record + * @hdr: Header, includes fault/event cause, PID/DID, transaction type etc + * @_reserved: Low 32bits for custom use, high 32bits for standard use + * @iotval: Transaction-type/cause specific format + * @iotval2: Cause specific format + * + * The fault/event queue reports events and failures raised when + * processing transactions. Each record is a 32byte structure where + * the first dword has a fixed format for providing generic infos + * regarding the fault/event, and two more dwords are there for + * fault/event-specific information. For more details see section + * 3.2. 
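A sketch of how a fault handler might unpack the fixed header dword while draining the queue, using the struct and header masks defined just below (illustrative only; the real handler lives in iommu.c):

static void riscv_iommu_fq_report(const struct riscv_iommu_fq_record *rec)
{
	unsigned int cause = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, rec->hdr);
	unsigned int ttyp = FIELD_GET(RISCV_IOMMU_FQ_HDR_TTYP, rec->hdr);
	unsigned int devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, rec->hdr);

	pr_err_ratelimited("riscv-iommu: fault cause %u ttyp %u devid %u iotval %llx iotval2 %llx\n",
			   cause, ttyp, devid, rec->iotval, rec->iotval2);
}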
+ */ +struct riscv_iommu_fq_record { + u64 hdr; + u64 _reserved; + u64 iotval; + u64 iotval2; +}; + +/* Fields on header */ +#define RISCV_IOMMU_FQ_HDR_CAUSE GENMASK_ULL(11, 0) +#define RISCV_IOMMU_FQ_HDR_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_FQ_HDR_PV BIT_ULL(32) +#define RISCV_IOMMU_FQ_HDR_PRIV BIT_ULL(33) +#define RISCV_IOMMU_FQ_HDR_TTYP GENMASK_ULL(39, 34) +#define RISCV_IOMMU_FQ_HDR_DID GENMASK_ULL(63, 40) + +/** + * enum riscv_iommu_fq_causes - Fault/event cause values + * @RISCV_IOMMU_FQ_CAUSE_INST_FAULT: Instruction access fault + * @RISCV_IOMMU_FQ_CAUSE_RD_ADDR_MISALIGNED: Read address misaligned + * @RISCV_IOMMU_FQ_CAUSE_RD_FAULT: Read load fault + * @RISCV_IOMMU_FQ_CAUSE_WR_ADDR_MISALIGNED: Write/AMO address misaligned + * @RISCV_IOMMU_FQ_CAUSE_WR_FAULT: Write/AMO access fault + * @RISCV_IOMMU_FQ_CAUSE_INST_FAULT_S: Instruction page fault + * @RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S: Read page fault + * @RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S: Write/AMO page fault + * @RISCV_IOMMU_FQ_CAUSE_INST_FAULT_VS: Instruction guest page fault + * @RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS: Read guest page fault + * @RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS: Write/AMO guest page fault + * @RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED: All inbound transactions disallowed + * @RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT: DDT entry load access fault + * @RISCV_IOMMU_FQ_CAUSE_DDT_INVALID: DDT entry invalid + * @RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED: DDT entry misconfigured + * @RISCV_IOMMU_FQ_CAUSE_TTYP_BLOCKED: Transaction type disallowed + * @RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT: MSI PTE load access fault + * @RISCV_IOMMU_FQ_CAUSE_MSI_INVALID: MSI PTE invalid + * @RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED: MSI PTE misconfigured + * @RISCV_IOMMU_FQ_CAUSE_MRIF_FAULT: MRIF access fault + * @RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT: PDT entry load access fault + * @RISCV_IOMMU_FQ_CAUSE_PDT_INVALID: PDT entry invalid + * @RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED: PDT entry misconfigured + * @RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED: DDT data corruption + * @RISCV_IOMMU_FQ_CAUSE_PDT_CORRUPTED: PDT data corruption + * @RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED: MSI page table data corruption + * @RISCV_IOMMU_FQ_CAUSE_MRIF_CORRUIPTED: MRIF data corruption + * @RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR: Internal data path error + * @RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT: IOMMU MSI write access fault + * @RISCV_IOMMU_FQ_CAUSE_PT_CORRUPTED: First/second stage page table data corruption + * + * Values are on table 11 of the spec, encodings 275 - 2047 are reserved for standard + * use, and 2048 - 4095 for custom use. 
+ */ +enum riscv_iommu_fq_causes { + RISCV_IOMMU_FQ_CAUSE_INST_FAULT = 1, + RISCV_IOMMU_FQ_CAUSE_RD_ADDR_MISALIGNED = 4, + RISCV_IOMMU_FQ_CAUSE_RD_FAULT = 5, + RISCV_IOMMU_FQ_CAUSE_WR_ADDR_MISALIGNED = 6, + RISCV_IOMMU_FQ_CAUSE_WR_FAULT = 7, + RISCV_IOMMU_FQ_CAUSE_INST_FAULT_S = 12, + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S = 13, + RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S = 15, + RISCV_IOMMU_FQ_CAUSE_INST_FAULT_VS = 20, + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS = 21, + RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS = 23, + RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED = 256, + RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT = 257, + RISCV_IOMMU_FQ_CAUSE_DDT_INVALID = 258, + RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED = 259, + RISCV_IOMMU_FQ_CAUSE_TTYP_BLOCKED = 260, + RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT = 261, + RISCV_IOMMU_FQ_CAUSE_MSI_INVALID = 262, + RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED = 263, + RISCV_IOMMU_FQ_CAUSE_MRIF_FAULT = 264, + RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT = 265, + RISCV_IOMMU_FQ_CAUSE_PDT_INVALID = 266, + RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED = 267, + RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED = 268, + RISCV_IOMMU_FQ_CAUSE_PDT_CORRUPTED = 269, + RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED = 270, + RISCV_IOMMU_FQ_CAUSE_MRIF_CORRUIPTED = 271, + RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR = 272, + RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT = 273, + RISCV_IOMMU_FQ_CAUSE_PT_CORRUPTED = 274 +}; + +/** + * enum riscv_iommu_fq_ttypes: Fault/event transaction types + * @RISCV_IOMMU_FQ_TTYP_NONE: None. Fault not caused by an inbound transaction. + * @RISCV_IOMMU_FQ_TTYP_UADDR_INST_FETCH: Instruction fetch from untranslated address + * @RISCV_IOMMU_FQ_TTYP_UADDR_RD: Read from untranslated address + * @RISCV_IOMMU_FQ_TTYP_UADDR_WR: Write/AMO to untranslated address + * @RISCV_IOMMU_FQ_TTYP_TADDR_INST_FETCH: Instruction fetch from translated address + * @RISCV_IOMMU_FQ_TTYP_TADDR_RD: Read from translated address + * @RISCV_IOMMU_FQ_TTYP_TADDR_WR: Write/AMO to translated address + * @RISCV_IOMMU_FQ_TTYP_PCIE_ATS_REQ: PCIe ATS translation request + * @RISCV_IOMMU_FQ_TTYP_PCIE_MSG_REQ: PCIe message request + * + * Values are on table 12 of the spec, type 4 and 10 - 31 are reserved for standard use + * and 31 - 63 for custom use. + */ +enum riscv_iommu_fq_ttypes { + RISCV_IOMMU_FQ_TTYP_NONE = 0, + RISCV_IOMMU_FQ_TTYP_UADDR_INST_FETCH = 1, + RISCV_IOMMU_FQ_TTYP_UADDR_RD = 2, + RISCV_IOMMU_FQ_TTYP_UADDR_WR = 3, + RISCV_IOMMU_FQ_TTYP_TADDR_INST_FETCH = 5, + RISCV_IOMMU_FQ_TTYP_TADDR_RD = 6, + RISCV_IOMMU_FQ_TTYP_TADDR_WR = 7, + RISCV_IOMMU_FQ_TTYP_PCIE_ATS_REQ = 8, + RISCV_IOMMU_FQ_TTYP_PCIE_MSG_REQ = 9, +}; + +/** + * struct riscv_iommu_pq_record - PCIe Page Request record + * @hdr: Header, includes PID, DID etc + * @payload: Holds the page address, request group and permission bits + * + * For more infos on the PCIe Page Request queue see chapter 3.3. 
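A hypothetical builder in the style of the helpers further below (not part of this patch) shows how such a page request would be answered with an ATS.PRGR command, using the ATS masks defined earlier:

static inline void riscv_iommu_cmd_ats_prgr(struct riscv_iommu_command *cmd,
					    unsigned int rid, u32 prg_index,
					    u32 resp_code)
{
	cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_ATS_OPCODE) |
		      FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_ATS_FUNC_PRGR) |
		      FIELD_PREP(RISCV_IOMMU_CMD_ATS_RID, rid);
	cmd->dword1 = FIELD_PREP(RISCV_IOMMU_CMD_ATS_PRGR_PRG_INDEX, prg_index) |
		      FIELD_PREP(RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE, resp_code);
}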
+ */ +struct riscv_iommu_pq_record { + u64 hdr; + u64 payload; +}; + +/* Header fields */ +#define RISCV_IOMMU_PQ_HDR_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_PQ_HDR_PV BIT_ULL(32) +#define RISCV_IOMMU_PQ_HDR_PRIV BIT_ULL(33) +#define RISCV_IOMMU_PQ_HDR_EXEC BIT_ULL(34) +#define RISCV_IOMMU_PQ_HDR_DID GENMASK_ULL(63, 40) + +/* Payload fields */ +#define RISCV_IOMMU_PQ_PAYLOAD_R BIT_ULL(0) +#define RISCV_IOMMU_PQ_PAYLOAD_W BIT_ULL(1) +#define RISCV_IOMMU_PQ_PAYLOAD_L BIT_ULL(2) +#define RISCV_IOMMU_PQ_PAYLOAD_RWL_MASK GENMASK_ULL(2, 0) +#define RISCV_IOMMU_PQ_PAYLOAD_PRGI GENMASK_ULL(11, 3) /* Page Request Group Index */ +#define RISCV_IOMMU_PQ_PAYLOAD_ADDR GENMASK_ULL(63, 12) + +/** + * struct riscv_iommu_msipte - MSI Page Table Entry + * @pte: MSI PTE + * @mrif_info: Memory-resident interrupt file info + * + * The MSI Page Table is used for virtualizing MSIs, so that when + * a device sends an MSI to a guest, the IOMMU can reroute it + * by translating the MSI address, either to a guest interrupt file + * or a memory resident interrupt file (MRIF). Note that this page table + * is an array of MSI PTEs, not a multi-level page table; each entry + * is a leaf entry. For more information, see the AIA spec, chapter 9.5. + * + * Also, in basic mode the mrif_info field is ignored by the IOMMU and can + * be used by software; any other reserved fields in pte must be zeroed + * out by software. + */ +struct riscv_iommu_msipte { + u64 pte; + u64 mrif_info; +}; + +/* Fields on pte */ +#define RISCV_IOMMU_MSIPTE_V BIT_ULL(0) +#define RISCV_IOMMU_MSIPTE_M GENMASK_ULL(2, 1) +#define RISCV_IOMMU_MSIPTE_MRIF_ADDR GENMASK_ULL(53, 7) /* When M == 1 (MRIF mode) */ +#define RISCV_IOMMU_MSIPTE_PPN RISCV_IOMMU_PPN_FIELD /* When M == 3 (basic mode) */ +#define RISCV_IOMMU_MSIPTE_C BIT_ULL(63) + +/* Fields on mrif_info */ +#define RISCV_IOMMU_MSIPTE_MRIF_NID GENMASK_ULL(9, 0) +#define RISCV_IOMMU_MSIPTE_MRIF_NPPN RISCV_IOMMU_PPN_FIELD +#define RISCV_IOMMU_MSIPTE_MRIF_NID_MSB BIT_ULL(60) + +/* Helper functions: command structure builders. 
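Typical use of the builders that follow chains one setup call with the relevant set_* modifiers, then hands the command to the queue; the submit and wait helpers named in the comments are assumptions standing in for the queue code in iommu.c:

static void example_inval_one_page(struct riscv_iommu_device *iommu,
				   int pscid, u64 iova)
{
	struct riscv_iommu_command cmd;

	riscv_iommu_cmd_inval_vma(&cmd);	/* IOTINVAL.VMA */
	riscv_iommu_cmd_inval_set_pscid(&cmd, pscid);
	riscv_iommu_cmd_inval_set_addr(&cmd, iova);
	/* riscv_iommu_cmd_send(iommu, &cmd); -- assumed submit helper */

	riscv_iommu_cmd_iofence(&cmd);		/* order against later use */
	/* riscv_iommu_cmd_sync(iommu, &cmd); -- assumed completion wait */
}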
*/ + +static inline void riscv_iommu_cmd_inval_vma(struct riscv_iommu_command *cmd) +{ + cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOTINVAL_OPCODE) | + FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA); + cmd->dword1 = 0; +} + +static inline void riscv_iommu_cmd_inval_set_addr(struct riscv_iommu_command *cmd, + u64 addr) +{ + cmd->dword1 = FIELD_PREP(RISCV_IOMMU_CMD_IOTINVAL_ADDR, phys_to_pfn(addr)); + cmd->dword0 |= RISCV_IOMMU_CMD_IOTINVAL_AV; +} + +static inline void riscv_iommu_cmd_inval_set_pscid(struct riscv_iommu_command *cmd, + int pscid) +{ + cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IOTINVAL_PSCID, pscid) | + RISCV_IOMMU_CMD_IOTINVAL_PSCV; +} + +static inline void riscv_iommu_cmd_inval_set_gscid(struct riscv_iommu_command *cmd, + int gscid) +{ + cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IOTINVAL_GSCID, gscid) | + RISCV_IOMMU_CMD_IOTINVAL_GV; +} + +static inline void riscv_iommu_cmd_iofence(struct riscv_iommu_command *cmd) +{ + cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOFENCE_OPCODE) | + FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOFENCE_FUNC_C) | + RISCV_IOMMU_CMD_IOFENCE_PR | RISCV_IOMMU_CMD_IOFENCE_PW; + cmd->dword1 = 0; +} + +static inline void riscv_iommu_cmd_iofence_set_av(struct riscv_iommu_command *cmd, + u64 addr, u32 data) +{ + cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOFENCE_OPCODE) | + FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOFENCE_FUNC_C) | + FIELD_PREP(RISCV_IOMMU_CMD_IOFENCE_DATA, data) | + RISCV_IOMMU_CMD_IOFENCE_AV; + cmd->dword1 = addr >> 2; +} + +static inline void riscv_iommu_cmd_iodir_inval_ddt(struct riscv_iommu_command *cmd) +{ + cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IODIR_OPCODE) | + FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT); + cmd->dword1 = 0; +} + +static inline void riscv_iommu_cmd_iodir_inval_pdt(struct riscv_iommu_command *cmd) +{ + cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IODIR_OPCODE) | + FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT); + cmd->dword1 = 0; +} + +static inline void riscv_iommu_cmd_iodir_set_did(struct riscv_iommu_command *cmd, + unsigned int devid) +{ + cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IODIR_DID, devid) | + RISCV_IOMMU_CMD_IODIR_DV; +} + +static inline void riscv_iommu_cmd_iodir_set_pid(struct riscv_iommu_command *cmd, + unsigned int pasid) +{ + cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IODIR_PID, pasid); +} + +#endif /* _RISCV_IOMMU_BITS_H_ */ diff --git a/drivers/iommu/riscv/iommu-pci.c b/drivers/iommu/riscv/iommu-pci.c new file mode 100644 index 000000000000..c7a89143014c --- /dev/null +++ b/drivers/iommu/riscv/iommu-pci.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright © 2022-2024 Rivos Inc. + * Copyright © 2023 FORTH-ICS/CARV + * + * RISCV IOMMU as a PCIe device + * + * Authors + * Tomasz Jeznach <tjeznach@rivosinc.com> + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <linux/compiler.h> +#include <linux/init.h> +#include <linux/iommu.h> +#include <linux/kernel.h> +#include <linux/pci.h> + +#include "iommu-bits.h" +#include "iommu.h" + +/* QEMU RISC-V IOMMU implementation */ +#define PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014 + +/* Rivos Inc. 
assigned PCI Vendor and Device IDs */ +#ifndef PCI_VENDOR_ID_RIVOS +#define PCI_VENDOR_ID_RIVOS 0x1efd +#endif + +#define PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA 0x0008 + +static int riscv_iommu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct riscv_iommu_device *iommu; + int rc, vec; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) + return -ENODEV; + + if (pci_resource_len(pdev, 0) < RISCV_IOMMU_REG_SIZE) + return -ENODEV; + + rc = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (rc) + return dev_err_probe(dev, rc, "pcim_iomap_regions failed\n"); + + iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + iommu->dev = dev; + iommu->reg = pcim_iomap_table(pdev)[0]; + + pci_set_master(pdev); + dev_set_drvdata(dev, iommu); + + /* Check device reported capabilities / features. */ + iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES); + iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); + + /* The PCI driver only uses MSIs, make sure the IOMMU supports this */ + switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) { + case RISCV_IOMMU_CAPABILITIES_IGS_MSI: + case RISCV_IOMMU_CAPABILITIES_IGS_BOTH: + break; + default: + return dev_err_probe(dev, -ENODEV, + "unable to use message-signaled interrupts\n"); + } + + /* Allocate and assign IRQ vectors for the various events */ + rc = pci_alloc_irq_vectors(pdev, 1, RISCV_IOMMU_INTR_COUNT, + PCI_IRQ_MSIX | PCI_IRQ_MSI); + if (rc <= 0) + return dev_err_probe(dev, -ENODEV, + "unable to allocate irq vectors\n"); + + iommu->irqs_count = rc; + for (vec = 0; vec < iommu->irqs_count; vec++) + iommu->irqs[vec] = msi_get_virq(dev, vec); + + /* Enable message-signaled interrupts, fctl.WSI */ + if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) { + iommu->fctl ^= RISCV_IOMMU_FCTL_WSI; + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); + } + + return riscv_iommu_init(iommu); +} + +static void riscv_iommu_pci_remove(struct pci_dev *pdev) +{ + struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev); + + riscv_iommu_remove(iommu); +} + +static const struct pci_device_id riscv_iommu_pci_tbl[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0}, + {PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0}, + {0,} +}; + +static struct pci_driver riscv_iommu_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = riscv_iommu_pci_tbl, + .probe = riscv_iommu_pci_probe, + .remove = riscv_iommu_pci_remove, + .driver = { + .suppress_bind_attrs = true, + }, +}; + +builtin_pci_driver(riscv_iommu_pci_driver); diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c new file mode 100644 index 000000000000..da336863f152 --- /dev/null +++ b/drivers/iommu/riscv/iommu-platform.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * RISC-V IOMMU as a platform device + * + * Copyright © 2023 FORTH-ICS/CARV + * Copyright © 2023-2024 Rivos Inc. 
+ * + * Authors + * Nick Kossifidis <mick@ics.forth.gr> + * Tomasz Jeznach <tjeznach@rivosinc.com> + */ + +#include <linux/kernel.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include "iommu-bits.h" +#include "iommu.h" + +static int riscv_iommu_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct riscv_iommu_device *iommu = NULL; + struct resource *res = NULL; + int vec; + + iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + iommu->dev = dev; + iommu->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(iommu->reg)) + return dev_err_probe(dev, PTR_ERR(iommu->reg), + "could not map register region\n"); + + dev_set_drvdata(dev, iommu); + + /* Check device reported capabilities / features. */ + iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES); + iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); + + /* For now we only support WSI */ + switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) { + case RISCV_IOMMU_CAPABILITIES_IGS_WSI: + case RISCV_IOMMU_CAPABILITIES_IGS_BOTH: + break; + default: + return dev_err_probe(dev, -ENODEV, + "unable to use wire-signaled interrupts\n"); + } + + iommu->irqs_count = platform_irq_count(pdev); + if (iommu->irqs_count <= 0) + return dev_err_probe(dev, -ENODEV, + "no IRQ resources provided\n"); + if (iommu->irqs_count > RISCV_IOMMU_INTR_COUNT) + iommu->irqs_count = RISCV_IOMMU_INTR_COUNT; + + for (vec = 0; vec < iommu->irqs_count; vec++) + iommu->irqs[vec] = platform_get_irq(pdev, vec); + + /* Enable wire-signaled interrupts, fctl.WSI */ + if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) { + iommu->fctl |= RISCV_IOMMU_FCTL_WSI; + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); + } + + return riscv_iommu_init(iommu); +}; + +static void riscv_iommu_platform_remove(struct platform_device *pdev) +{ + riscv_iommu_remove(dev_get_drvdata(&pdev->dev)); +}; + +static const struct of_device_id riscv_iommu_of_match[] = { + {.compatible = "riscv,iommu",}, + {}, +}; + +static struct platform_driver riscv_iommu_platform_driver = { + .probe = riscv_iommu_platform_probe, + .remove_new = riscv_iommu_platform_remove, + .driver = { + .name = "riscv,iommu", + .of_match_table = riscv_iommu_of_match, + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(riscv_iommu_platform_driver); diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c new file mode 100644 index 000000000000..8a05def774bd --- /dev/null +++ b/drivers/iommu/riscv/iommu.c @@ -0,0 +1,1661 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IOMMU API for RISC-V IOMMU implementations. + * + * Copyright © 2022-2024 Rivos Inc. 
+ * Copyright © 2023 FORTH-ICS/CARV + * + * Authors + * Tomasz Jeznach <tjeznach@rivosinc.com> + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#define pr_fmt(fmt) "riscv-iommu: " fmt + +#include <linux/compiler.h> +#include <linux/crash_dump.h> +#include <linux/init.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/pci.h> + +#include "../iommu-pages.h" +#include "iommu-bits.h" +#include "iommu.h" + +/* Timeouts in [us] */ +#define RISCV_IOMMU_QCSR_TIMEOUT 150000 +#define RISCV_IOMMU_QUEUE_TIMEOUT 150000 +#define RISCV_IOMMU_DDTP_TIMEOUT 10000000 +#define RISCV_IOMMU_IOTINVAL_TIMEOUT 90000000 + +/* Number of entries per CMD/FLT queue, should be <= INT_MAX */ +#define RISCV_IOMMU_DEF_CQ_COUNT 8192 +#define RISCV_IOMMU_DEF_FQ_COUNT 4096 + +/* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */ +#define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10)) +#define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12)) + +#define dev_to_iommu(dev) \ + iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu) + +/* IOMMU PSCID allocation namespace. */ +static DEFINE_IDA(riscv_iommu_pscids); +#define RISCV_IOMMU_MAX_PSCID (BIT(20) - 1) + +/* Device resource-managed allocations */ +struct riscv_iommu_devres { + void *addr; + int order; +}; + +static void riscv_iommu_devres_pages_release(struct device *dev, void *res) +{ + struct riscv_iommu_devres *devres = res; + + iommu_free_pages(devres->addr, devres->order); +} + +static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p) +{ + struct riscv_iommu_devres *devres = res; + struct riscv_iommu_devres *target = p; + + return devres->addr == target->addr; +} + +static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order) +{ + struct riscv_iommu_devres *devres; + void *addr; + + addr = iommu_alloc_pages_node(dev_to_node(iommu->dev), + GFP_KERNEL_ACCOUNT, order); + if (unlikely(!addr)) + return NULL; + + devres = devres_alloc(riscv_iommu_devres_pages_release, + sizeof(struct riscv_iommu_devres), GFP_KERNEL); + + if (unlikely(!devres)) { + iommu_free_pages(addr, order); + return NULL; + } + + devres->addr = addr; + devres->order = order; + + devres_add(iommu->dev, devres); + + return addr; +} + +static void riscv_iommu_free_pages(struct riscv_iommu_device *iommu, void *addr) +{ + struct riscv_iommu_devres devres = { .addr = addr }; + + devres_release(iommu->dev, riscv_iommu_devres_pages_release, + riscv_iommu_devres_pages_match, &devres); +} + +/* + * Hardware queue allocation and management. + */ + +/* Setup queue base, control registers and default queue length */ +#define RISCV_IOMMU_QUEUE_INIT(q, name) do { \ + struct riscv_iommu_queue *_q = q; \ + _q->qid = RISCV_IOMMU_INTR_ ## name; \ + _q->qbr = RISCV_IOMMU_REG_ ## name ## B; \ + _q->qcr = RISCV_IOMMU_REG_ ## name ## CSR; \ + _q->mask = _q->mask ?: (RISCV_IOMMU_DEF_ ## name ## _COUNT) - 1;\ +} while (0) + +/* Note: offsets are the same for all queues */ +#define Q_HEAD(q) ((q)->qbr + (RISCV_IOMMU_REG_CQH - RISCV_IOMMU_REG_CQB)) +#define Q_TAIL(q) ((q)->qbr + (RISCV_IOMMU_REG_CQT - RISCV_IOMMU_REG_CQB)) +#define Q_ITEM(q, index) ((q)->mask & (index)) +#define Q_IPSR(q) BIT((q)->qid) + +/* + * Discover queue ring buffer hardware configuration, allocate in-memory + * ring buffer or use fixed I/O memory location, configure queue base register. + * Must be called before hardware queue is enabled. 
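 *
 * (Editorial sketch, not part of the patch: for the command queue,
 * RISCV_IOMMU_QUEUE_INIT(&iommu->cmdq, CQ) above expands to roughly
 *
 *	q->qid  = RISCV_IOMMU_INTR_CQ;
 *	q->qbr  = RISCV_IOMMU_REG_CQB;
 *	q->qcr  = RISCV_IOMMU_REG_CQCSR;
 *	q->mask = q->mask ?: RISCV_IOMMU_DEF_CQ_COUNT - 1;
 *
 * which is what lets this one allocation path serve both CQ and FQ.)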
+ * + * @queue - data structure, configured with RISCV_IOMMU_QUEUE_INIT() + * @entry_size - queue single element size in bytes. + */ +static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu, + struct riscv_iommu_queue *queue, + size_t entry_size) +{ + unsigned int logsz; + u64 qb, rb; + + /* + * Use WARL base register property to discover maximum allowed + * number of entries and optional fixed IO address for queue location. + */ + riscv_iommu_writeq(iommu, queue->qbr, RISCV_IOMMU_QUEUE_LOG2SZ_FIELD); + qb = riscv_iommu_readq(iommu, queue->qbr); + + /* + * Calculate and verify hardware supported queue length, as reported + * by the field LOG2SZ, where max queue length is equal to 2^(LOG2SZ + 1). + * Update queue size based on hardware supported value. + */ + logsz = ilog2(queue->mask); + if (logsz > FIELD_GET(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, qb)) + logsz = FIELD_GET(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, qb); + + /* + * Use WARL base register property to discover an optional fixed IO + * address for queue ring buffer location. Otherwise allocate contiguous + * system memory. + */ + if (FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb)) { + const size_t queue_size = entry_size << (logsz + 1); + + queue->phys = pfn_to_phys(FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb)); + queue->base = devm_ioremap(iommu->dev, queue->phys, queue_size); + } else { + do { + const size_t queue_size = entry_size << (logsz + 1); + const int order = get_order(queue_size); + + queue->base = riscv_iommu_get_pages(iommu, order); + queue->phys = __pa(queue->base); + } while (!queue->base && logsz-- > 0); + } + + if (!queue->base) + return -ENOMEM; + + qb = phys_to_ppn(queue->phys) | + FIELD_PREP(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, logsz); + + /* Update base register and read back to verify hw accepted our write */ + riscv_iommu_writeq(iommu, queue->qbr, qb); + rb = riscv_iommu_readq(iommu, queue->qbr); + if (rb != qb) { + dev_err(iommu->dev, "queue #%u allocation failed\n", queue->qid); + return -ENODEV; + } + + /* Update actual queue mask */ + queue->mask = (2U << logsz) - 1; + + dev_dbg(iommu->dev, "queue #%u allocated 2^%u entries", + queue->qid, logsz + 1); + + return 0; +} + +/* Check interrupt queue status, IPSR */ +static irqreturn_t riscv_iommu_queue_ipsr(int irq, void *data) +{ + struct riscv_iommu_queue *queue = (struct riscv_iommu_queue *)data; + + if (riscv_iommu_readl(queue->iommu, RISCV_IOMMU_REG_IPSR) & Q_IPSR(queue)) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +static int riscv_iommu_queue_vec(struct riscv_iommu_device *iommu, int n) +{ + /* Reuse ICVEC.CIV mask for all interrupt vectors mapping. */ + return (iommu->icvec >> (n * 4)) & RISCV_IOMMU_ICVEC_CIV; +} + +/* + * Enable queue processing in the hardware, register interrupt handler. + * + * @queue - data structure, already allocated with riscv_iommu_queue_alloc() + * @irq_handler - threaded interrupt handler. + */ +static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu, + struct riscv_iommu_queue *queue, + irq_handler_t irq_handler) +{ + const unsigned int irq = iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)]; + u32 csr; + int rc; + + if (queue->iommu) + return -EBUSY; + + /* Polling not implemented */ + if (!irq) + return -ENODEV; + + queue->iommu = iommu; + rc = request_threaded_irq(irq, riscv_iommu_queue_ipsr, irq_handler, + IRQF_ONESHOT | IRQF_SHARED, + dev_name(iommu->dev), queue); + if (rc) { + queue->iommu = NULL; + return rc; + } + + /* + * Enable queue with interrupts, clear any memory fault if any. 
+ * Wait for the hardware to acknowledge request and activate queue + * processing. + * Note: All CSR bitfields are in the same offsets for all queues. + */ + riscv_iommu_writel(iommu, queue->qcr, + RISCV_IOMMU_QUEUE_ENABLE | + RISCV_IOMMU_QUEUE_INTR_ENABLE | + RISCV_IOMMU_QUEUE_MEM_FAULT); + + riscv_iommu_readl_timeout(iommu, queue->qcr, + csr, !(csr & RISCV_IOMMU_QUEUE_BUSY), + 10, RISCV_IOMMU_QCSR_TIMEOUT); + + if (RISCV_IOMMU_QUEUE_ACTIVE != (csr & (RISCV_IOMMU_QUEUE_ACTIVE | + RISCV_IOMMU_QUEUE_BUSY | + RISCV_IOMMU_QUEUE_MEM_FAULT))) { + /* Best effort to stop and disable failing hardware queue. */ + riscv_iommu_writel(iommu, queue->qcr, 0); + free_irq(irq, queue); + queue->iommu = NULL; + dev_err(iommu->dev, "queue #%u failed to start\n", queue->qid); + return -EBUSY; + } + + /* Clear any pending interrupt flag. */ + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); + + return 0; +} + +/* + * Disable queue. Wait for the hardware to acknowledge request and + * stop processing enqueued requests. Report errors but continue. + */ +static void riscv_iommu_queue_disable(struct riscv_iommu_queue *queue) +{ + struct riscv_iommu_device *iommu = queue->iommu; + u32 csr; + + if (!iommu) + return; + + free_irq(iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)], queue); + riscv_iommu_writel(iommu, queue->qcr, 0); + riscv_iommu_readl_timeout(iommu, queue->qcr, + csr, !(csr & RISCV_IOMMU_QUEUE_BUSY), + 10, RISCV_IOMMU_QCSR_TIMEOUT); + + if (csr & (RISCV_IOMMU_QUEUE_ACTIVE | RISCV_IOMMU_QUEUE_BUSY)) + dev_err(iommu->dev, "fail to disable hardware queue #%u, csr 0x%x\n", + queue->qid, csr); + + queue->iommu = NULL; +} + +/* + * Returns number of available valid queue entries and the first item index. + * Update shadow producer index if necessary. + */ +static int riscv_iommu_queue_consume(struct riscv_iommu_queue *queue, + unsigned int *index) +{ + unsigned int head = atomic_read(&queue->head); + unsigned int tail = atomic_read(&queue->tail); + unsigned int last = Q_ITEM(queue, tail); + int available = (int)(tail - head); + + *index = head; + + if (available > 0) + return available; + + /* read hardware producer index, check reserved register bits are not set. */ + if (riscv_iommu_readl_timeout(queue->iommu, Q_TAIL(queue), + tail, (tail & ~queue->mask) == 0, + 0, RISCV_IOMMU_QUEUE_TIMEOUT)) { + dev_err_once(queue->iommu->dev, + "Hardware error: queue access timeout\n"); + return 0; + } + + if (tail == last) + return 0; + + /* update shadow producer index */ + return (int)(atomic_add_return((tail - last) & queue->mask, &queue->tail) - head); +} + +/* + * Release processed queue entries, should match riscv_iommu_queue_consume() calls. + */ +static void riscv_iommu_queue_release(struct riscv_iommu_queue *queue, int count) +{ + const unsigned int head = atomic_add_return(count, &queue->head); + + riscv_iommu_writel(queue->iommu, Q_HEAD(queue), Q_ITEM(queue, head)); +} + +/* Return actual consumer index based on hardware reported queue head index. */ +static unsigned int riscv_iommu_queue_cons(struct riscv_iommu_queue *queue) +{ + const unsigned int cons = atomic_read(&queue->head); + const unsigned int last = Q_ITEM(queue, cons); + unsigned int head; + + if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head, + !(head & ~queue->mask), + 0, RISCV_IOMMU_QUEUE_TIMEOUT)) + return cons; + + return cons + ((head - last) & queue->mask); +} + +/* Wait for submitted item to be processed. 
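 *
 * (Editorial note: prod/head/tail are unbounded counters, so "already
 * processed" is decided below by signed difference rather than direct
 * comparison; this stays correct across 32-bit wrap-around, e.g.
 * cons == 2 just after wrap and index == UINT_MAX - 1 give
 * (int)(cons - index) == 4 > 0, i.e. processed.)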
*/ +static int riscv_iommu_queue_wait(struct riscv_iommu_queue *queue, + unsigned int index, + unsigned int timeout_us) +{ + unsigned int cons = atomic_read(&queue->head); + + /* Already processed by the consumer */ + if ((int)(cons - index) > 0) + return 0; + + /* Monitor consumer index */ + return readx_poll_timeout(riscv_iommu_queue_cons, queue, cons, + (int)(cons - index) > 0, 0, timeout_us); +} + +/* Enqueue an entry and wait to be processed if timeout_us > 0 + * + * Error handling for IOMMU hardware not responding in reasonable time + * will be added as separate patch series along with other RAS features. + * For now, only report hardware failure and continue. + */ +static unsigned int riscv_iommu_queue_send(struct riscv_iommu_queue *queue, + void *entry, size_t entry_size) +{ + unsigned int prod; + unsigned int head; + unsigned int tail; + unsigned long flags; + + /* Do not preempt submission flow. */ + local_irq_save(flags); + + /* 1. Allocate some space in the queue */ + prod = atomic_inc_return(&queue->prod) - 1; + head = atomic_read(&queue->head); + + /* 2. Wait for space availability. */ + if ((prod - head) > queue->mask) { + if (readx_poll_timeout(atomic_read, &queue->head, + head, (prod - head) < queue->mask, + 0, RISCV_IOMMU_QUEUE_TIMEOUT)) + goto err_busy; + } else if ((prod - head) == queue->mask) { + const unsigned int last = Q_ITEM(queue, head); + + if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head, + !(head & ~queue->mask) && head != last, + 0, RISCV_IOMMU_QUEUE_TIMEOUT)) + goto err_busy; + atomic_add((head - last) & queue->mask, &queue->head); + } + + /* 3. Store entry in the ring buffer */ + memcpy(queue->base + Q_ITEM(queue, prod) * entry_size, entry, entry_size); + + /* 4. Wait for all previous entries to be ready */ + if (readx_poll_timeout(atomic_read, &queue->tail, tail, prod == tail, + 0, RISCV_IOMMU_QUEUE_TIMEOUT)) + goto err_busy; + + /* + * 5. Make sure the ring buffer update (whether in normal or I/O memory) is + * completed and visible before signaling the tail doorbell to fetch + * the next command. 'fence ow, ow' + */ + dma_wmb(); + riscv_iommu_writel(queue->iommu, Q_TAIL(queue), Q_ITEM(queue, prod + 1)); + + /* + * 6. Make sure the doorbell write to the device has finished before updating + * the shadow tail index in normal memory. 'fence o, w' + */ + mmiowb(); + atomic_inc(&queue->tail); + + /* 7. Complete submission and restore local interrupts */ + local_irq_restore(flags); + + return prod; + +err_busy: + local_irq_restore(flags); + dev_err_once(queue->iommu->dev, "Hardware error: command enqueue failed\n"); + + return prod; +} + +/* + * IOMMU Command queue chapter 3.1 + */ + +/* Command queue interrupt handler thread function */ +static irqreturn_t riscv_iommu_cmdq_process(int irq, void *data) +{ + const struct riscv_iommu_queue *queue = (struct riscv_iommu_queue *)data; + unsigned int ctrl; + + /* Clear MF/CQ errors, complete error recovery to be implemented. 
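 * (Editorial note: as I read the specification, CQCSR's cqmf/cmd_to/
 * cmd_ill/fence_w_ip bits are write-one-to-clear, which is why the raw
 * value just read is written straight back below to acknowledge the
 * reported errors.)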
*/ + ctrl = riscv_iommu_readl(queue->iommu, queue->qcr); + if (ctrl & (RISCV_IOMMU_CQCSR_CQMF | RISCV_IOMMU_CQCSR_CMD_TO | + RISCV_IOMMU_CQCSR_CMD_ILL | RISCV_IOMMU_CQCSR_FENCE_W_IP)) { + riscv_iommu_writel(queue->iommu, queue->qcr, ctrl); + dev_warn(queue->iommu->dev, + "Queue #%u error; fault:%d timeout:%d illegal:%d fence_w_ip:%d\n", + queue->qid, + !!(ctrl & RISCV_IOMMU_CQCSR_CQMF), + !!(ctrl & RISCV_IOMMU_CQCSR_CMD_TO), + !!(ctrl & RISCV_IOMMU_CQCSR_CMD_ILL), + !!(ctrl & RISCV_IOMMU_CQCSR_FENCE_W_IP)); + } + + /* Placeholder for command queue interrupt notifiers */ + + /* Clear command interrupt pending. */ + riscv_iommu_writel(queue->iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); + + return IRQ_HANDLED; +} + +/* Send command to the IOMMU command queue */ +static void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu, + struct riscv_iommu_command *cmd) +{ + riscv_iommu_queue_send(&iommu->cmdq, cmd, sizeof(*cmd)); +} + +/* Send IOFENCE.C command and wait for all scheduled commands to complete. */ +static void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu, + unsigned int timeout_us) +{ + struct riscv_iommu_command cmd; + unsigned int prod; + + riscv_iommu_cmd_iofence(&cmd); + prod = riscv_iommu_queue_send(&iommu->cmdq, &cmd, sizeof(cmd)); + + if (!timeout_us) + return; + + if (riscv_iommu_queue_wait(&iommu->cmdq, prod, timeout_us)) + dev_err_once(iommu->dev, + "Hardware error: command execution timeout\n"); +} + +/* + * IOMMU Fault/Event queue chapter 3.2 + */ + +static void riscv_iommu_fault(struct riscv_iommu_device *iommu, + struct riscv_iommu_fq_record *event) +{ + unsigned int err = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, event->hdr); + unsigned int devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, event->hdr); + + /* Placeholder for future fault handling implementation, report only. */ + if (err) + dev_warn_ratelimited(iommu->dev, + "Fault %d devid: 0x%x iotval: %llx iotval2: %llx\n", + err, devid, event->iotval, event->iotval2); +} + +/* Fault queue interrupt handler thread function */ +static irqreturn_t riscv_iommu_fltq_process(int irq, void *data) +{ + struct riscv_iommu_queue *queue = (struct riscv_iommu_queue *)data; + struct riscv_iommu_device *iommu = queue->iommu; + struct riscv_iommu_fq_record *events; + unsigned int ctrl, idx; + int cnt, len; + + events = (struct riscv_iommu_fq_record *)queue->base; + + /* Clear fault interrupt pending and process all received fault events. */ + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); + + do { + cnt = riscv_iommu_queue_consume(queue, &idx); + for (len = 0; len < cnt; idx++, len++) + riscv_iommu_fault(iommu, &events[Q_ITEM(queue, idx)]); + riscv_iommu_queue_release(queue, cnt); + } while (cnt > 0); + + /* Clear MF/OF errors, complete error recovery to be implemented. */ + ctrl = riscv_iommu_readl(iommu, queue->qcr); + if (ctrl & (RISCV_IOMMU_FQCSR_FQMF | RISCV_IOMMU_FQCSR_FQOF)) { + riscv_iommu_writel(iommu, queue->qcr, ctrl); + dev_warn(iommu->dev, + "Queue #%u error; memory fault:%d overflow:%d\n", + queue->qid, + !!(ctrl & RISCV_IOMMU_FQCSR_FQMF), + !!(ctrl & RISCV_IOMMU_FQCSR_FQOF)); + } + + return IRQ_HANDLED; +} + +/* Lookup and initialize device context info structure. 
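 * (Editorial aside on riscv_iommu_cmd_send()/riscv_iommu_cmd_sync()
 * above, a sketch rather than patch content: a typical caller pairs
 * them as
 *
 *	riscv_iommu_cmd_inval_vma(&cmd);
 *	riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid);
 *	riscv_iommu_cmd_send(iommu, &cmd);
 *	riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
 *
 * queueing the invalidation, then issuing IOFENCE.C and waiting until
 * it completes; riscv_iommu_bond_unlink() below does exactly this.)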
*/ +static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iommu, + unsigned int devid) +{ + const bool base_format = !(iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT); + unsigned int depth; + unsigned long ddt, old, new; + void *ptr; + u8 ddi_bits[3] = { 0 }; + u64 *ddtp = NULL; + + /* Make sure the mode is valid */ + if (iommu->ddt_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL || + iommu->ddt_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL) + return NULL; + + /* + * Device id partitioning for base format: + * DDI[0]: bits 0 - 6 (1st level) (7 bits) + * DDI[1]: bits 7 - 15 (2nd level) (9 bits) + * DDI[2]: bits 16 - 23 (3rd level) (8 bits) + * + * For extended format: + * DDI[0]: bits 0 - 5 (1st level) (6 bits) + * DDI[1]: bits 6 - 14 (2nd level) (9 bits) + * DDI[2]: bits 15 - 23 (3rd level) (9 bits) + */ + if (base_format) { + ddi_bits[0] = 7; + ddi_bits[1] = 7 + 9; + ddi_bits[2] = 7 + 9 + 8; + } else { + ddi_bits[0] = 6; + ddi_bits[1] = 6 + 9; + ddi_bits[2] = 6 + 9 + 9; + } + + /* Make sure device id is within range */ + depth = iommu->ddt_mode - RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL; + if (devid >= (1 << ddi_bits[depth])) + return NULL; + + /* Get to the level of the non-leaf node that holds the device context */ + for (ddtp = iommu->ddt_root; depth-- > 0;) { + const int split = ddi_bits[depth]; + /* + * Each non-leaf node is 64bits wide and on each level + * nodes are indexed by DDI[depth]. + */ + ddtp += (devid >> split) & 0x1FF; + + /* + * Check if this node has been populated and if not + * allocate a new level and populate it. + */ + do { + ddt = READ_ONCE(*(unsigned long *)ddtp); + if (ddt & RISCV_IOMMU_DDTE_V) { + ddtp = __va(ppn_to_phys(ddt)); + break; + } + + ptr = riscv_iommu_get_pages(iommu, 0); + if (!ptr) + return NULL; + + new = phys_to_ppn(__pa(ptr)) | RISCV_IOMMU_DDTE_V; + old = cmpxchg_relaxed((unsigned long *)ddtp, ddt, new); + + if (old == ddt) { + ddtp = (u64 *)ptr; + break; + } + + /* Race setting DDT detected, re-read and retry. */ + riscv_iommu_free_pages(iommu, ptr); + } while (1); + } + + /* + * Grab the node that matches DDI[depth], note that when using base + * format the device context is 4 * 64bits, and the extended format + * is 8 * 64bits, hence the (3 - base_format) below. + */ + ddtp += (devid & ((64 << base_format) - 1)) << (3 - base_format); + + return (struct riscv_iommu_dc *)ddtp; +} + +/* + * This is best effort IOMMU translation shutdown flow. + * Disable IOMMU without waiting for hardware response. + */ +static void riscv_iommu_disable(struct riscv_iommu_device *iommu) +{ + riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, 0); + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0); + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0); + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0); +} + +#define riscv_iommu_read_ddtp(iommu) ({ \ + u64 ddtp; \ + riscv_iommu_readq_timeout((iommu), RISCV_IOMMU_REG_DDTP, ddtp, \ + !(ddtp & RISCV_IOMMU_DDTP_BUSY), 10, \ + RISCV_IOMMU_DDTP_TIMEOUT); \ + ddtp; }) + +static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu) +{ + u64 ddtp; + unsigned int mode; + + ddtp = riscv_iommu_read_ddtp(iommu); + if (ddtp & RISCV_IOMMU_DDTP_BUSY) + return -EBUSY; + + /* + * It is optional for the hardware to report a fixed address for device + * directory root page when DDT.MODE is OFF or BARE. 
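 * (Editorial worked example for riscv_iommu_get_dc() above, base
 * format: devid 0x123456 splits into DDI[2] = 0x12 (bits 23:16),
 * DDI[1] = 0x68 (bits 15:7) and DDI[0] = 0x56 (bits 6:0), indexing the
 * directory levels from root to leaf.)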
+ */ + mode = FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp); + if (mode == RISCV_IOMMU_DDTP_IOMMU_MODE_BARE || + mode == RISCV_IOMMU_DDTP_IOMMU_MODE_OFF) { + /* Use WARL to discover hardware fixed DDT PPN */ + riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, + FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE, mode)); + ddtp = riscv_iommu_read_ddtp(iommu); + if (ddtp & RISCV_IOMMU_DDTP_BUSY) + return -EBUSY; + + iommu->ddt_phys = ppn_to_phys(ddtp); + if (iommu->ddt_phys) + iommu->ddt_root = devm_ioremap(iommu->dev, + iommu->ddt_phys, PAGE_SIZE); + if (iommu->ddt_root) + memset(iommu->ddt_root, 0, PAGE_SIZE); + } + + if (!iommu->ddt_root) { + iommu->ddt_root = riscv_iommu_get_pages(iommu, 0); + iommu->ddt_phys = __pa(iommu->ddt_root); + } + + if (!iommu->ddt_root) + return -ENOMEM; + + return 0; +} + +/* + * Discover supported DDT modes starting from requested value, + * configure DDTP register with accepted mode and root DDT address. + * Accepted iommu->ddt_mode is updated on success. + */ +static int riscv_iommu_iodir_set_mode(struct riscv_iommu_device *iommu, + unsigned int ddtp_mode) +{ + struct device *dev = iommu->dev; + u64 ddtp, rq_ddtp; + unsigned int mode, rq_mode = ddtp_mode; + struct riscv_iommu_command cmd; + + ddtp = riscv_iommu_read_ddtp(iommu); + if (ddtp & RISCV_IOMMU_DDTP_BUSY) + return -EBUSY; + + /* Disallow state transition from xLVL to xLVL. */ + mode = FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp); + if (mode != RISCV_IOMMU_DDTP_IOMMU_MODE_BARE && + mode != RISCV_IOMMU_DDTP_IOMMU_MODE_OFF && + rq_mode != RISCV_IOMMU_DDTP_IOMMU_MODE_BARE && + rq_mode != RISCV_IOMMU_DDTP_IOMMU_MODE_OFF) + return -EINVAL; + + do { + rq_ddtp = FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE, rq_mode); + if (rq_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) + rq_ddtp |= phys_to_ppn(iommu->ddt_phys); + + riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, rq_ddtp); + ddtp = riscv_iommu_read_ddtp(iommu); + if (ddtp & RISCV_IOMMU_DDTP_BUSY) { + dev_err(dev, "timeout when setting ddtp (ddt mode: %u, read: %llx)\n", + rq_mode, ddtp); + return -EBUSY; + } + + /* Verify IOMMU hardware accepts new DDTP config. */ + mode = FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp); + + if (rq_mode == mode) + break; + + /* Hardware mandatory DDTP mode has not been accepted. */ + if (rq_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL && rq_ddtp != ddtp) { + dev_err(dev, "DDTP update failed hw: %llx vs %llx\n", + ddtp, rq_ddtp); + return -EINVAL; + } + + /* + * Mode field is WARL, an IOMMU may support a subset of + * directory table levels in which case if we tried to set + * an unsupported number of levels we'll readback either + * a valid xLVL or off/bare. If we got off/bare, try again + * with a smaller xLVL. + */ + if (mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL && + rq_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL) { + dev_dbg(dev, "DDTP hw mode %u vs %u\n", mode, rq_mode); + rq_mode--; + continue; + } + + /* + * We tried all supported modes and IOMMU hardware failed to + * accept new settings, something went very wrong since off/bare + * and at least one xLVL must be supported. 
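 * (Editorial note: WARL is the RISC-V register field property "Write
 * Any values, Reads Legal values" - the field accepts any write but
 * reads back only a supported setting, which is what makes this
 * write-then-read-back negotiation loop work.)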
+ */ + dev_err(dev, "DDTP hw mode %u, failed to set %u\n", + mode, ddtp_mode); + return -EINVAL; + } while (1); + + iommu->ddt_mode = mode; + if (mode != ddtp_mode) + dev_dbg(dev, "DDTP hw mode %u, requested %u\n", mode, ddtp_mode); + + /* Invalidate device context cache */ + riscv_iommu_cmd_iodir_inval_ddt(&cmd); + riscv_iommu_cmd_send(iommu, &cmd); + + /* Invalidate address translation cache */ + riscv_iommu_cmd_inval_vma(&cmd); + riscv_iommu_cmd_send(iommu, &cmd); + + /* IOFENCE.C */ + riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); + + return 0; +} + +/* This struct contains protection domain specific IOMMU driver data. */ +struct riscv_iommu_domain { + struct iommu_domain domain; + struct list_head bonds; + spinlock_t lock; /* protect bonds list updates. */ + int pscid; + bool amo_enabled; + int numa_node; + unsigned int pgd_mode; + unsigned long *pgd_root; +}; + +#define iommu_domain_to_riscv(iommu_domain) \ + container_of(iommu_domain, struct riscv_iommu_domain, domain) + +/* Private IOMMU data for managed devices, dev_iommu_priv_* */ +struct riscv_iommu_info { + struct riscv_iommu_domain *domain; +}; + +/* + * Linkage between an iommu_domain and attached devices. + * + * Protection domain requiring IOATC and DevATC translation cache invalidations, + * should be linked to attached devices using a riscv_iommu_bond structure. + * Devices should be linked to the domain before first use and unlinked after + * the translations from the referenced protection domain can no longer be used. + * Blocking and identity domains are not tracked here, as the IOMMU hardware + * does not cache negative and/or identity (BARE mode) translations, and DevATC + * is disabled for those protection domains. + * + * The device pointer and IOMMU data remain stable in the bond struct after + * _probe_device() where it's attached to the managed IOMMU, up to the + * completion of the _release_device() call. The release of the bond structure + * is synchronized with the device release. + */ +struct riscv_iommu_bond { + struct list_head list; + struct rcu_head rcu; + struct device *dev; +}; + +static int riscv_iommu_bond_link(struct riscv_iommu_domain *domain, + struct device *dev) +{ + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_bond *bond; + struct list_head *bonds; + + bond = kzalloc(sizeof(*bond), GFP_KERNEL); + if (!bond) + return -ENOMEM; + bond->dev = dev; + + /* + * List of devices attached to the domain is arranged based on + * managed IOMMU device. + */ + + spin_lock(&domain->lock); + list_for_each(bonds, &domain->bonds) + if (dev_to_iommu(list_entry(bonds, struct riscv_iommu_bond, list)->dev) == iommu) + break; + list_add_rcu(&bond->list, bonds); + spin_unlock(&domain->lock); + + /* Synchronize with riscv_iommu_iotlb_inval() sequence. See comment below. 
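 * (Editorial note: this smp_mb() pairs with the one in
 * riscv_iommu_iotlb_inval(); the ordering table in that function's
 * comment shows why both sides of the race need a full barrier.)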
*/ + smp_mb(); + + return 0; +} + +static void riscv_iommu_bond_unlink(struct riscv_iommu_domain *domain, + struct device *dev) +{ + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_bond *bond, *found = NULL; + struct riscv_iommu_command cmd; + int count = 0; + + if (!domain) + return; + + spin_lock(&domain->lock); + list_for_each_entry(bond, &domain->bonds, list) { + if (found && count) + break; + else if (bond->dev == dev) + found = bond; + else if (dev_to_iommu(bond->dev) == iommu) + count++; + } + if (found) + list_del_rcu(&found->list); + spin_unlock(&domain->lock); + kfree_rcu(found, rcu); + + /* + * If this was the last bond between this domain and the IOMMU, + * invalidate all cached entries for the domain's PSCID. + */ + if (!count) { + riscv_iommu_cmd_inval_vma(&cmd); + riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid); + riscv_iommu_cmd_send(iommu, &cmd); + + riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); + } +} + +/* + * Send IOTLB.INVAL for whole address space for ranges larger than 2MB. + * This limit will be replaced with range invalidations, if supported by + * the hardware, once the RISC-V IOMMU architecture specification update + * for range invalidations becomes available. + */ +#define RISCV_IOMMU_IOTLB_INVAL_LIMIT (2 << 20) + +static void riscv_iommu_iotlb_inval(struct riscv_iommu_domain *domain, + unsigned long start, unsigned long end) +{ + struct riscv_iommu_bond *bond; + struct riscv_iommu_device *iommu, *prev; + struct riscv_iommu_command cmd; + unsigned long len = end - start + 1; + unsigned long iova; + + /* + * For each IOMMU linked with this protection domain (via bonds->dev), + * an IOTLB invalidation command will be submitted and executed. + * + * Possible race with domain attach flow is handled by sequencing + * bond creation - riscv_iommu_bond_link(), and device directory + * update - riscv_iommu_iodir_update(). + * + * PTE Update / IOTLB Inval Device attach & directory update + * -------------------------- -------------------------- + * update page table entries add dev to the bond list + * FENCE RW,RW FENCE RW,RW + * For all IOMMUs: (can be empty) Update FSC/PSCID + * FENCE IOW,IOW FENCE IOW,IOW + * IOTLB.INVAL IODIR.INVAL + * IOFENCE.C + * + * If the bond list is not yet updated with the new device, the directory + * context will be configured with already valid page table content. If an + * IOMMU is linked to the protection domain it will receive invalidation + * requests for updated page table entries. + */ + smp_mb(); + + rcu_read_lock(); + + prev = NULL; + list_for_each_entry_rcu(bond, &domain->bonds, list) { + iommu = dev_to_iommu(bond->dev); + + /* + * IOTLB invalidation request can be safely omitted if already sent + * to the IOMMU for the same PSCID, and with domain->bonds list + * arranged based on the device's IOMMU, it's sufficient to check + * the last device the invalidation was sent to. 
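 * (Editorial note on the RISCV_IOMMU_IOTLB_INVAL_LIMIT cutoff above:
 * below 2MB the loop under this comment emits one IOTINVAL.VMA per 4K
 * page - e.g. 16 commands for a 64K range - while 2MB and larger ranges
 * (512+ pages) fall back to a single full-PSCID invalidation, trading
 * precision for less command queue traffic.)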
+ */ + if (iommu == prev) + continue; + + riscv_iommu_cmd_inval_vma(&cmd); + riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid); + if (len && len < RISCV_IOMMU_IOTLB_INVAL_LIMIT) { + for (iova = start; iova < end; iova += PAGE_SIZE) { + riscv_iommu_cmd_inval_set_addr(&cmd, iova); + riscv_iommu_cmd_send(iommu, &cmd); + } + } else { + riscv_iommu_cmd_send(iommu, &cmd); + } + prev = iommu; + } + + prev = NULL; + list_for_each_entry_rcu(bond, &domain->bonds, list) { + iommu = dev_to_iommu(bond->dev); + if (iommu == prev) + continue; + + riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); + prev = iommu; + } + rcu_read_unlock(); +} + +#define RISCV_IOMMU_FSC_BARE 0 + +/* + * Update IODIR for the device. + * + * During the execution of riscv_iommu_probe_device(), IODIR entries are + * allocated for the device's identifiers. Device context invalidation + * becomes necessary only if one of the updated entries was previously + * marked as valid, given that invalid device context entries are not + * cached by the IOMMU hardware. + * In this implementation, updating a valid device context while the + * device is not quiesced might be disruptive, potentially causing + * interim translation faults. + */ +static void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu, + struct device *dev, u64 fsc, u64 ta) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct riscv_iommu_dc *dc; + struct riscv_iommu_command cmd; + bool sync_required = false; + u64 tc; + int i; + + for (i = 0; i < fwspec->num_ids; i++) { + dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); + tc = READ_ONCE(dc->tc); + if (!(tc & RISCV_IOMMU_DC_TC_V)) + continue; + + WRITE_ONCE(dc->tc, tc & ~RISCV_IOMMU_DC_TC_V); + + /* Invalidate device context cached values */ + riscv_iommu_cmd_iodir_inval_ddt(&cmd); + riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]); + riscv_iommu_cmd_send(iommu, &cmd); + sync_required = true; + } + + if (sync_required) + riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); + + /* + * For device context with DC_TC_PDTV = 0, translation attributes valid bit + * is stored as DC_TC_V bit (both sharing the same location at BIT(0)). + */ + for (i = 0; i < fwspec->num_ids; i++) { + dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); + tc = READ_ONCE(dc->tc); + tc |= ta & RISCV_IOMMU_DC_TC_V; + + WRITE_ONCE(dc->fsc, fsc); + WRITE_ONCE(dc->ta, ta & RISCV_IOMMU_PC_TA_PSCID); + /* Update device context, write TC.V as the last step. */ + dma_wmb(); + WRITE_ONCE(dc->tc, tc); + + /* Invalidate device context after update */ + riscv_iommu_cmd_iodir_inval_ddt(&cmd); + riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]); + riscv_iommu_cmd_send(iommu, &cmd); + } + + riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); +} + +/* + * IOVA page translation tree management. 
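 * (Editorial recap of riscv_iommu_iodir_update() above: the update is
 * deliberately two-phase - clear TC.V and invalidate cached contexts
 * first, then write FSC/TA while the entry is invalid, dma_wmb(), set
 * TC.V last and invalidate again - so the IOMMU can never fetch a
 * half-written device context.)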
+ */ + +static void riscv_iommu_iotlb_flush_all(struct iommu_domain *iommu_domain) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + + riscv_iommu_iotlb_inval(domain, 0, ULONG_MAX); +} + +static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain, + struct iommu_iotlb_gather *gather) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + + riscv_iommu_iotlb_inval(domain, gather->start, gather->end); +} + +#define PT_SHIFT (PAGE_SHIFT - ilog2(sizeof(pte_t))) + +#define _io_pte_present(pte) ((pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) +#define _io_pte_leaf(pte) ((pte) & _PAGE_LEAF) +#define _io_pte_none(pte) ((pte) == 0) +#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot)) + +static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain, + unsigned long pte, struct list_head *freelist) +{ + unsigned long *ptr; + int i; + + if (!_io_pte_present(pte) || _io_pte_leaf(pte)) + return; + + ptr = (unsigned long *)pfn_to_virt(__page_val_to_pfn(pte)); + + /* Recursively free all sub page table pages */ + for (i = 0; i < PTRS_PER_PTE; i++) { + pte = READ_ONCE(ptr[i]); + if (!_io_pte_none(pte) && cmpxchg_relaxed(ptr + i, pte, 0) == pte) + riscv_iommu_pte_free(domain, pte, freelist); + } + + if (freelist) + list_add_tail(&virt_to_page(ptr)->lru, freelist); + else + iommu_free_page(ptr); +} + +static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain, + unsigned long iova, size_t pgsize, + gfp_t gfp) +{ + unsigned long *ptr = domain->pgd_root; + unsigned long pte, old; + int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2; + void *addr; + + do { + const int shift = PAGE_SHIFT + PT_SHIFT * level; + + ptr += ((iova >> shift) & (PTRS_PER_PTE - 1)); + /* + * Note: returned entry might be a non-leaf if there was + * existing mapping with smaller granularity. Up to the caller + * to replace and invalidate. + */ + if (((size_t)1 << shift) == pgsize) + return ptr; +pte_retry: + pte = READ_ONCE(*ptr); + /* + * This is very likely incorrect as we should not be adding + * new mapping with smaller granularity on top + * of existing 2M/1G mapping. Fail. + */ + if (_io_pte_present(pte) && _io_pte_leaf(pte)) + return NULL; + /* + * Non-leaf entry is missing, allocate and try to add to the + * page table. This might race with other mappings, retry. 
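 * (Editorial note on the walk arithmetic: with 4K pages PT_SHIFT is 9,
 * so an Sv39 walk starts at level 2 and steps through shifts 30/21/12,
 * i.e. the 1G, 2M and 4K granularities at which a leaf can be
 * installed; Sv48 and Sv57 just start one or two levels higher.)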
+ */ + if (_io_pte_none(pte)) { + addr = iommu_alloc_page_node(domain->numa_node, gfp); + if (!addr) + return NULL; + old = pte; + pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE); + if (cmpxchg_relaxed(ptr, old, pte) != old) { + iommu_free_page(addr); + goto pte_retry; + } + } + ptr = (unsigned long *)pfn_to_virt(__page_val_to_pfn(pte)); + } while (level-- > 0); + + return NULL; +} + +static unsigned long *riscv_iommu_pte_fetch(struct riscv_iommu_domain *domain, + unsigned long iova, size_t *pte_pgsize) +{ + unsigned long *ptr = domain->pgd_root; + unsigned long pte; + int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2; + + do { + const int shift = PAGE_SHIFT + PT_SHIFT * level; + + ptr += ((iova >> shift) & (PTRS_PER_PTE - 1)); + pte = READ_ONCE(*ptr); + if (_io_pte_present(pte) && _io_pte_leaf(pte)) { + *pte_pgsize = (size_t)1 << shift; + return ptr; + } + if (_io_pte_none(pte)) + return NULL; + ptr = (unsigned long *)pfn_to_virt(__page_val_to_pfn(pte)); + } while (level-- > 0); + + return NULL; +} + +static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain, + unsigned long iova, phys_addr_t phys, + size_t pgsize, size_t pgcount, int prot, + gfp_t gfp, size_t *mapped) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + size_t size = 0; + unsigned long *ptr; + unsigned long pte, old, pte_prot; + int rc = 0; + LIST_HEAD(freelist); + + if (!(prot & IOMMU_WRITE)) + pte_prot = _PAGE_BASE | _PAGE_READ; + else if (domain->amo_enabled) + pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE; + else + pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY; + + while (pgcount) { + ptr = riscv_iommu_pte_alloc(domain, iova, pgsize, gfp); + if (!ptr) { + rc = -ENOMEM; + break; + } + + old = READ_ONCE(*ptr); + pte = _io_pte_entry(phys_to_pfn(phys), pte_prot); + if (cmpxchg_relaxed(ptr, old, pte) != old) + continue; + + riscv_iommu_pte_free(domain, old, &freelist); + + size += pgsize; + iova += pgsize; + phys += pgsize; + --pgcount; + } + + *mapped = size; + + if (!list_empty(&freelist)) { + /* + * In 1.0 spec version, the smallest scope we can use to + * invalidate all levels of page table (i.e. leaf and non-leaf) + * is an invalidate-all-PSCID IOTINVAL.VMA with AV=0. + * This will be updated with hardware support for + * capability.NL (non-leaf) IOTINVAL command. + */ + riscv_iommu_iotlb_inval(domain, 0, ULONG_MAX); + iommu_put_pages_list(&freelist); + } + + return rc; +} + +static size_t riscv_iommu_unmap_pages(struct iommu_domain *iommu_domain, + unsigned long iova, size_t pgsize, + size_t pgcount, + struct iommu_iotlb_gather *gather) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + size_t size = pgcount << __ffs(pgsize); + unsigned long *ptr, old; + size_t unmapped = 0; + size_t pte_size; + + while (unmapped < size) { + ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); + if (!ptr) + return unmapped; + + /* partial unmap is not allowed, fail. 
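 * (Editorial note on pte_prot in riscv_iommu_map_pages() above: when
 * the IOMMU cannot update A/D bits atomically (no AMO_HWAD capability),
 * writable mappings are created with _PAGE_DIRTY pre-set; otherwise, as
 * I read the spec, a device write through a clean PTE would fault
 * rather than set D in hardware.)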
*/ + if (iova & (pte_size - 1)) + return unmapped; + + old = READ_ONCE(*ptr); + if (cmpxchg_relaxed(ptr, old, 0) != old) + continue; + + iommu_iotlb_gather_add_page(&domain->domain, gather, iova, + pte_size); + + iova += pte_size; + unmapped += pte_size; + } + + return unmapped; +} + +static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain, + dma_addr_t iova) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + unsigned long pte_size; + unsigned long *ptr; + + ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); + if (_io_pte_none(*ptr) || !_io_pte_present(*ptr)) + return 0; + + return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1)); +} + +static void riscv_iommu_free_paging_domain(struct iommu_domain *iommu_domain) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + const unsigned long pfn = virt_to_pfn(domain->pgd_root); + + WARN_ON(!list_empty(&domain->bonds)); + + if ((int)domain->pscid > 0) + ida_free(&riscv_iommu_pscids, domain->pscid); + + riscv_iommu_pte_free(domain, _io_pte_entry(pfn, _PAGE_TABLE), NULL); + kfree(domain); +} + +static bool riscv_iommu_pt_supported(struct riscv_iommu_device *iommu, int pgd_mode) +{ + switch (pgd_mode) { + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: + return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39; + + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: + return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48; + + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: + return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57; + } + return false; +} + +static int riscv_iommu_attach_paging_domain(struct iommu_domain *iommu_domain, + struct device *dev) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); + u64 fsc, ta; + + if (!riscv_iommu_pt_supported(iommu, domain->pgd_mode)) + return -ENODEV; + + fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) | + FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root)); + ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) | + RISCV_IOMMU_PC_TA_V; + + if (riscv_iommu_bond_link(domain, dev)) + return -ENOMEM; + + riscv_iommu_iodir_update(iommu, dev, fsc, ta); + riscv_iommu_bond_unlink(info->domain, dev); + info->domain = domain; + + return 0; +} + +static const struct iommu_domain_ops riscv_iommu_paging_domain_ops = { + .attach_dev = riscv_iommu_attach_paging_domain, + .free = riscv_iommu_free_paging_domain, + .map_pages = riscv_iommu_map_pages, + .unmap_pages = riscv_iommu_unmap_pages, + .iova_to_phys = riscv_iommu_iova_to_phys, + .iotlb_sync = riscv_iommu_iotlb_sync, + .flush_iotlb_all = riscv_iommu_iotlb_flush_all, +}; + +static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev) +{ + struct riscv_iommu_domain *domain; + struct riscv_iommu_device *iommu; + unsigned int pgd_mode; + dma_addr_t va_mask; + int va_bits; + + iommu = dev_to_iommu(dev); + if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57) { + pgd_mode = RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57; + va_bits = 57; + } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48) { + pgd_mode = RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48; + va_bits = 48; + } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39) { + pgd_mode = RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39; + va_bits = 39; + } else { + dev_err(dev, "cannot find supported page table mode\n"); + return ERR_PTR(-ENODEV); + } + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if 
(!domain) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD_RCU(&domain->bonds); + spin_lock_init(&domain->lock); + domain->numa_node = dev_to_node(iommu->dev); + domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD); + domain->pgd_mode = pgd_mode; + domain->pgd_root = iommu_alloc_page_node(domain->numa_node, + GFP_KERNEL_ACCOUNT); + if (!domain->pgd_root) { + kfree(domain); + return ERR_PTR(-ENOMEM); + } + + domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1, + RISCV_IOMMU_MAX_PSCID, GFP_KERNEL); + if (domain->pscid < 0) { + iommu_free_page(domain->pgd_root); + kfree(domain); + return ERR_PTR(-ENOMEM); + } + + /* + * Note: RISC-V Privilege spec mandates that virtual addresses + * need to be sign-extended, so if (VA_BITS - 1) is set, all + * bits >= VA_BITS need to also be set or else we'll get a + * page fault. However the code that creates the mappings + * above us (e.g. iommu_dma_alloc_iova()) won't do that for us + * for now, so we'll end up with invalid virtual addresses + * to map. As a workaround until we get this sorted out + * limit the available virtual addresses to VA_BITS - 1. + */ + va_mask = DMA_BIT_MASK(va_bits - 1); + + domain->domain.geometry.aperture_start = 0; + domain->domain.geometry.aperture_end = va_mask; + domain->domain.geometry.force_aperture = true; + domain->domain.pgsize_bitmap = va_mask & (SZ_4K | SZ_2M | SZ_1G | SZ_512G); + + domain->domain.ops = &riscv_iommu_paging_domain_ops; + + return &domain->domain; +} + +static int riscv_iommu_attach_blocking_domain(struct iommu_domain *iommu_domain, + struct device *dev) +{ + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); + + /* Make device context invalid, translation requests will fault w/ #258 */ + riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, 0); + riscv_iommu_bond_unlink(info->domain, dev); + info->domain = NULL; + + return 0; +} + +static struct iommu_domain riscv_iommu_blocking_domain = { + .type = IOMMU_DOMAIN_BLOCKED, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = riscv_iommu_attach_blocking_domain, + } +}; + +static int riscv_iommu_attach_identity_domain(struct iommu_domain *iommu_domain, + struct device *dev) +{ + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); + + riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, RISCV_IOMMU_PC_TA_V); + riscv_iommu_bond_unlink(info->domain, dev); + info->domain = NULL; + + return 0; +} + +static struct iommu_domain riscv_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = riscv_iommu_attach_identity_domain, + } +}; + +static struct iommu_group *riscv_iommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + return generic_device_group(dev); +} + +static int riscv_iommu_of_xlate(struct device *dev, const struct of_phandle_args *args) +{ + return iommu_fwspec_add_ids(dev, args->args, 1); +} + +static struct iommu_device *riscv_iommu_probe_device(struct device *dev) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct riscv_iommu_device *iommu; + struct riscv_iommu_info *info; + struct riscv_iommu_dc *dc; + u64 tc; + int i; + + if (!fwspec || !fwspec->iommu_fwnode->dev || !fwspec->num_ids) + return ERR_PTR(-ENODEV); + + iommu = dev_get_drvdata(fwspec->iommu_fwnode->dev); + if (!iommu) + return ERR_PTR(-ENODEV); + + /* + * IOMMU hardware operating in fail-over BARE mode 
will provide + * identity translation for all connected devices anyway... + */ + if (iommu->ddt_mode <= RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) + return ERR_PTR(-ENODEV); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + /* + * Allocate and pre-configure device context entries in + * the device directory. Do not mark the context valid yet. + */ + tc = 0; + if (iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD) + tc |= RISCV_IOMMU_DC_TC_SADE; + for (i = 0; i < fwspec->num_ids; i++) { + dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); + if (!dc) { + kfree(info); + return ERR_PTR(-ENODEV); + } + if (READ_ONCE(dc->tc) & RISCV_IOMMU_DC_TC_V) + dev_warn(dev, "already attached to IOMMU device directory\n"); + WRITE_ONCE(dc->tc, tc); + } + + dev_iommu_priv_set(dev, info); + + return &iommu->iommu; +} + +static void riscv_iommu_release_device(struct device *dev) +{ + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); + + kfree_rcu_mightsleep(info); +} + +static const struct iommu_ops riscv_iommu_ops = { + .pgsize_bitmap = SZ_4K, + .of_xlate = riscv_iommu_of_xlate, + .identity_domain = &riscv_iommu_identity_domain, + .blocked_domain = &riscv_iommu_blocking_domain, + .release_domain = &riscv_iommu_blocking_domain, + .domain_alloc_paging = riscv_iommu_alloc_paging_domain, + .device_group = riscv_iommu_device_group, + .probe_device = riscv_iommu_probe_device, + .release_device = riscv_iommu_release_device, +}; + +static int riscv_iommu_init_check(struct riscv_iommu_device *iommu) +{ + u64 ddtp; + + /* + * Make sure the IOMMU is switched off or in pass-through mode during + * regular boot flow and disable translation when we boot into a kexec + * kernel and the previous kernel left them enabled. + */ + ddtp = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_DDTP); + if (ddtp & RISCV_IOMMU_DDTP_BUSY) + return -EBUSY; + + if (FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp) > + RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) { + if (!is_kdump_kernel()) + return -EBUSY; + riscv_iommu_disable(iommu); + } + + /* Configure accesses to in-memory data structures for CPU-native byte order. */ + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) != + !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) { + if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_END)) + return -EINVAL; + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, + iommu->fctl ^ RISCV_IOMMU_FCTL_BE); + iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) != + !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) + return -EINVAL; + } + + /* + * Distribute interrupt vectors, always use first vector for CIV. + * At least one interrupt is required. Read back and verify. 
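 * (Editorial worked example: with two vectors the modulo assignment
 * below yields CIV=0, FIV=1, PIV=0, PMIV=1, so the command queue and
 * page-request queue share vector 0 while the fault queue and
 * performance monitor share vector 1; with a single vector everything
 * maps to 0.)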
+ */ + if (!iommu->irqs_count) + return -EINVAL; + + iommu->icvec = FIELD_PREP(RISCV_IOMMU_ICVEC_FIV, 1 % iommu->irqs_count) | + FIELD_PREP(RISCV_IOMMU_ICVEC_PIV, 2 % iommu->irqs_count) | + FIELD_PREP(RISCV_IOMMU_ICVEC_PMIV, 3 % iommu->irqs_count); + riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_ICVEC, iommu->icvec); + iommu->icvec = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_ICVEC); + if (max(max(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec), + FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec)), + max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec), + FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count) + return -EINVAL; + + return 0; +} + +void riscv_iommu_remove(struct riscv_iommu_device *iommu) +{ + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF); + riscv_iommu_queue_disable(&iommu->cmdq); + riscv_iommu_queue_disable(&iommu->fltq); +} + +int riscv_iommu_init(struct riscv_iommu_device *iommu) +{ + int rc; + + RISCV_IOMMU_QUEUE_INIT(&iommu->cmdq, CQ); + RISCV_IOMMU_QUEUE_INIT(&iommu->fltq, FQ); + + rc = riscv_iommu_init_check(iommu); + if (rc) + return dev_err_probe(iommu->dev, rc, "unexpected device state\n"); + + rc = riscv_iommu_iodir_alloc(iommu); + if (rc) + return rc; + + rc = riscv_iommu_queue_alloc(iommu, &iommu->cmdq, + sizeof(struct riscv_iommu_command)); + if (rc) + return rc; + + rc = riscv_iommu_queue_alloc(iommu, &iommu->fltq, + sizeof(struct riscv_iommu_fq_record)); + if (rc) + return rc; + + rc = riscv_iommu_queue_enable(iommu, &iommu->cmdq, riscv_iommu_cmdq_process); + if (rc) + return rc; + + rc = riscv_iommu_queue_enable(iommu, &iommu->fltq, riscv_iommu_fltq_process); + if (rc) + goto err_queue_disable; + + rc = riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_MAX); + if (rc) + goto err_queue_disable; + + rc = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "riscv-iommu@%s", + dev_name(iommu->dev)); + if (rc) { + dev_err_probe(iommu->dev, rc, "cannot register sysfs interface\n"); + goto err_iodir_off; + } + + rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev); + if (rc) { + dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n"); + goto err_remove_sysfs; + } + + return 0; + +err_remove_sysfs: + iommu_device_sysfs_remove(&iommu->iommu); +err_iodir_off: + riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF); +err_queue_disable: + riscv_iommu_queue_disable(&iommu->fltq); + riscv_iommu_queue_disable(&iommu->cmdq); + return rc; +} diff --git a/drivers/iommu/riscv/iommu.h b/drivers/iommu/riscv/iommu.h new file mode 100644 index 000000000000..b1c4664542b4 --- /dev/null +++ b/drivers/iommu/riscv/iommu.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2022-2024 Rivos Inc. 
+ * Copyright © 2023 FORTH-ICS/CARV + * + * Authors + * Tomasz Jeznach <tjeznach@rivosinc.com> + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#ifndef _RISCV_IOMMU_H_ +#define _RISCV_IOMMU_H_ + +#include <linux/iommu.h> +#include <linux/types.h> +#include <linux/iopoll.h> + +#include "iommu-bits.h" + +struct riscv_iommu_device; + +struct riscv_iommu_queue { + atomic_t prod; /* unbounded producer allocation index */ + atomic_t head; /* unbounded shadow ring buffer consumer index */ + atomic_t tail; /* unbounded shadow ring buffer producer index */ + unsigned int mask; /* index mask, queue length - 1 */ + unsigned int irq; /* allocated interrupt number */ + struct riscv_iommu_device *iommu; /* iommu device handling the queue when active */ + void *base; /* ring buffer kernel pointer */ + dma_addr_t phys; /* ring buffer physical address */ + u16 qbr; /* base register offset, head and tail reference */ + u16 qcr; /* control and status register offset */ + u8 qid; /* queue identifier, same as RISCV_IOMMU_INTR_XX */ +}; + +struct riscv_iommu_device { + /* iommu core interface */ + struct iommu_device iommu; + + /* iommu hardware */ + struct device *dev; + + /* hardware control register space */ + void __iomem *reg; + + /* supported and enabled hardware capabilities */ + u64 caps; + u32 fctl; + + /* available interrupt numbers, MSI or WSI */ + unsigned int irqs[RISCV_IOMMU_INTR_COUNT]; + unsigned int irqs_count; + unsigned int icvec; + + /* hardware queues */ + struct riscv_iommu_queue cmdq; + struct riscv_iommu_queue fltq; + + /* device directory */ + unsigned int ddt_mode; + dma_addr_t ddt_phys; + u64 *ddt_root; +}; + +int riscv_iommu_init(struct riscv_iommu_device *iommu); +void riscv_iommu_remove(struct riscv_iommu_device *iommu); + +#define riscv_iommu_readl(iommu, addr) \ + readl_relaxed((iommu)->reg + (addr)) + +#define riscv_iommu_readq(iommu, addr) \ + readq_relaxed((iommu)->reg + (addr)) + +#define riscv_iommu_writel(iommu, addr, val) \ + writel_relaxed((val), (iommu)->reg + (addr)) + +#define riscv_iommu_writeq(iommu, addr, val) \ + writeq_relaxed((val), (iommu)->reg + (addr)) + +#define riscv_iommu_readq_timeout(iommu, addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readq_relaxed, (iommu)->reg + (addr), val, cond, \ + delay_us, timeout_us) + +#define riscv_iommu_readl_timeout(iommu, addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readl_relaxed, (iommu)->reg + (addr), val, cond, \ + delay_us, timeout_us) + +#endif diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index d8eaa7ea380b..fbdeded3d48b 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -33,6 +33,8 @@ struct s390_domain { struct rcu_head rcu; }; +static struct iommu_domain blocking_domain; + static inline unsigned int calc_rtx(dma_addr_t ptr) { return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK; @@ -369,20 +371,36 @@ static void s390_domain_free(struct iommu_domain *domain) call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain); } -static void s390_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) +static void zdev_s390_domain_update(struct zpci_dev *zdev, + struct iommu_domain *domain) +{ + unsigned long flags; + + spin_lock_irqsave(&zdev->dom_lock, flags); + zdev->s390_domain = domain; + spin_unlock_irqrestore(&zdev->dom_lock, flags); +} + +static int blocking_domain_attach_device(struct iommu_domain *domain, + struct device *dev) { - struct s390_domain *s390_domain = to_s390_domain(domain); struct zpci_dev *zdev = 
to_zpci_dev(dev); + struct s390_domain *s390_domain; unsigned long flags; + if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED) + return 0; + + s390_domain = to_s390_domain(zdev->s390_domain); spin_lock_irqsave(&s390_domain->list_lock, flags); list_del_rcu(&zdev->iommu_list); spin_unlock_irqrestore(&s390_domain->list_lock, flags); zpci_unregister_ioat(zdev, 0); - zdev->s390_domain = NULL; zdev->dma_table = NULL; + zdev_s390_domain_update(zdev, domain); + + return 0; } static int s390_iommu_attach_device(struct iommu_domain *domain, @@ -401,20 +419,15 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, domain->geometry.aperture_end < zdev->start_dma)) return -EINVAL; - if (zdev->s390_domain) - s390_iommu_detach_device(&zdev->s390_domain->domain, dev); + blocking_domain_attach_device(&blocking_domain, dev); + /* If we fail now DMA remains blocked via blocking domain */ cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, virt_to_phys(s390_domain->dma_table), &status); - /* - * If the device is undergoing error recovery the reset code - * will re-establish the new domain. - */ if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) return -EIO; - zdev->dma_table = s390_domain->dma_table; - zdev->s390_domain = s390_domain; + zdev_s390_domain_update(zdev, domain); spin_lock_irqsave(&s390_domain->list_lock, flags); list_add_rcu(&zdev->iommu_list, &s390_domain->devices); @@ -466,19 +479,11 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev) if (zdev->tlb_refresh) dev->iommu->shadow_on_flush = 1; - return &zdev->iommu_dev; -} + /* Start with DMA blocked */ + spin_lock_init(&zdev->dom_lock); + zdev_s390_domain_update(zdev, &blocking_domain); -static void s390_iommu_release_device(struct device *dev) -{ - struct zpci_dev *zdev = to_zpci_dev(dev); - - /* - * release_device is expected to detach any domain currently attached - * to the device, but keep it attached to other devices in the group. 
- */ - if (zdev) - s390_iommu_detach_device(&zdev->s390_domain->domain, dev); + return &zdev->iommu_dev; } static int zpci_refresh_all(struct zpci_dev *zdev) @@ -697,9 +702,15 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain, struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev) { - if (!zdev || !zdev->s390_domain) + struct s390_domain *s390_domain; + + lockdep_assert_held(&zdev->dom_lock); + + if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED) return NULL; - return &zdev->s390_domain->ctrs; + + s390_domain = to_s390_domain(zdev->s390_domain); + return &s390_domain->ctrs; } int zpci_init_iommu(struct zpci_dev *zdev) @@ -776,11 +787,19 @@ static int __init s390_iommu_init(void) } subsys_initcall(s390_iommu_init); +static struct iommu_domain blocking_domain = { + .type = IOMMU_DOMAIN_BLOCKED, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = blocking_domain_attach_device, + } +}; + static const struct iommu_ops s390_iommu_ops = { + .blocked_domain = &blocking_domain, + .release_domain = &blocking_domain, .capable = s390_iommu_capable, .domain_alloc_paging = s390_domain_alloc_paging, .probe_device = s390_iommu_probe_device, - .release_device = s390_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = SZ_4K, .get_resv_regions = s390_iommu_get_resv_regions, diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index e24db71a8783..bb79e19dfb59 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -14,6 +14,7 @@ #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/kernel.h> +#include <linux/kvm_para.h> #include <linux/syscore_ops.h> #include <asm/numa.h> @@ -26,15 +27,37 @@ #define EIOINTC_REG_ISR 0x1800 #define EIOINTC_REG_ROUTE 0x1c00 +#define EXTIOI_VIRT_FEATURES 0x40000000 +#define EXTIOI_HAS_VIRT_EXTENSION BIT(0) +#define EXTIOI_HAS_ENABLE_OPTION BIT(1) +#define EXTIOI_HAS_INT_ENCODE BIT(2) +#define EXTIOI_HAS_CPU_ENCODE BIT(3) +#define EXTIOI_VIRT_CONFIG 0x40000004 +#define EXTIOI_ENABLE BIT(1) +#define EXTIOI_ENABLE_INT_ENCODE BIT(2) +#define EXTIOI_ENABLE_CPU_ENCODE BIT(3) + #define VEC_REG_COUNT 4 #define VEC_COUNT_PER_REG 64 #define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) #define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG) #define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG) #define EIOINTC_ALL_ENABLE 0xffffffff +#define EIOINTC_ALL_ENABLE_VEC_MASK(vector) (EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f)) +#define EIOINTC_REG_ENABLE_VEC(vector) (EIOINTC_REG_ENABLE + ((vector >> 5) << 2)) +#define EIOINTC_USE_CPU_ENCODE BIT(0) #define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE) +/* + * Routing registers are 32-bit, and there is an 8-bit route setting for every + * interrupt vector, so one Route register contains the routing information + * for four vectors.
+ */ +#define EIOINTC_REG_ROUTE_VEC(vector) (EIOINTC_REG_ROUTE + (vector & ~0x03)) +#define EIOINTC_REG_ROUTE_VEC_SHIFT(vector) ((vector & 0x03) << 3) +#define EIOINTC_REG_ROUTE_VEC_MASK(vector) (0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector)) + static int nr_pics; struct eiointc_priv { @@ -44,6 +67,7 @@ struct eiointc_priv { cpumask_t cpuspan_map; struct fwnode_handle *domain_handle; struct irq_domain *eiointc_domain; + int flags; }; static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; @@ -59,7 +83,10 @@ static void eiointc_enable(void) static int cpu_to_eio_node(int cpu) { - return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + if (!kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) + return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + else + return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE; } #ifdef CONFIG_SMP @@ -89,6 +116,17 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, } } +static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu) +{ + unsigned long reg = EIOINTC_REG_ROUTE_VEC(vector); + unsigned int data; + + data = iocsr_read32(reg); + data &= ~EIOINTC_REG_ROUTE_VEC_MASK(vector); + data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector); + iocsr_write32(data, reg); +} + static DEFINE_RAW_SPINLOCK(affinity_lock); static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) @@ -107,18 +145,24 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af } vector = d->hwirq; - regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - - /* Mask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), - 0x0, priv->node * CORES_PER_EIO_NODE); - - /* Set route for target vector */ - eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); - - /* Unmask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE, - 0x0, priv->node * CORES_PER_EIO_NODE); + regaddr = EIOINTC_REG_ENABLE_VEC(vector); + + if (priv->flags & EIOINTC_USE_CPU_ENCODE) { + iocsr_write32(EIOINTC_ALL_ENABLE_VEC_MASK(vector), regaddr); + veiointc_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE_VEC_MASK(vector), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -142,17 +186,23 @@ static int eiointc_index(int node) static int eiointc_router_init(unsigned int cpu) { - int i, bit; - uint32_t data; - uint32_t node = cpu_to_eio_node(cpu); - int index = eiointc_index(node); + int i, bit, cores, index, node; + unsigned int data; + + node = cpu_to_eio_node(cpu); + index = eiointc_index(node); if (index < 0) { pr_err("Error: invalid nodemap!\n"); - return -1; + return -EINVAL; } - if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE)) + cores = CORES_PER_EIO_NODE; + else + cores = CORES_PER_VEIO_NODE; + + if ((cpu_logical_map(cpu) % cores) == 0) { eiointc_enable(); for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { @@ -168,7 +218,9 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ - if (index == 0) + if (eiointc_priv[index]->flags & 
EIOINTC_USE_CPU_ENCODE) + bit = cpu_logical_map(0); + else if (index == 0) bit = BIT(cpu_logical_map(0)); else bit = (eiointc_priv[index]->node << 4) | 1; @@ -375,7 +427,7 @@ static int __init acpi_cascade_irqdomain_init(void) static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, u64 node_map) { - int i; + int i, val; node_map = node_map ? node_map : -1ULL; for_each_possible_cpu(i) { @@ -395,6 +447,20 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, return -ENOMEM; } + if (kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) { + val = iocsr_read32(EXTIOI_VIRT_FEATURES); + /* + * With EXTIOI_ENABLE_CPU_ENCODE set + * interrupts can route to 256 vCPUs. + */ + if (val & EXTIOI_HAS_CPU_ENCODE) { + val = iocsr_read32(EXTIOI_VIRT_CONFIG); + val |= EXTIOI_ENABLE_CPU_ENCODE; + iocsr_write32(val, EXTIOI_VIRT_CONFIG); + priv->flags = EIOINTC_USE_CPU_ENCODE; + } + } + eiointc_priv[nr_pics++] = priv; eiointc_router_init(0); irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); diff --git a/drivers/leds/blink/leds-bcm63138.c b/drivers/leds/blink/leds-bcm63138.c index 3a5e0b98bfbc..ef2e511438cc 100644 --- a/drivers/leds/blink/leds-bcm63138.c +++ b/drivers/leds/blink/leds-bcm63138.c @@ -2,6 +2,8 @@ /* * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl> */ +#include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/leds.h> @@ -19,8 +21,10 @@ #define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */ #define BCM63138_GLB_CTRL 0x00 -#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002 -#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008 +#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL BIT(1) +#define BCM63138_GLB_CTRL_SERIAL_LED_CLK_POL BIT(2) +#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL BIT(3) +#define BCM63138_GLB_CTRL_SERIAL_LED_MSB_FIRST BIT(4) #define BCM63138_MASK 0x04 #define BCM63138_HW_LED_EN 0x08 #define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c @@ -33,6 +37,7 @@ #define BCM63138_BRIGHT_CTRL3 0x28 #define BCM63138_BRIGHT_CTRL4 0x2c #define BCM63138_POWER_LED_CFG 0x30 +#define BCM63138_POWER_LUT_BASE0 0x34 /* -> b0 */ #define BCM63138_HW_POLARITY 0xb4 #define BCM63138_SW_DATA 0xb8 #define BCM63138_SW_POLARITY 0xbc @@ -125,17 +130,14 @@ static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev, { struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev); struct bcm63138_leds *leds = led->leds; - unsigned long flags; - spin_lock_irqsave(&leds->lock, flags); + guard(spinlock_irqsave)(&leds->lock); bcm63138_leds_enable_led(leds, led, value); if (!value) bcm63138_leds_set_flash_rate(leds, led, 0); else bcm63138_leds_set_bright(leds, led, value); - - spin_unlock_irqrestore(&leds->lock, flags); } static int bcm63138_leds_blink_set(struct led_classdev *led_cdev, @@ -144,7 +146,6 @@ static int bcm63138_leds_blink_set(struct led_classdev *led_cdev, { struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev); struct bcm63138_leds *leds = led->leds; - unsigned long flags; u8 value; if (!*delay_on && !*delay_off) { @@ -179,13 +180,11 @@ static int bcm63138_leds_blink_set(struct led_classdev *led_cdev, return -EINVAL; } - spin_lock_irqsave(&leds->lock, flags); + guard(spinlock_irqsave)(&leds->lock); bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS); bcm63138_leds_set_flash_rate(leds, led, value); - spin_unlock_irqrestore(&leds->lock, flags); - return 0; } @@ -259,7 +258,7 @@ static int bcm63138_leds_probe(struct platform_device 
*pdev) struct device_node *np = dev_of_node(&pdev->dev); struct device *dev = &pdev->dev; struct bcm63138_leds *leds; - struct device_node *child; + u32 shift_bits; leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL); if (!leds) @@ -273,6 +272,12 @@ static int bcm63138_leds_probe(struct platform_device *pdev) spin_lock_init(&leds->lock); + /* If this property is not present, we use boot defaults */ + if (!of_property_read_u32(np, "brcm,serial-shift-bits", &shift_bits)) { + bcm63138_leds_write(leds, BCM63138_SERIAL_LED_SHIFT_SEL, + GENMASK(shift_bits - 1, 0)); + } + bcm63138_leds_write(leds, BCM63138_GLB_CTRL, BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL | BCM63138_GLB_CTRL_SERIAL_LED_EN_POL); @@ -280,7 +285,7 @@ static int bcm63138_leds_probe(struct platform_device *pdev) bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0); bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0); - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { bcm63138_leds_create_led(leds, child); } diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c index 7b04ea146260..effaaaf302b5 100644 --- a/drivers/leds/blink/leds-lgm-sso.c +++ b/drivers/leds/blink/leds-lgm-sso.c @@ -861,7 +861,7 @@ MODULE_DEVICE_TABLE(of, of_sso_led_match); static struct platform_driver intel_sso_led_driver = { .probe = intel_sso_led_probe, - .remove_new = intel_sso_led_remove, + .remove = intel_sso_led_remove, .driver = { .name = "lgm-ssoled", .of_match_table = of_sso_led_match, diff --git a/drivers/leds/flash/leds-aat1290.c b/drivers/leds/flash/leds-aat1290.c index c7b6a1f01288..49251cfd3350 100644 --- a/drivers/leds/flash/leds-aat1290.c +++ b/drivers/leds/flash/leds-aat1290.c @@ -536,7 +536,7 @@ MODULE_DEVICE_TABLE(of, aat1290_led_dt_match); static struct platform_driver aat1290_led_driver = { .probe = aat1290_led_probe, - .remove_new = aat1290_led_remove, + .remove = aat1290_led_remove, .driver = { .name = "aat1290", .of_match_table = aat1290_led_dt_match, diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c index 16a01a200c0b..743830a10f99 100644 --- a/drivers/leds/flash/leds-ktd2692.c +++ b/drivers/leds/flash/leds-ktd2692.c @@ -292,6 +292,7 @@ static int ktd2692_probe(struct platform_device *pdev) fled_cdev = &led->fled_cdev; led_cdev = &fled_cdev->led_cdev; + led->props.timing = ktd2692_timing; ret = ktd2692_parse_dt(led, &pdev->dev, &led_cfg); if (ret) @@ -343,7 +344,7 @@ static struct platform_driver ktd2692_driver = { .of_match_table = ktd2692_match, }, .probe = ktd2692_probe, - .remove_new = ktd2692_remove, + .remove = ktd2692_remove, }; module_platform_driver(ktd2692_driver); diff --git a/drivers/leds/flash/leds-max77693.c b/drivers/leds/flash/leds-max77693.c index 90d78b3d22f8..daee10986108 100644 --- a/drivers/leds/flash/leds-max77693.c +++ b/drivers/leds/flash/leds-max77693.c @@ -1042,7 +1042,7 @@ MODULE_DEVICE_TABLE(of, max77693_led_dt_match); static struct platform_driver max77693_led_driver = { .probe = max77693_led_probe, - .remove_new = max77693_led_remove, + .remove = max77693_led_remove, .driver = { .name = "max77693-led", .of_match_table = max77693_led_dt_match, diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c index 4c74f1cf01f0..462a902f54e0 100644 --- a/drivers/leds/flash/leds-mt6360.c +++ b/drivers/leds/flash/leds-mt6360.c @@ -784,7 +784,6 @@ static void mt6360_v4l2_flash_release(struct mt6360_priv *priv) static int mt6360_led_probe(struct platform_device *pdev) { struct 
mt6360_priv *priv; - struct fwnode_handle *child; size_t count; int i = 0, ret; @@ -811,7 +810,7 @@ static int mt6360_led_probe(struct platform_device *pdev) return -ENODEV; } - device_for_each_child_node(&pdev->dev, child) { + device_for_each_child_node_scoped(&pdev->dev, child) { struct mt6360_led *led = priv->leds + i; struct led_init_data init_data = { .fwnode = child, }; u32 reg, led_color; @@ -887,7 +886,7 @@ static struct platform_driver mt6360_led_driver = { .of_match_table = mt6360_led_of_id, }, .probe = mt6360_led_probe, - .remove_new = mt6360_led_remove, + .remove = mt6360_led_remove, }; module_platform_driver(mt6360_led_driver); diff --git a/drivers/leds/flash/leds-mt6370-flash.c b/drivers/leds/flash/leds-mt6370-flash.c index 912d9d622320..dbdbe92309db 100644 --- a/drivers/leds/flash/leds-mt6370-flash.c +++ b/drivers/leds/flash/leds-mt6370-flash.c @@ -509,7 +509,6 @@ static int mt6370_led_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mt6370_priv *priv; - struct fwnode_handle *child; size_t count; int i = 0, ret; @@ -529,22 +528,18 @@ static int mt6370_led_probe(struct platform_device *pdev) if (!priv->regmap) return dev_err_probe(dev, -ENODEV, "Failed to get parent regmap\n"); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct mt6370_led *led = priv->leds + i; led->priv = priv; ret = mt6370_init_flash_properties(dev, led, child); - if (ret) { - fwnode_handle_put(child); + if (ret) return ret; - } ret = mt6370_led_register(dev, led, child); - if (ret) { - fwnode_handle_put(child); + if (ret) return ret; - } i++; } diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c index 41ce034f700e..b4c19be51c4d 100644 --- a/drivers/leds/flash/leds-qcom-flash.c +++ b/drivers/leds/flash/leds-qcom-flash.c @@ -812,7 +812,6 @@ static int qcom_flash_led_probe(struct platform_device *pdev) { struct qcom_flash_data *flash_data; struct qcom_flash_led *led; - struct fwnode_handle *child; struct device *dev = &pdev->dev; struct regmap *regmap; struct reg_field *regs; @@ -896,7 +895,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev) if (!flash_data->v4l2_flash) return -ENOMEM; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); if (!led) { rc = -ENOMEM; @@ -914,7 +913,6 @@ static int qcom_flash_led_probe(struct platform_device *pdev) return 0; release: - fwnode_handle_put(child); while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count) v4l2_flash_release(flash_data->v4l2_flash[flash_data->leds_count--]); return rc; @@ -942,7 +940,7 @@ static struct platform_driver qcom_flash_led_driver = { .of_match_table = qcom_flash_led_match_table, }, .probe = qcom_flash_led_probe, - .remove_new = qcom_flash_led_remove, + .remove = qcom_flash_led_remove, }; module_platform_driver(qcom_flash_led_driver); diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c index eef426924eaf..6b051f182b72 100644 --- a/drivers/leds/flash/leds-rt8515.c +++ b/drivers/leds/flash/leds-rt8515.c @@ -388,7 +388,7 @@ static struct platform_driver rt8515_driver = { .of_match_table = rt8515_match, }, .probe = rt8515_probe, - .remove_new = rt8515_remove, + .remove = rt8515_remove, }; module_platform_driver(rt8515_driver); diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c index db0ac6641954..3c01739c0b46 100644 --- 
a/drivers/leds/flash/leds-sgm3140.c +++ b/drivers/leds/flash/leds-sgm3140.c @@ -300,7 +300,7 @@ MODULE_DEVICE_TABLE(of, sgm3140_dt_match); static struct platform_driver sgm3140_driver = { .probe = sgm3140_probe, - .remove_new = sgm3140_remove, + .remove = sgm3140_remove, .driver = { .name = "sgm3140", .of_match_table = sgm3140_dt_match, diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c index 6fe9d700dfef..f4e26ce84862 100644 --- a/drivers/leds/led-class-flash.c +++ b/drivers/leds/led-class-flash.c @@ -12,7 +12,6 @@ #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> -#include "leds.h" #define has_flash_op(fled_cdev, op) \ (fled_cdev && fled_cdev->ops->op) diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c index 30c1ecb5f361..b2a87c994816 100644 --- a/drivers/leds/led-class-multicolor.c +++ b/drivers/leds/led-class-multicolor.c @@ -11,8 +11,6 @@ #include <linux/slab.h> #include <linux/uaccess.h> -#include "leds.h" - int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, enum led_brightness brightness) { diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 06b97fd49ad9..2a04ac61574d 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -25,15 +25,20 @@ static DEFINE_MUTEX(leds_lookup_lock); static LIST_HEAD(leds_lookup_list); +static struct workqueue_struct *leds_wq; + static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); + unsigned int brightness; - /* no lock needed for this */ + mutex_lock(&led_cdev->led_access); led_update_brightness(led_cdev); + brightness = led_cdev->brightness; + mutex_unlock(&led_cdev->led_access); - return sprintf(buf, "%u\n", led_cdev->brightness); + return sprintf(buf, "%u\n", brightness); } static ssize_t brightness_store(struct device *dev, @@ -57,7 +62,6 @@ static ssize_t brightness_store(struct device *dev, if (state == LED_OFF) led_trigger_remove(led_cdev); led_set_brightness(led_cdev, state); - flush_work(&led_cdev->set_brightness_work); ret = size; unlock: @@ -70,8 +74,13 @@ static ssize_t max_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); + unsigned int max_brightness; - return sprintf(buf, "%u\n", led_cdev->max_brightness); + mutex_lock(&led_cdev->led_access); + max_brightness = led_cdev->max_brightness; + mutex_unlock(&led_cdev->led_access); + + return sprintf(buf, "%u\n", max_brightness); } static DEVICE_ATTR_RO(max_brightness); @@ -549,6 +558,8 @@ int led_classdev_register_ext(struct device *parent, led_update_brightness(led_cdev); + led_cdev->wq = leds_wq; + led_init_core(led_cdev); #ifdef CONFIG_LEDS_TRIGGERS @@ -667,12 +678,19 @@ EXPORT_SYMBOL_GPL(devm_led_classdev_unregister); static int __init leds_init(void) { + leds_wq = alloc_ordered_workqueue("leds", 0); + if (!leds_wq) { + pr_err("Failed to create LEDs ordered workqueue\n"); + return -ENOMEM; + } + return class_register(&leds_class); } static void __exit leds_exit(void) { class_unregister(&leds_class); + destroy_workqueue(leds_wq); } subsys_initcall(leds_init); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 001c290bc07b..f6c46d2e5276 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -273,7 +273,7 @@ void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on led_cdev->delayed_delay_on = delay_on; 
led_cdev->delayed_delay_off = delay_off; set_bit(LED_SET_BLINK, &led_cdev->work_flags); - schedule_work(&led_cdev->set_brightness_work); + queue_work(led_cdev->wq, &led_cdev->set_brightness_work); return; } @@ -304,7 +304,7 @@ void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness) */ if (!brightness) { set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags); - schedule_work(&led_cdev->set_brightness_work); + queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } else { set_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags); @@ -340,7 +340,7 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } - schedule_work(&led_cdev->set_brightness_work); + queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_set_brightness_nopm); diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 81238376484b..ef5c6c4667ab 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -226,7 +226,7 @@ static struct platform_driver pm860x_led_driver = { .name = "88pm860x-led", }, .probe = pm860x_led_probe, - .remove_new = pm860x_led_remove, + .remove = pm860x_led_remove, }; module_platform_driver(pm860x_led_driver); diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c index d89a4dca50ae..13e5bc80e56e 100644 --- a/drivers/leds/leds-adp5520.c +++ b/drivers/leds/leds-adp5520.c @@ -184,7 +184,7 @@ static struct platform_driver adp5520_led_driver = { .name = "adp5520-led", }, .probe = adp5520_led_probe, - .remove_new = adp5520_led_remove, + .remove = adp5520_led_remove, }; module_platform_driver(adp5520_led_driver); diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c index f9d9844e0273..08cca128458c 100644 --- a/drivers/leds/leds-aw200xx.c +++ b/drivers/leds/leds-aw200xx.c @@ -409,7 +409,6 @@ static int aw200xx_probe_get_display_rows(struct device *dev, static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip) { - struct fwnode_handle *child; u32 current_min, current_max, min_uA; int ret; int i; @@ -424,7 +423,7 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip) min_uA = UINT_MAX; i = 0; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct led_init_data init_data = {}; struct aw200xx_led *led; u32 source, imax; @@ -468,10 +467,8 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip) ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data); - if (ret) { - fwnode_handle_put(child); + if (ret) break; - } i++; } diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 29f5bad61796..592bbf4b7e35 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -113,7 +113,7 @@ static void bcm6328_led_mode(struct bcm6328_led *led, unsigned long value) unsigned long val, shift; shift = bcm6328_pin2shift(led->pin); - if (shift / 16) + if (shift >= 16) mode = led->mem + BCM6328_REG_MODE_HI; else mode = led->mem + BCM6328_REG_MODE_LO; @@ -357,7 +357,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, break; case LEDS_DEFSTATE_KEEP: shift = bcm6328_pin2shift(led->pin); - if (shift / 16) + if (shift >= 16) mode = mem + BCM6328_REG_MODE_HI; else mode = mem + BCM6328_REG_MODE_LO; diff --git a/drivers/leds/leds-cht-wcove.c b/drivers/leds/leds-cht-wcove.c index b4998402b8c6..8246f048edcb 100644 --- a/drivers/leds/leds-cht-wcove.c +++ 
b/drivers/leds/leds-cht-wcove.c @@ -461,7 +461,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(cht_wc_leds_pm, cht_wc_leds_suspend, cht_wc_leds static struct platform_driver cht_wc_leds_driver = { .probe = cht_wc_leds_probe, - .remove_new = cht_wc_leds_remove, + .remove = cht_wc_leds_remove, .shutdown = cht_wc_leds_disable, .driver = { .name = "cht_wcove_leds", diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 82da0fe688ad..f00b16ac1586 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -165,7 +165,7 @@ static void clevo_mail_led_remove(struct platform_device *pdev) } static struct platform_driver clevo_mail_led_driver = { - .remove_new = clevo_mail_led_remove, + .remove = clevo_mail_led_remove, .driver = { .name = KBUILD_MODNAME, }, diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c index c9914fc51f20..7e51c374edd4 100644 --- a/drivers/leds/leds-cr0014114.c +++ b/drivers/leds/leds-cr0014114.c @@ -181,11 +181,10 @@ static int cr0014114_probe_dt(struct cr0014114 *priv) { size_t i = 0; struct cr0014114_led *led; - struct fwnode_handle *child; struct led_init_data init_data = {}; int ret; - device_for_each_child_node(priv->dev, child) { + device_for_each_child_node_scoped(priv->dev, child) { led = &priv->leds[i]; led->priv = priv; @@ -201,7 +200,6 @@ static int cr0014114_probe_dt(struct cr0014114 *priv) if (ret) { dev_err(priv->dev, "failed to register LED device, err %d", ret); - fwnode_handle_put(child); return ret; } diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c index f067a5f4d3c4..71209b3c8f1e 100644 --- a/drivers/leds/leds-da903x.c +++ b/drivers/leds/leds-da903x.c @@ -133,7 +133,7 @@ static struct platform_driver da903x_led_driver = { .name = "da903x-led", }, .probe = da903x_led_probe, - .remove_new = da903x_led_remove, + .remove = da903x_led_remove, }; module_platform_driver(da903x_led_driver); diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c index 64679d62076b..7c9051184a59 100644 --- a/drivers/leds/leds-da9052.c +++ b/drivers/leds/leds-da9052.c @@ -179,7 +179,7 @@ static struct platform_driver da9052_led_driver = { .name = "da9052-leds", }, .probe = da9052_led_probe, - .remove_new = da9052_led_remove, + .remove = da9052_led_remove, }; module_platform_driver(da9052_led_driver); diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c index d40194a3029f..e26d1654bd0d 100644 --- a/drivers/leds/leds-el15203000.c +++ b/drivers/leds/leds-el15203000.c @@ -237,22 +237,20 @@ static int el15203000_pattern_clear(struct led_classdev *ldev) static int el15203000_probe_dt(struct el15203000 *priv) { struct el15203000_led *led = priv->leds; - struct fwnode_handle *child; int ret; - device_for_each_child_node(priv->dev, child) { + device_for_each_child_node_scoped(priv->dev, child) { struct led_init_data init_data = {}; ret = fwnode_property_read_u32(child, "reg", &led->reg); if (ret) { dev_err(priv->dev, "LED without ID number"); - goto err_child_out; + return ret; } if (led->reg > U8_MAX) { dev_err(priv->dev, "LED value %d is invalid", led->reg); - ret = -EINVAL; - goto err_child_out; + return -EINVAL; } led->priv = priv; @@ -274,17 +272,13 @@ static int el15203000_probe_dt(struct el15203000 *priv) dev_err(priv->dev, "failed to register LED device %s, err %d", led->ldev.name, ret); - goto err_child_out; + return ret; } led++; } return 0; - -err_child_out: - fwnode_handle_put(child); - return ret; } static int el15203000_probe(struct spi_device *spi) diff --git 
a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c index de3f12c2b80d..ccc01fa72e6f 100644 --- a/drivers/leds/leds-gpio-register.c +++ b/drivers/leds/leds-gpio-register.c @@ -10,8 +10,8 @@ /** * gpio_led_register_device - register a gpio-led device - * @pdata: the platform data used for the new device * @id: platform ID + * @pdata: the platform data used for the new device * * Makes a copy of pdata and pdata->leds and registers a new leds-gpio device * with the result. This allows to have pdata and pdata-leds in .init.rodata diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index cf5038cde5b8..a3428b22de3a 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -21,8 +21,6 @@ #include <linux/slab.h> #include <linux/types.h> -#include "leds.h" - struct gpio_led_data { struct led_classdev cdev; struct gpio_desc *gpiod; @@ -148,7 +146,6 @@ struct gpio_leds_priv { static struct gpio_leds_priv *gpio_leds_create(struct device *dev) { - struct fwnode_handle *child; struct gpio_leds_priv *priv; int count, used, ret; @@ -162,7 +159,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) priv->num_leds = count; used = 0; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct gpio_led_data *led_dat = &priv->leds[used]; struct gpio_led led = {}; @@ -176,7 +173,6 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) if (IS_ERR(led.gpiod)) { dev_err_probe(dev, PTR_ERR(led.gpiod), "Failed to get GPIO '%pfw'\n", child); - fwnode_handle_put(child); return ERR_CAST(led.gpiod); } @@ -192,10 +188,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) led.panic_indicator = 1; ret = create_gpio_led(&led, led_dat, dev, child, NULL); - if (ret < 0) { - fwnode_handle_put(child); + if (ret < 0) return ERR_PTR(ret); - } + /* Set gpiod label to match the corresponding LED name. 
*/ gpiod_set_consumer_name(led_dat->gpiod, led_dat->cdev.dev->kobj.name); diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c index 54b5650877f7..24dc8ad27bb3 100644 --- a/drivers/leds/leds-lm3532.c +++ b/drivers/leds/leds-lm3532.c @@ -551,7 +551,6 @@ static void gpio_set_low_action(void *data) static int lm3532_parse_node(struct lm3532_data *priv) { - struct fwnode_handle *child = NULL; struct lm3532_led *led; int control_bank; u32 ramp_time; @@ -587,7 +586,7 @@ static int lm3532_parse_node(struct lm3532_data *priv) else priv->runtime_ramp_down = lm3532_get_ramp_index(ramp_time); - device_for_each_child_node(priv->dev, child) { + device_for_each_child_node_scoped(priv->dev, child) { struct led_init_data idata = { .fwnode = child, .default_label = ":", @@ -599,7 +598,7 @@ static int lm3532_parse_node(struct lm3532_data *priv) ret = fwnode_property_read_u32(child, "reg", &control_bank); if (ret) { dev_err(&priv->client->dev, "reg property missing\n"); - goto child_out; + return ret; } if (control_bank > LM3532_CONTROL_C) { @@ -613,7 +612,7 @@ static int lm3532_parse_node(struct lm3532_data *priv) &led->mode); if (ret) { dev_err(&priv->client->dev, "ti,led-mode property missing\n"); - goto child_out; + return ret; } if (fwnode_property_present(child, "led-max-microamp") && @@ -647,7 +646,7 @@ static int lm3532_parse_node(struct lm3532_data *priv) led->num_leds); if (ret) { dev_err(&priv->client->dev, "led-sources property missing\n"); - goto child_out; + return ret; } led->priv = priv; @@ -657,23 +656,20 @@ static int lm3532_parse_node(struct lm3532_data *priv) if (ret) { dev_err(&priv->client->dev, "led register err: %d\n", ret); - goto child_out; + return ret; } ret = lm3532_init_registers(led); if (ret) { dev_err(&priv->client->dev, "register init err: %d\n", ret); - goto child_out; + return ret; } i++; } - return 0; -child_out: - fwnode_handle_put(child); - return ret; + return 0; } static int lm3532_probe(struct i2c_client *client) diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index a3d33165d262..45795f2a1042 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -744,7 +744,7 @@ static struct platform_driver lm3533_led_driver = { .name = "lm3533-leds", }, .probe = lm3533_led_probe, - .remove_new = lm3533_led_remove, + .remove = lm3533_led_remove, .shutdown = lm3533_led_shutdown, }; module_platform_driver(lm3533_led_driver); diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c index 99de2a331727..7ad232780a31 100644 --- a/drivers/leds/leds-lm3697.c +++ b/drivers/leds/leds-lm3697.c @@ -202,7 +202,6 @@ out: static int lm3697_probe_dt(struct lm3697 *priv) { - struct fwnode_handle *child = NULL; struct device *dev = priv->dev; struct lm3697_led *led; int ret = -EINVAL; @@ -220,19 +219,18 @@ static int lm3697_probe_dt(struct lm3697 *priv) if (IS_ERR(priv->regulator)) priv->regulator = NULL; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct led_init_data init_data = {}; ret = fwnode_property_read_u32(child, "reg", &control_bank); if (ret) { dev_err(dev, "reg property missing\n"); - goto child_out; + return ret; } if (control_bank > LM3697_CONTROL_B) { dev_err(dev, "reg property is invalid\n"); - ret = -EINVAL; - goto child_out; + return -EINVAL; } led = &priv->leds[i]; @@ -262,7 +260,7 @@ static int lm3697_probe_dt(struct lm3697 *priv) led->num_leds); if (ret) { dev_err(dev, "led-sources property missing\n"); - goto child_out; + return ret; } for (j = 0; j < 
led->num_leds; j++) @@ -286,17 +284,13 @@ static int lm3697_probe_dt(struct lm3697 *priv) &init_data); if (ret) { dev_err(dev, "led register err: %d\n", ret); - goto child_out; + return ret; } i++; } - return ret; - -child_out: - fwnode_handle_put(child); - return ret; + return 0; } static int lm3697_probe(struct i2c_client *client) diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c index 175d4b06659b..02cb1565a9fb 100644 --- a/drivers/leds/leds-lp50xx.c +++ b/drivers/leds/leds-lp50xx.c @@ -16,8 +16,6 @@ #include <linux/led-class-multicolor.h> -#include "leds.h" - #define LP50XX_DEV_CFG0 0x00 #define LP50XX_DEV_CFG1 0x01 #define LP50XX_LED_CFG0 0x02 @@ -434,7 +432,6 @@ static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv, static int lp50xx_probe_dt(struct lp50xx *priv) { - struct fwnode_handle *child = NULL; struct fwnode_handle *led_node = NULL; struct led_init_data init_data = {}; struct led_classdev *led_cdev; @@ -454,17 +451,17 @@ static int lp50xx_probe_dt(struct lp50xx *priv) if (IS_ERR(priv->regulator)) priv->regulator = NULL; - device_for_each_child_node(priv->dev, child) { + device_for_each_child_node_scoped(priv->dev, child) { led = &priv->leds[i]; ret = fwnode_property_count_u32(child, "reg"); if (ret < 0) { dev_err(priv->dev, "reg property is invalid\n"); - goto child_out; + return ret; } ret = lp50xx_probe_leds(child, priv, led, ret); if (ret) - goto child_out; + return ret; init_data.fwnode = child; num_colors = 0; @@ -475,10 +472,8 @@ static int lp50xx_probe_dt(struct lp50xx *priv) */ mc_led_info = devm_kcalloc(priv->dev, LP50XX_LEDS_PER_MODULE, sizeof(*mc_led_info), GFP_KERNEL); - if (!mc_led_info) { - ret = -ENOMEM; - goto child_out; - } + if (!mc_led_info) + return -ENOMEM; fwnode_for_each_child_node(child, led_node) { ret = fwnode_property_read_u32(led_node, "color", @@ -486,7 +481,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv) if (ret) { fwnode_handle_put(led_node); dev_err(priv->dev, "Cannot read color\n"); - goto child_out; + return ret; } mc_led_info[num_colors].color_index = color_id; @@ -504,16 +499,12 @@ static int lp50xx_probe_dt(struct lp50xx *priv) &init_data); if (ret) { dev_err(priv->dev, "led register err: %d\n", ret); - goto child_out; + return ret; } i++; } return 0; - -child_out: - fwnode_handle_put(child); - return ret; } static int lp50xx_probe(struct i2c_client *client) diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index b26bcc81d079..14a4af361b26 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -161,6 +161,30 @@ static int lp5562_post_init_device(struct lp55xx_chip *chip) return 0; } +static int lp5562_multicolor_brightness(struct lp55xx_led *led) +{ + struct lp55xx_chip *chip = led->chip; + static const u8 addr[] = { + LP5562_REG_R_PWM, + LP5562_REG_G_PWM, + LP5562_REG_B_PWM, + LP5562_REG_W_PWM, + }; + int ret; + int i; + + guard(mutex)(&chip->lock); + for (i = 0; i < led->mc_cdev.num_colors; i++) { + ret = lp55xx_write(chip, + addr[led->mc_cdev.subled_info[i].channel], + led->mc_cdev.subled_info[i].brightness); + if (ret) + break; + } + + return ret; +} + static int lp5562_led_brightness(struct lp55xx_led *led) { struct lp55xx_chip *chip = led->chip; @@ -364,6 +388,7 @@ static struct lp55xx_device_config lp5562_cfg = { .post_init_device = lp5562_post_init_device, .set_led_current = lp5562_set_led_current, .brightness_fn = lp5562_led_brightness, + .multicolor_brightness_fn = lp5562_multicolor_brightness, .run_engine = lp5562_run_engine, .firmware_cb = 
lp55xx_firmware_loaded_cb, .dev_attr_group = &lp5562_group, diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 5a2e259679cf..e71456a56ab8 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -1132,9 +1132,6 @@ static int lp55xx_parse_common_child(struct device_node *np, if (ret) return ret; - if (*chan_nr < 0 || *chan_nr > cfg->max_channel) - return -EINVAL; - return 0; } diff --git a/drivers/leds/leds-max5970.c b/drivers/leds/leds-max5970.c index 56a584311581..285074c53b23 100644 --- a/drivers/leds/leds-max5970.c +++ b/drivers/leds/leds-max5970.c @@ -45,7 +45,7 @@ static int max5970_led_set_brightness(struct led_classdev *cdev, static int max5970_led_probe(struct platform_device *pdev) { - struct fwnode_handle *led_node, *child; + struct fwnode_handle *child; struct device *dev = &pdev->dev; struct regmap *regmap; struct max5970_led *ddata; @@ -55,7 +55,8 @@ static int max5970_led_probe(struct platform_device *pdev) if (!regmap) return -ENODEV; - led_node = device_get_named_child_node(dev->parent, "leds"); + struct fwnode_handle *led_node __free(fwnode_handle) = + device_get_named_child_node(dev->parent, "leds"); if (!led_node) return -ENODEV; diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c index 1eeac56b0014..f8c47078a3bb 100644 --- a/drivers/leds/leds-max77650.c +++ b/drivers/leds/leds-max77650.c @@ -62,7 +62,6 @@ static int max77650_led_brightness_set(struct led_classdev *cdev, static int max77650_led_probe(struct platform_device *pdev) { - struct fwnode_handle *child; struct max77650_led *leds, *led; struct device *dev; struct regmap *map; @@ -84,14 +83,12 @@ static int max77650_led_probe(struct platform_device *pdev) if (!num_leds || num_leds > MAX77650_LED_NUM_LEDS) return -ENODEV; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct led_init_data init_data = {}; rv = fwnode_property_read_u32(child, "reg", &reg); - if (rv || reg >= MAX77650_LED_NUM_LEDS) { - rv = -EINVAL; - goto err_node_put; - } + if (rv || reg >= MAX77650_LED_NUM_LEDS) + return -EINVAL; led = &leds[reg]; led->map = map; @@ -108,23 +105,20 @@ static int max77650_led_probe(struct platform_device *pdev) rv = devm_led_classdev_register_ext(dev, &led->cdev, &init_data); if (rv) - goto err_node_put; + return rv; rv = regmap_write(map, led->regA, MAX77650_LED_A_DEFAULT); if (rv) - goto err_node_put; + return rv; rv = regmap_write(map, led->regB, MAX77650_LED_B_DEFAULT); if (rv) - goto err_node_put; + return rv; } return regmap_write(map, MAX77650_REG_CNFG_LED_TOP, MAX77650_LED_TOP_DEFAULT); -err_node_put: - fwnode_handle_put(child); - return rv; } static const struct of_device_id max77650_led_of_match[] = { diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c index da99d114bfb2..e22f09d13798 100644 --- a/drivers/leds/leds-mc13783.c +++ b/drivers/leds/leds-mc13783.c @@ -301,7 +301,7 @@ static struct platform_driver mc13xxx_led_driver = { .driver = { .name = "mc13xxx-led", }, - .remove_new = mc13xxx_led_remove, + .remove = mc13xxx_led_remove, .id_table = mc13xxx_led_id_table, }; module_platform_driver_probe(mc13xxx_led_driver, mc13xxx_led_probe); diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c index a19e8e0b6d1b..dbdc221c3828 100644 --- a/drivers/leds/leds-mt6323.c +++ b/drivers/leds/leds-mt6323.c @@ -713,7 +713,7 @@ MODULE_DEVICE_TABLE(of, mt6323_led_dt_match); static struct platform_driver mt6323_led_driver = { .probe = mt6323_led_probe, -
.remove_new = mt6323_led_remove, + .remove = mt6323_led_remove, .driver = { .name = "mt6323-led", .of_match_table = mt6323_led_dt_match, diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index f3010c472bbd..4c6f04a5bd87 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -238,7 +238,6 @@ static int ns2_led_register(struct device *dev, struct fwnode_handle *node, static int ns2_led_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct fwnode_handle *child; struct ns2_led *leds; int count; int ret; @@ -251,12 +250,10 @@ static int ns2_led_probe(struct platform_device *pdev) if (!leds) return -ENOMEM; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { ret = ns2_led_register(dev, child, leds++); - if (ret) { - fwnode_handle_put(child); + if (ret) return ret; - } } return 0; diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c index b53905da3592..050e93b04884 100644 --- a/drivers/leds/leds-pca963x.c +++ b/drivers/leds/leds-pca963x.c @@ -306,7 +306,6 @@ static int pca963x_register_leds(struct i2c_client *client, struct pca963x_chipdef *chipdef = chip->chipdef; struct pca963x_led *led = chip->leds; struct device *dev = &client->dev; - struct fwnode_handle *child; bool hw_blink; s32 mode2; u32 reg; @@ -338,7 +337,7 @@ static int pca963x_register_leds(struct i2c_client *client, if (ret < 0) return ret; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct led_init_data init_data = {}; char default_label[32]; @@ -346,8 +345,7 @@ ret = fwnode_property_read_u32(child, "reg", &reg); if (ret || reg >= chipdef->n_leds) { dev_err(dev, "Invalid 'reg' property for node %pfw\n", child); - ret = -EINVAL; - goto err; + return -EINVAL; } led->led_num = reg; @@ -369,16 +367,13 @@ if (ret) { dev_err(dev, "Failed to register LED for node %pfw\n", child); - goto err; + return ret; } ++led; } return 0; -err: - fwnode_handle_put(child); - return ret; } static int pca963x_suspend(struct device *dev) diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c index 49ab8c9a3f29..3a38578ce8e4 100644 --- a/drivers/leds/leds-powernv.c +++ b/drivers/leds/leds-powernv.c @@ -323,8 +323,8 @@ static const struct of_device_id powernv_led_match[] = { MODULE_DEVICE_TABLE(of, powernv_led_match); static struct platform_driver powernv_led_driver = { - .probe = powernv_led_probe, - .remove_new = powernv_led_remove, + .probe = powernv_led_probe, + .remove = powernv_led_remove, .driver = { .name = "powernv-led-driver", .of_match_table = powernv_led_match, diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index e1b414b40353..c73134e7b951 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -17,7 +17,6 @@ #include <linux/err.h> #include <linux/pwm.h> #include <linux/slab.h> -#include "leds.h" struct led_pwm { const char *name; @@ -63,6 +62,20 @@ static int led_pwm_set(struct led_classdev *led_cdev, return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate); } +static int led_pwm_default_brightness_get(struct fwnode_handle *fwnode, + int max_brightness) +{ + unsigned int default_brightness; + int ret; + + ret = fwnode_property_read_u32(fwnode, "default-brightness", + &default_brightness); + if (ret < 0 || default_brightness > max_brightness) + default_brightness = max_brightness; + + return default_brightness; +} + __attribute__((nonnull)) static int
led_pwm_add(struct device *dev, struct led_pwm_priv *priv, struct led_pwm *led, struct fwnode_handle *fwnode) @@ -104,7 +117,8 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, /* set brightness */ switch (led->default_state) { case LEDS_DEFSTATE_ON: - led_data->cdev.brightness = led->max_brightness; + led_data->cdev.brightness = + led_pwm_default_brightness_get(fwnode, led->max_brightness); break; case LEDS_DEFSTATE_KEEP: { @@ -140,21 +154,18 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv) { - struct fwnode_handle *fwnode; struct led_pwm led; int ret; - device_for_each_child_node(dev, fwnode) { + device_for_each_child_node_scoped(dev, fwnode) { memset(&led, 0, sizeof(led)); ret = fwnode_property_read_string(fwnode, "label", &led.name); if (ret && is_of_node(fwnode)) led.name = to_of_node(fwnode)->name; - if (!led.name) { - ret = -EINVAL; - goto err_child_out; - } + if (!led.name) + return -EINVAL; led.active_low = fwnode_property_read_bool(fwnode, "active-low"); @@ -165,14 +176,10 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv) ret = led_pwm_add(dev, priv, &led, fwnode); if (ret) - goto err_child_out; + return ret; } return 0; - -err_child_out: - fwnode_handle_put(fwnode); - return ret; } static int led_pwm_probe(struct platform_device *pdev) diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c index e66f73879c8e..782e1c11ee44 100644 --- a/drivers/leds/leds-rb532.c +++ b/drivers/leds/leds-rb532.c @@ -49,7 +49,7 @@ static void rb532_led_remove(struct platform_device *pdev) static struct platform_driver rb532_led_driver = { .probe = rb532_led_probe, - .remove_new = rb532_led_remove, + .remove = rb532_led_remove, .driver = { .name = "rb532-led", }, diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c index 848e929c4a61..ade64629431a 100644 --- a/drivers/leds/leds-regulator.c +++ b/drivers/leds/leds-regulator.c @@ -193,7 +193,7 @@ static struct platform_driver regulator_led_driver = { .of_match_table = regulator_led_of_match, }, .probe = regulator_led_probe, - .remove_new = regulator_led_remove, + .remove = regulator_led_remove, }; module_platform_driver(regulator_led_driver); diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c index cca98c644aa6..0c5169773949 100644 --- a/drivers/leds/leds-sc27xx-bltc.c +++ b/drivers/leds/leds-sc27xx-bltc.c @@ -344,7 +344,7 @@ static struct platform_driver sc27xx_led_driver = { .of_match_table = sc27xx_led_of_match, }, .probe = sc27xx_led_probe, - .remove_new = sc27xx_led_remove, + .remove = sc27xx_led_remove, }; module_platform_driver(sc27xx_led_driver); diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 2ef9fc7371bd..f24ca75c7cb1 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -451,7 +451,7 @@ static ssize_t blink_show(struct device *dev, int blinking = 0; if (nasgpio_led_get_attr(led, GPO_BLINK)) blinking = 1; - return sprintf(buf, "%u\n", blinking); + return sprintf(buf, "%d\n", blinking); } static ssize_t blink_store(struct device *dev, diff --git a/drivers/leds/leds-sun50i-a100.c b/drivers/leds/leds-sun50i-a100.c index 4c468d487486..2c9bd360ab81 100644 --- a/drivers/leds/leds-sun50i-a100.c +++ b/drivers/leds/leds-sun50i-a100.c @@ -392,7 +392,6 @@ static int sun50i_a100_ledc_probe(struct platform_device *pdev) struct sun50i_a100_ledc_led *led; struct device *dev = &pdev->dev; 
struct sun50i_a100_ledc *priv; - struct fwnode_handle *child; struct resource *mem; u32 max_addr = 0; u32 num_leds = 0; @@ -402,21 +401,17 @@ static int sun50i_a100_ledc_probe(struct platform_device *pdev) * The maximum LED address must be known in sun50i_a100_ledc_resume() before * class device registration, so parse and validate the subnodes up front. */ - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { u32 addr, color; ret = fwnode_property_read_u32(child, "reg", &addr); - if (ret || addr >= LEDC_MAX_LEDS) { - fwnode_handle_put(child); + if (ret || addr >= LEDC_MAX_LEDS) return dev_err_probe(dev, -EINVAL, "'reg' must be between 0 and %d\n", LEDC_MAX_LEDS - 1); - } ret = fwnode_property_read_u32(child, "color", &color); - if (ret || color != LED_COLOR_ID_RGB) { - fwnode_handle_put(child); + if (ret || color != LED_COLOR_ID_RGB) return dev_err_probe(dev, -EINVAL, "'color' must be LED_COLOR_ID_RGB\n"); - } max_addr = max(max_addr, addr); num_leds++; @@ -502,7 +497,7 @@ static int sun50i_a100_ledc_probe(struct platform_device *pdev) return ret; led = priv->leds; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct led_classdev *cdev; /* The node was already validated above. */ @@ -527,7 +522,11 @@ static int sun50i_a100_ledc_probe(struct platform_device *pdev) ret = led_classdev_multicolor_register_ext(dev, &led->mc_cdev, &init_data); if (ret) { dev_err_probe(dev, ret, "Failed to register multicolor LED %u", led->addr); - goto err_put_child; + while (led-- > priv->leds) + led_classdev_multicolor_unregister(&led->mc_cdev); + sun50i_a100_ledc_suspend(&pdev->dev); + + return ret; } led++; @@ -536,14 +535,6 @@ static int sun50i_a100_ledc_probe(struct platform_device *pdev) dev_info(dev, "Registered %u LEDs\n", num_leds); return 0; - -err_put_child: - fwnode_handle_put(child); - while (led-- > priv->leds) - led_classdev_multicolor_unregister(&led->mc_cdev); - sun50i_a100_ledc_suspend(&pdev->dev); - - return ret; } static void sun50i_a100_ledc_remove(struct platform_device *pdev) @@ -567,7 +558,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(sun50i_a100_ledc_pm, static struct platform_driver sun50i_a100_ledc_driver = { .probe = sun50i_a100_ledc_probe, - .remove_new = sun50i_a100_ledc_remove, + .remove = sun50i_a100_ledc_remove, .shutdown = sun50i_a100_ledc_remove, .driver = { .name = "sun50i-a100-ledc", diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c index a621e5e5c75c..bd24e7f5947a 100644 --- a/drivers/leds/leds-sunfire.c +++ b/drivers/leds/leds-sunfire.c @@ -219,7 +219,7 @@ MODULE_ALIAS("platform:sunfire-fhc-leds"); static struct platform_driver sunfire_clockboard_led_driver = { .probe = sunfire_clockboard_led_probe, - .remove_new = sunfire_led_generic_remove, + .remove = sunfire_led_generic_remove, .driver = { .name = "sunfire-clockboard-leds", }, @@ -227,7 +227,7 @@ static struct platform_driver sunfire_clockboard_led_driver = { static struct platform_driver sunfire_fhc_led_driver = { .probe = sunfire_fhc_led_probe, - .remove_new = sunfire_led_generic_remove, + .remove = sunfire_led_generic_remove, .driver = { .name = "sunfire-fhc-leds", }, diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c index 4f22f4224946..acbd8169723c 100644 --- a/drivers/leds/leds-tca6507.c +++ b/drivers/leds/leds-tca6507.c @@ -658,7 +658,6 @@ static struct tca6507_platform_data * tca6507_led_dt_init(struct device *dev) { struct tca6507_platform_data *pdata; - struct fwnode_handle *child; struct 
led_info *tca_leds; int count; @@ -671,7 +670,7 @@ tca6507_led_dt_init(struct device *dev) if (!tca_leds) return ERR_PTR(-ENOMEM); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct led_info led; u32 reg; int ret; @@ -688,10 +687,8 @@ tca6507_led_dt_init(struct device *dev) led.flags |= TCA6507_MAKE_GPIO; ret = fwnode_property_read_u32(child, "reg", &reg); - if (ret || reg >= NUM_LEDS) { - fwnode_handle_put(child); + if (ret || reg >= NUM_LEDS) return ERR_PTR(ret ? : -EINVAL); - } tca_leds[reg] = led; } diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c index 4cff8c4b020c..2de825ac08b3 100644 --- a/drivers/leds/leds-turris-omnia.c +++ b/drivers/leds/leds-turris-omnia.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> -#include "leds.h" #define OMNIA_BOARD_LEDS 12 #define OMNIA_LED_NUM_CHANNELS 3 diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index 70b32d80f960..05930e9e8887 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -292,7 +292,7 @@ static struct platform_driver wm831x_status_driver = { .name = "wm831x-status", }, .probe = wm831x_status_probe, - .remove_new = wm831x_status_remove, + .remove = wm831x_status_remove, }; module_platform_driver(wm831x_status_driver); diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c index 61cbefa05710..87e60ea927b9 100644 --- a/drivers/leds/leds-wm8350.c +++ b/drivers/leds/leds-wm8350.c @@ -255,7 +255,7 @@ static struct platform_driver wm8350_led_driver = { .name = "wm8350-led", }, .probe = wm8350_led_probe, - .remove_new = wm8350_led_remove, + .remove = wm8350_led_remove, .shutdown = wm8350_led_shutdown, }; diff --git a/drivers/leds/rgb/leds-group-multicolor.c b/drivers/leds/rgb/leds-group-multicolor.c index b6c7679015fd..548c7dd63ba1 100644 --- a/drivers/leds/rgb/leds-group-multicolor.c +++ b/drivers/leds/rgb/leds-group-multicolor.c @@ -55,7 +55,7 @@ static void restore_sysfs_write_access(void *data) { struct led_classdev *led_cdev = data; - /* Restore the write acccess to the LED */ + /* Restore the write access to the LED */ mutex_lock(&led_cdev->led_access); led_sysfs_enable(led_cdev); mutex_unlock(&led_cdev->led_access); diff --git a/drivers/leds/rgb/leds-ktd202x.c b/drivers/leds/rgb/leds-ktd202x.c index d5c442163c46..04e62faa3a00 100644 --- a/drivers/leds/rgb/leds-ktd202x.c +++ b/drivers/leds/rgb/leds-ktd202x.c @@ -495,7 +495,6 @@ static int ktd202x_add_led(struct ktd202x *chip, struct fwnode_handle *fwnode, u static int ktd202x_probe_fw(struct ktd202x *chip) { - struct fwnode_handle *child; struct device *dev = chip->dev; int count; int i = 0; @@ -509,13 +508,12 @@ static int ktd202x_probe_fw(struct ktd202x *chip) /* Allow the device to execute the complete reset */ usleep_range(200, 300); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { int ret = ktd202x_add_led(chip, child, i); - if (ret) { - fwnode_handle_put(child); + if (ret) return ret; - } + i++; } diff --git a/drivers/leds/rgb/leds-mt6370-rgb.c b/drivers/leds/rgb/leds-mt6370-rgb.c index 10a0b5b45227..ebd3ba878dd5 100644 --- a/drivers/leds/rgb/leds-mt6370-rgb.c +++ b/drivers/leds/rgb/leds-mt6370-rgb.c @@ -587,7 +587,7 @@ static inline int mt6370_mc_pattern_clear(struct led_classdev *lcdev) struct mt6370_led *led = container_of(mccdev, struct mt6370_led, mc); struct mt6370_priv *priv = led->priv; struct mc_subled *subled; - int i, ret; +
int i, ret = 0; mutex_lock(&led->priv->lock); @@ -905,7 +905,6 @@ static int mt6370_leds_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mt6370_priv *priv; - struct fwnode_handle *child; size_t count; unsigned int i = 0; int ret; @@ -936,37 +935,27 @@ static int mt6370_leds_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Failed to allocate regmap field\n"); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct mt6370_led *led = priv->leds + i++; struct led_init_data init_data = { .fwnode = child }; u32 reg, color; ret = fwnode_property_read_u32(child, "reg", &reg); - if (ret) { - dev_err(dev, "Failed to parse reg property\n"); - goto fwnode_release; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to parse reg property\n"); - if (reg >= MT6370_MAX_LEDS) { - ret = -EINVAL; - dev_err(dev, "Error reg property number\n"); - goto fwnode_release; - } + if (reg >= MT6370_MAX_LEDS) + return dev_err_probe(dev, -EINVAL, "Error reg property number\n"); ret = fwnode_property_read_u32(child, "color", &color); - if (ret) { - dev_err(dev, "Failed to parse color property\n"); - goto fwnode_release; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to parse color property\n"); if (color == LED_COLOR_ID_RGB || color == LED_COLOR_ID_MULTI) reg = MT6370_VIRTUAL_MULTICOLOR; - if (priv->leds_active & BIT(reg)) { - ret = -EINVAL; - dev_err(dev, "Duplicate reg property\n"); - goto fwnode_release; - } + if (priv->leds_active & BIT(reg)) + return dev_err_probe(dev, -EINVAL, "Duplicate reg property\n"); priv->leds_active |= BIT(reg); @@ -975,18 +964,14 @@ static int mt6370_leds_probe(struct platform_device *pdev) ret = mt6370_init_led_properties(dev, led, &init_data); if (ret) - goto fwnode_release; + return ret; ret = mt6370_led_register(dev, led, &init_data); if (ret) - goto fwnode_release; + return ret; } return 0; - -fwnode_release: - fwnode_handle_put(child); - return ret; } static const struct of_device_id mt6370_rgbled_device_table[] = { diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c index 726c186391af..c98c370687c2 100644 --- a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c +++ b/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c @@ -53,7 +53,7 @@ static void simatic_ipc_leds_gpio_apollolake_remove(struct platform_device *pdev static struct platform_driver simatic_ipc_led_gpio_apollolake_driver = { .probe = simatic_ipc_leds_gpio_apollolake_probe, - .remove_new = simatic_ipc_leds_gpio_apollolake_remove, + .remove = simatic_ipc_leds_gpio_apollolake_remove, .driver = { .name = KBUILD_MODNAME, }, diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c index 3fec96c549c1..7f7cff275448 100644 --- a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c +++ b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c @@ -43,7 +43,7 @@ static void simatic_ipc_leds_gpio_elkhartlake_remove(struct platform_device *pde static struct platform_driver simatic_ipc_led_gpio_elkhartlake_driver = { .probe = simatic_ipc_leds_gpio_elkhartlake_probe, - .remove_new = simatic_ipc_leds_gpio_elkhartlake_remove, + .remove = simatic_ipc_leds_gpio_elkhartlake_remove, .driver = { .name = KBUILD_MODNAME, }, diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c b/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c index a1f10952513c..bc23d701bcb7 100644 ---
a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c +++ b/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c @@ -93,7 +93,7 @@ static void simatic_ipc_leds_gpio_f7188x_remove(struct platform_device *pdev) static struct platform_driver simatic_ipc_led_gpio_driver = { .probe = simatic_ipc_leds_gpio_f7188x_probe, - .remove_new = simatic_ipc_leds_gpio_f7188x_remove, + .remove = simatic_ipc_leds_gpio_f7188x_remove, .driver = { .name = KBUILD_MODNAME, }, diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c index a4fb16d7db3c..fc1af74b6596 100644 --- a/drivers/macintosh/via-pmu-led.c +++ b/drivers/macintosh/via-pmu-led.c @@ -92,18 +92,15 @@ static int __init via_pmu_led_init(void) if (dt == NULL) return -ENODEV; model = of_get_property(dt, "model", NULL); - if (model == NULL) { - of_node_put(dt); - return -ENODEV; - } + if (!model) + goto put_node; + if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 && strncmp(model, "iBook", strlen("iBook")) != 0 && strcmp(model, "PowerMac7,2") != 0 && - strcmp(model, "PowerMac7,3") != 0) { - of_node_put(dt); - /* ignore */ - return -ENODEV; - } + strcmp(model, "PowerMac7,3") != 0) + goto put_node; + of_node_put(dt); spin_lock_init(&pmu_blink_lock); @@ -112,6 +109,10 @@ static int __init via_pmu_led_init(void) pmu_blink_req.done = pmu_req_done; return led_classdev_register(NULL, &pmu_led); + +put_node: + of_node_put(dt); + return -ENODEV; } late_initcall(via_pmu_led_init); diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 6fb995778636..8ecba7fb999e 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -127,7 +127,7 @@ config STI_MBOX config TI_MESSAGE_MANAGER tristate "Texas Instruments Message Manager Driver" - depends on ARCH_KEYSTONE || ARCH_K3 + depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST default ARCH_K3 help An implementation of Message Manager slave driver for Keystone @@ -168,6 +168,7 @@ config MAILBOX_TEST config POLARFIRE_SOC_MAILBOX tristate "PolarFire SoC (MPFS) Mailbox" depends on HAS_IOMEM + depends on MFD_SYSCON depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST help This driver adds support for the PolarFire SoC (MPFS) mailbox controller. @@ -295,4 +296,14 @@ config QCOM_IPCC acts as an interrupt controller for receiving interrupts from clients. Say Y here if you want to build this driver. +config THEAD_TH1520_MBOX + tristate "T-head TH1520 Mailbox" + depends on ARCH_THEAD || COMPILE_TEST + help + Mailbox driver implementation for the Thead TH-1520 platform. Enables + two cores within the SoC to communicate and coordinate by passing + messages. Could be used to communicate between E910 core, on which the + kernel is running, and E902 core used for power management among other + things. 
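For reference, a controller like the TH1520 one added below is consumed through the generic mailbox client API rather than driver-specific calls. The following is a minimal, hedged sketch of the client side, not part of this series: all demo_* names are invented, the channel index follows the client's mboxes phandle in the device tree, and the 7-word payload matches the INFO0..INFO6 format that mailbox-th1520.c (further down) transfers. Driver registration boilerplate is omitted.

	#include <linux/mailbox_client.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	struct demo_ipc {
		struct mbox_client cl;
		struct mbox_chan *chan;
	};

	/* Runs from the controller's IRQ path; msg points at the received words. */
	static void demo_rx_callback(struct mbox_client *cl, void *msg)
	{
		dev_info(cl->dev, "remote core replied, info0 = 0x%x\n", *(u32 *)msg);
	}

	static int demo_probe(struct platform_device *pdev)
	{
		struct demo_ipc *ipc;
		u32 payload[7] = { 0x1 };	/* first word is the protocol word, non-zero */
		int ret;

		ipc = devm_kzalloc(&pdev->dev, sizeof(*ipc), GFP_KERNEL);
		if (!ipc)
			return -ENOMEM;

		ipc->cl.dev = &pdev->dev;
		ipc->cl.rx_callback = demo_rx_callback;
		ipc->cl.tx_block = true;	/* block until txdone (the remote's ack) */
		ipc->cl.tx_tout = 500;		/* ms */

		ipc->chan = mbox_request_channel(&ipc->cl, 0);
		if (IS_ERR(ipc->chan))
			return PTR_ERR(ipc->chan);

		ret = mbox_send_message(ipc->chan, payload);
		if (ret < 0) {
			mbox_free_channel(ipc->chan);
			return ret;
		}

		return 0;
	}
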
+ endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 3c3c27d54c13..5f4f5b0ce2cc 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -64,3 +64,5 @@ obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o + +obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c index 0ec21dcdbde7..cff7c343ee08 100644 --- a/drivers/mailbox/arm_mhuv2.c +++ b/drivers/mailbox/arm_mhuv2.c @@ -500,7 +500,7 @@ static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = { static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg) { struct mbox_chan *chans = mhu->mbox.chans; - int channel = 0, i, offset = 0, windows, protocol, ch_wn; + int channel = 0, i, j, offset = 0, windows, protocol, ch_wn; u32 stat; for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) { @@ -510,9 +510,9 @@ static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg) ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat); - for (i = 0; i < mhu->length; i += 2) { - protocol = mhu->protocols[i]; - windows = mhu->protocols[i + 1]; + for (j = 0; j < mhu->length; j += 2) { + protocol = mhu->protocols[j]; + windows = mhu->protocols[j + 1]; if (ch_wn >= offset + windows) { if (protocol == DOORBELL) diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index b1abc2a0c971..41f79e51d9e5 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1675,7 +1675,7 @@ static struct platform_driver flexrm_mbox_driver = { .of_match_table = flexrm_mbox_of_match, }, .probe = flexrm_mbox_probe, - .remove_new = flexrm_mbox_remove, + .remove = flexrm_mbox_remove, }; module_platform_driver(flexrm_mbox_driver); diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c index a873672a9082..406bc41cba60 100644 --- a/drivers/mailbox/bcm-pdc-mailbox.c +++ b/drivers/mailbox/bcm-pdc-mailbox.c @@ -1618,7 +1618,7 @@ static void pdc_remove(struct platform_device *pdev) static struct platform_driver pdc_mbox_driver = { .probe = pdc_probe, - .remove_new = pdc_remove, + .remove = pdc_remove, .driver = { .name = "brcm-iproc-pdc-mbox", .of_match_table = pdc_mbox_of_match, diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index f815dab3be50..6ef8338add0d 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -782,7 +782,7 @@ static int imx_mu_init_generic(struct imx_mu_priv *priv) cp->chan = &priv->mbox_chans[i]; priv->mbox_chans[i].con_priv = cp; snprintf(cp->irq_desc, sizeof(cp->irq_desc), - "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx); + "%s[%i-%u]", dev_name(priv->dev), cp->type, cp->idx); } priv->mbox.num_chans = IMX_MU_CHANS; @@ -819,7 +819,7 @@ static int imx_mu_init_specific(struct imx_mu_priv *priv) cp->chan = &priv->mbox_chans[i]; priv->mbox_chans[i].con_priv = cp; snprintf(cp->irq_desc, sizeof(cp->irq_desc), - "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx); + "%s[%i-%u]", dev_name(priv->dev), cp->type, cp->idx); } priv->mbox.num_chans = num_chans; @@ -1120,7 +1120,7 @@ static const struct dev_pm_ops imx_mu_pm_ops = { static struct platform_driver imx_mu_driver = { .probe = imx_mu_probe, - .remove_new = imx_mu_remove, + .remove = imx_mu_remove, .driver = { .name = "imx_mu", .of_match_table = imx_mu_dt_ids, diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c index 
20ee283a04cc..4df546e3b7ea 100644 --- a/drivers/mailbox/mailbox-mpfs.c +++ b/drivers/mailbox/mailbox-mpfs.c @@ -13,12 +13,15 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/regmap.h> #include <linux/interrupt.h> +#include <linux/mfd/syscon.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/mailbox_controller.h> #include <soc/microchip/mpfs.h> +#define MESSAGE_INT_OFFSET 0x18cu #define SERVICES_CR_OFFSET 0x50u #define SERVICES_SR_OFFSET 0x54u #define MAILBOX_REG_OFFSET 0x800u @@ -68,6 +71,7 @@ struct mpfs_mbox { void __iomem *int_reg; struct mbox_chan chans[1]; struct mpfs_mss_response *response; + struct regmap *sysreg_scb, *control_scb; u16 resp_offset; }; @@ -75,7 +79,10 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox) { u32 status; - status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); + if (mbox->control_scb) + regmap_read(mbox->control_scb, SERVICES_SR_OFFSET, &status); + else + status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); return status & SCB_STATUS_BUSY_MASK; } @@ -95,7 +102,11 @@ static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan) * Failed services are intended to generate interrupts, but in reality * this does not happen, so the status must be checked here. */ - val = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); + if (mbox->control_scb) + regmap_read(mbox->control_scb, SERVICES_SR_OFFSET, &val); + else + val = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); + response->resp_status = (val & SCB_STATUS_MASK) >> SCB_STATUS_POS; return true; @@ -143,7 +154,12 @@ static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data) tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK; tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK; - writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET); + + if (mbox->control_scb) + regmap_write(mbox->control_scb, SERVICES_CR_OFFSET, tx_trigger); + else + writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET); + return 0; } @@ -185,7 +201,10 @@ static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data) struct mbox_chan *chan = data; struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; - writel_relaxed(0, mbox->int_reg); + if (mbox->control_scb) + regmap_write(mbox->sysreg_scb, MESSAGE_INT_OFFSET, 0); + else + writel_relaxed(0, mbox->int_reg); mpfs_mbox_rx_data(chan); @@ -221,28 +240,62 @@ static const struct mbox_chan_ops mpfs_mbox_ops = { .last_tx_done = mpfs_mbox_last_tx_done, }; -static int mpfs_mbox_probe(struct platform_device *pdev) +static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform_device *pdev) { - struct mpfs_mbox *mbox; - struct resource *regs; - int ret; + mbox->control_scb = syscon_regmap_lookup_by_compatible("microchip,mpfs-control-scb"); + if (IS_ERR(mbox->control_scb)) + return PTR_ERR(mbox->control_scb); - mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); - if (!mbox) - return -ENOMEM; + mbox->sysreg_scb = syscon_regmap_lookup_by_compatible("microchip,mpfs-sysreg-scb"); + if (IS_ERR(mbox->sysreg_scb)) + return PTR_ERR(mbox->sysreg_scb); + + mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mbox->mbox_base)) + return PTR_ERR(mbox->mbox_base); + + return 0; +} + +static inline int mpfs_mbox_old_format_probe(struct mpfs_mbox *mbox, struct platform_device *pdev) +{ + dev_warn(&pdev->dev, "falling back to old devicetree format"); - mbox->ctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs); +
mbox->ctrl_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mbox->ctrl_base)) return PTR_ERR(mbox->ctrl_base); - mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs); + mbox->int_reg = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(mbox->int_reg)) return PTR_ERR(mbox->int_reg); - mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 2, &regs); + mbox->mbox_base = devm_platform_ioremap_resource(pdev, 2); if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET; + return 0; +} + +static int mpfs_mbox_probe(struct platform_device *pdev) +{ + struct mpfs_mbox *mbox; + int ret; + + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + ret = mpfs_mbox_syscon_probe(mbox, pdev); + if (ret) { + /* + * Set this to NULL, so it can be used later to decide whether + * or not to use the regmaps. + */ + mbox->control_scb = NULL; + ret = mpfs_mbox_old_format_probe(mbox, pdev); + if (ret) + return ret; + } mbox->irq = platform_get_irq(pdev, 0); if (mbox->irq < 0) return mbox->irq; diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 3386b4e72551..c9dd8c42c0cd 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -441,8 +441,8 @@ static struct platform_driver mbox_test_driver = { .name = "mailbox_test", .of_match_table = mbox_test_match, }, - .probe = mbox_test_probe, - .remove_new = mbox_test_remove, + .probe = mbox_test_probe, + .remove = mbox_test_remove, }; module_platform_driver(mbox_test_driver); diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c new file mode 100644 index 000000000000..4e84640ac3b8 --- /dev/null +++ b/drivers/mailbox/mailbox-th1520.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Alibaba Group Holding Limited.
+ */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Status Register */ +#define TH_1520_MBOX_STA 0x0 +#define TH_1520_MBOX_CLR 0x4 +#define TH_1520_MBOX_MASK 0xc + +/* Transmit/receive data register: + * INFO0 ~ INFO6 + */ +#define TH_1520_MBOX_INFO_NUM 8 +#define TH_1520_MBOX_DATA_INFO_NUM 7 +#define TH_1520_MBOX_INFO0 0x14 +/* Transmit ack register: INFO7 */ +#define TH_1520_MBOX_INFO7 0x30 + +/* Generate remote icu IRQ Register */ +#define TH_1520_MBOX_GEN 0x10 +#define TH_1520_MBOX_GEN_RX_DATA BIT(6) +#define TH_1520_MBOX_GEN_TX_ACK BIT(7) + +#define TH_1520_MBOX_CHAN_RES_SIZE 0x1000 +#define TH_1520_MBOX_CHANS 4 +#define TH_1520_MBOX_CHAN_NAME_SIZE 20 + +#define TH_1520_MBOX_ACK_MAGIC 0xdeadbeaf + +#ifdef CONFIG_PM_SLEEP +/* store MBOX context across system-wide suspend/resume transitions */ +struct th1520_mbox_context { + u32 intr_mask[TH_1520_MBOX_CHANS]; +}; +#endif + +enum th1520_mbox_icu_cpu_id { + TH_1520_MBOX_ICU_KERNEL_CPU0, /* 910T */ + TH_1520_MBOX_ICU_CPU1, /* 902 */ + TH_1520_MBOX_ICU_CPU2, /* 906 */ + TH_1520_MBOX_ICU_CPU3, /* 910R */ +}; + +struct th1520_mbox_con_priv { + enum th1520_mbox_icu_cpu_id idx; + void __iomem *comm_local_base; + void __iomem *comm_remote_base; + char irq_desc[TH_1520_MBOX_CHAN_NAME_SIZE]; + struct mbox_chan *chan; +}; + +struct th1520_mbox_priv { + struct device *dev; + void __iomem *local_icu[TH_1520_MBOX_CHANS]; + void __iomem *remote_icu[TH_1520_MBOX_CHANS - 1]; + void __iomem *cur_cpu_ch_base; + spinlock_t mbox_lock; /* control register lock */ + + struct mbox_controller mbox; + struct mbox_chan mbox_chans[TH_1520_MBOX_CHANS]; + struct clk_bulk_data clocks[TH_1520_MBOX_CHANS]; + struct th1520_mbox_con_priv con_priv[TH_1520_MBOX_CHANS]; + int irq; +#ifdef CONFIG_PM_SLEEP + struct th1520_mbox_context *ctx; +#endif +}; + +static struct th1520_mbox_priv * +to_th1520_mbox_priv(struct mbox_controller *mbox) +{ + return container_of(mbox, struct th1520_mbox_priv, mbox); +} + +static void th1520_mbox_write(struct th1520_mbox_priv *priv, u32 val, u32 offs) +{ + iowrite32(val, priv->cur_cpu_ch_base + offs); +} + +static u32 th1520_mbox_read(struct th1520_mbox_priv *priv, u32 offs) +{ + return ioread32(priv->cur_cpu_ch_base + offs); +} + +static u32 th1520_mbox_rmw(struct th1520_mbox_priv *priv, u32 off, u32 set, + u32 clr) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&priv->mbox_lock, flags); + val = th1520_mbox_read(priv, off); + val &= ~clr; + val |= set; + th1520_mbox_write(priv, val, off); + spin_unlock_irqrestore(&priv->mbox_lock, flags); + + return val; +} + +static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val, + u32 offs, bool is_remote) +{ + if (is_remote) + iowrite32(val, cp->comm_remote_base + offs); + else + iowrite32(val, cp->comm_local_base + offs); +} + +static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs, + bool is_remote) +{ + if (is_remote) + return ioread32(cp->comm_remote_base + offs); + else + return ioread32(cp->comm_local_base + offs); +} + +static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off, + u32 set, u32 clr, bool is_remote) +{ + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&priv->mbox_lock, flags); + val = th1520_mbox_chan_read(cp, off,
is_remote); + val &= ~clr; + val |= set; + th1520_mbox_chan_write(cp, val, off, is_remote); + spin_unlock_irqrestore(&priv->mbox_lock, flags); +} + +static void th1520_mbox_chan_rd_data(struct th1520_mbox_con_priv *cp, + void *data, bool is_remote) +{ + u32 off = TH_1520_MBOX_INFO0; + u32 *arg = data; + u32 i; + + /* Read info0 ~ info6, 28 bytes in total; + * the destination buffer must be at least 28 bytes. + */ + for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) { + *arg = th1520_mbox_chan_read(cp, off, is_remote); + off += 4; + arg++; + } +} + +static void th1520_mbox_chan_wr_data(struct th1520_mbox_con_priv *cp, + void *data, bool is_remote) +{ + u32 off = TH_1520_MBOX_INFO0; + u32 *arg = data; + u32 i; + + /* Write info0 ~ info6, 28 bytes in total; + * the source buffer must hold 28 bytes of valid data. + */ + for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) { + th1520_mbox_chan_write(cp, *arg, off, is_remote); + off += 4; + arg++; + } +} + +static void th1520_mbox_chan_wr_ack(struct th1520_mbox_con_priv *cp, void *data, + bool is_remote) +{ + u32 off = TH_1520_MBOX_INFO7; + u32 *arg = data; + + th1520_mbox_chan_write(cp, *arg, off, is_remote); +} + +static int th1520_mbox_chan_id_to_mapbit(struct th1520_mbox_con_priv *cp) +{ + int mapbit = 0; + int i; + + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + if (i == cp->idx) + return mapbit; + + if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) + mapbit++; + } + + if (i == TH_1520_MBOX_CHANS) + dev_err(cp->chan->mbox->dev, "convert to mapbit failed\n"); + + return 0; +} + +static irqreturn_t th1520_mbox_isr(int irq, void *p) +{ + struct mbox_chan *chan = p; + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox); + struct th1520_mbox_con_priv *cp = chan->con_priv; + int mapbit = th1520_mbox_chan_id_to_mapbit(cp); + u32 sta, dat[TH_1520_MBOX_DATA_INFO_NUM]; + u32 ack_magic = TH_1520_MBOX_ACK_MAGIC; + u32 info0_data, info7_data; + + sta = th1520_mbox_read(priv, TH_1520_MBOX_STA); + if (!(sta & BIT(mapbit))) + return IRQ_NONE; + + /* clear chan irq bit in STA register */ + th1520_mbox_rmw(priv, TH_1520_MBOX_CLR, BIT(mapbit), 0); + + /* info0 is the protocol word, should not be zero!
*/ + info0_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO0, false); + if (info0_data) { + /* read info0~info6 data */ + th1520_mbox_chan_rd_data(cp, dat, false); + + /* clear local info0 */ + th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO0, false); + + /* notify remote cpu */ + th1520_mbox_chan_wr_ack(cp, &ack_magic, true); + /* CPU1 (902) and CPU2 (906) use polling mode to monitor info7 */ + if (cp->idx != TH_1520_MBOX_ICU_CPU1 && + cp->idx != TH_1520_MBOX_ICU_CPU2) + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, + TH_1520_MBOX_GEN_TX_ACK, 0, true); + + /* transfer the data to client */ + mbox_chan_received_data(chan, (void *)dat); + } + + /* the magic value in info7 means a real ack signal, not the generated bit7 */ + info7_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO7, false); + if (info7_data == TH_1520_MBOX_ACK_MAGIC) { + /* clear local info7 */ + th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO7, false); + + /* notify framework the last TX has completed */ + mbox_chan_txdone(chan, 0); + } + + if (!info0_data && !info7_data) + return IRQ_NONE; + + return IRQ_HANDLED; +} + +static int th1520_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct th1520_mbox_con_priv *cp = chan->con_priv; + + th1520_mbox_chan_wr_data(cp, data, true); + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, TH_1520_MBOX_GEN_RX_DATA, 0, + true); + return 0; +} + +static int th1520_mbox_startup(struct mbox_chan *chan) +{ + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox); + struct th1520_mbox_con_priv *cp = chan->con_priv; + u32 data[8] = {}; + int mask_bit; + int ret; + + /* clear local and remote generate and info0~info7 */ + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, true); + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, false); + th1520_mbox_chan_wr_ack(cp, &data[7], true); + th1520_mbox_chan_wr_ack(cp, &data[7], false); + th1520_mbox_chan_wr_data(cp, &data[0], true); + th1520_mbox_chan_wr_data(cp, &data[0], false); + + /* enable the chan mask */ + mask_bit = th1520_mbox_chan_id_to_mapbit(cp); + th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, BIT(mask_bit), 0); + + /* + * Mixing devm_ managed resources with manual IRQ handling is generally + * discouraged due to potential complexities with resource management, + * especially when dealing with shared interrupts. However, in this case, + * the approach is safe and effective because: + * + * 1. Each mailbox channel requests its IRQ within the .startup() callback + * and frees it within the .shutdown() callback. + * 2. During device unbinding, the devm_ managed mailbox controller first + * iterates through all channels, ensuring that their IRQs are freed before + * any other devm_ resources are released. + * + * This ordering guarantees that no interrupts can be triggered from the device + * while it is being unbound, preventing race conditions and ensuring system + * stability.
+ */ + ret = request_irq(priv->irq, th1520_mbox_isr, + IRQF_SHARED | IRQF_NO_SUSPEND, cp->irq_desc, chan); + if (ret) { + dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq); + return ret; + } + + return 0; +} + +static void th1520_mbox_shutdown(struct mbox_chan *chan) +{ + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox); + struct th1520_mbox_con_priv *cp = chan->con_priv; + int mask_bit; + + free_irq(priv->irq, chan); + + /* clear the chan mask */ + mask_bit = th1520_mbox_chan_id_to_mapbit(cp); + th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, 0, BIT(mask_bit)); +} + +static const struct mbox_chan_ops th1520_mbox_ops = { + .send_data = th1520_mbox_send_data, + .startup = th1520_mbox_startup, + .shutdown = th1520_mbox_shutdown, +}; + +static int th1520_mbox_init_generic(struct th1520_mbox_priv *priv) +{ +#ifdef CONFIG_PM_SLEEP + priv->ctx = devm_kzalloc(priv->dev, sizeof(*priv->ctx), GFP_KERNEL); + if (!priv->ctx) + return -ENOMEM; +#endif + /* Set default configuration */ + th1520_mbox_write(priv, 0xff, TH_1520_MBOX_CLR); + th1520_mbox_write(priv, 0x0, TH_1520_MBOX_MASK); + return 0; +} + +static struct mbox_chan *th1520_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + u32 chan; + + if (sp->args_count != 1) { + dev_err(mbox->dev, "Invalid argument count %d\n", + sp->args_count); + return ERR_PTR(-EINVAL); + } + + chan = sp->args[0]; /* comm remote channel */ + + if (chan >= mbox->num_chans) { + dev_err(mbox->dev, "Not supported channel number: %u\n", chan); + return ERR_PTR(-EINVAL); + } + + if (chan == TH_1520_MBOX_ICU_KERNEL_CPU0) { + dev_err(mbox->dev, "Cannot communicate with yourself\n"); + return ERR_PTR(-EINVAL); + } + + return &mbox->chans[chan]; +} + +static void __iomem *th1520_map_mmio(struct platform_device *pdev, + char *res_name, size_t offset) +{ + void __iomem *mapped; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); + + if (!res) { + dev_err(&pdev->dev, "Failed to get resource: %s\n", res_name); + return ERR_PTR(-EINVAL); + } + + mapped = devm_ioremap(&pdev->dev, res->start + offset, + resource_size(res) - offset); + if (!mapped) { + /* devm_ioremap() returns NULL on failure, not an ERR_PTR */ + dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name); + return ERR_PTR(-ENOMEM); + } + + return mapped; +} + +static void th1520_disable_clk(void *data) +{ + struct th1520_mbox_priv *priv = data; + + clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks); +} + +static int th1520_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct th1520_mbox_priv *priv; + unsigned int remote_idx = 0; + unsigned int i; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + priv->clocks[0].id = "clk-local"; + priv->clocks[1].id = "clk-remote-icu0"; + priv->clocks[2].id = "clk-remote-icu1"; + priv->clocks[3].id = "clk-remote-icu2"; + + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), + priv->clocks); + if (ret) { + dev_err(dev, "Failed to get clocks\n"); + return ret; + } + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks); + if (ret) { + dev_err(dev, "Failed to enable clocks\n"); + return ret; + } + + /* on failure, devm_add_action_or_reset() already runs the action */ + ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv); + if (ret) + return ret; + + /* + * The address mappings in the device tree align precisely with those + * outlined in the manual.
However, register offsets within these + * mapped regions are irregular, particularly for remote-icu0. + * Consequently, th1520_map_mmio() requires an additional parameter to + * handle this quirk. + */ + priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] = + th1520_map_mmio(pdev, "local", 0x0); + if (IS_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0])) + return PTR_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]); + + priv->remote_icu[0] = th1520_map_mmio(pdev, "remote-icu0", 0x4000); + if (IS_ERR(priv->remote_icu[0])) + return PTR_ERR(priv->remote_icu[0]); + + priv->remote_icu[1] = th1520_map_mmio(pdev, "remote-icu1", 0x0); + if (IS_ERR(priv->remote_icu[1])) + return PTR_ERR(priv->remote_icu[1]); + + priv->remote_icu[2] = th1520_map_mmio(pdev, "remote-icu2", 0x0); + if (IS_ERR(priv->remote_icu[2])) + return PTR_ERR(priv->remote_icu[2]); + + priv->local_icu[TH_1520_MBOX_ICU_CPU1] = + priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] + + TH_1520_MBOX_CHAN_RES_SIZE; + priv->local_icu[TH_1520_MBOX_ICU_CPU2] = + priv->local_icu[TH_1520_MBOX_ICU_CPU1] + + TH_1520_MBOX_CHAN_RES_SIZE; + priv->local_icu[TH_1520_MBOX_ICU_CPU3] = + priv->local_icu[TH_1520_MBOX_ICU_CPU2] + + TH_1520_MBOX_CHAN_RES_SIZE; + + priv->cur_cpu_ch_base = priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]; + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) + return priv->irq; + + /* init the chans */ + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + struct th1520_mbox_con_priv *cp = &priv->con_priv[i]; + + cp->idx = i; + cp->chan = &priv->mbox_chans[i]; + priv->mbox_chans[i].con_priv = cp; + snprintf(cp->irq_desc, sizeof(cp->irq_desc), + "th1520_mbox_chan[%i]", cp->idx); + + cp->comm_local_base = priv->local_icu[i]; + if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) { + cp->comm_remote_base = priv->remote_icu[remote_idx]; + remote_idx++; + } + } + + spin_lock_init(&priv->mbox_lock); + + priv->mbox.dev = dev; + priv->mbox.ops = &th1520_mbox_ops; + priv->mbox.chans = priv->mbox_chans; + priv->mbox.num_chans = TH_1520_MBOX_CHANS; + priv->mbox.of_xlate = th1520_mbox_xlate; + priv->mbox.txdone_irq = true; + + platform_set_drvdata(pdev, priv); + + ret = th1520_mbox_init_generic(priv); + if (ret) { + dev_err(dev, "Failed to init mailbox context\n"); + return ret; + } + + return devm_mbox_controller_register(dev, &priv->mbox); +} + +static const struct of_device_id th1520_mbox_dt_ids[] = { + { .compatible = "thead,th1520-mbox" }, + {} +}; +MODULE_DEVICE_TABLE(of, th1520_mbox_dt_ids); + +#ifdef CONFIG_PM_SLEEP +static int __maybe_unused th1520_mbox_suspend_noirq(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + struct th1520_mbox_context *ctx = priv->ctx; + u32 i; + /* + * ONLY the interrupt mask bits need to be stored and restored; + * all INFO data is assumed to be lost.
+ */ + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + ctx->intr_mask[i] = + ioread32(priv->local_icu[i] + TH_1520_MBOX_MASK); + } + return 0; +} + +static int __maybe_unused th1520_mbox_resume_noirq(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + struct th1520_mbox_context *ctx = priv->ctx; + u32 i; + + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + iowrite32(ctx->intr_mask[i], + priv->local_icu[i] + TH_1520_MBOX_MASK); + } + + return 0; +} +#endif + +static int __maybe_unused th1520_mbox_runtime_suspend(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks); + + return 0; +} + +static int __maybe_unused th1520_mbox_runtime_resume(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks); + if (ret) + dev_err(dev, "Failed to enable clocks in runtime resume\n"); + + return ret; +} + +static const struct dev_pm_ops th1520_mbox_pm_ops = { +#ifdef CONFIG_PM_SLEEP + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(th1520_mbox_suspend_noirq, + th1520_mbox_resume_noirq) +#endif + SET_RUNTIME_PM_OPS(th1520_mbox_runtime_suspend, + th1520_mbox_runtime_resume, NULL) +}; + +static struct platform_driver th1520_mbox_driver = { + .probe = th1520_mbox_probe, + .driver = { + .name = "th1520-mbox", + .of_match_table = th1520_mbox_dt_ids, + .pm = &th1520_mbox_pm_ops, + }, +}; +module_platform_driver(th1520_mbox_driver); + +MODULE_DESCRIPTION("Thead TH-1520 mailbox IPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index 4bff73532085..d186865b8dce 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -397,7 +397,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) task = kzalloc(sizeof(*task), GFP_ATOMIC); if (!task) { - pm_runtime_put_autosuspend(cmdq->mbox.dev); + __pm_runtime_put_autosuspend(cmdq->mbox.dev); return -ENOMEM; } @@ -447,7 +447,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) list_move_tail(&task->list_entry, &thread->task_busy_list); pm_runtime_mark_last_busy(cmdq->mbox.dev); - pm_runtime_put_autosuspend(cmdq->mbox.dev); + __pm_runtime_put_autosuspend(cmdq->mbox.dev); return 0; } @@ -495,7 +495,7 @@ done: spin_unlock_irqrestore(&thread->chan->lock, flags); pm_runtime_mark_last_busy(cmdq->mbox.dev); - pm_runtime_put_autosuspend(cmdq->mbox.dev); + __pm_runtime_put_autosuspend(cmdq->mbox.dev); } static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) @@ -535,7 +535,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) out: spin_unlock_irqrestore(&thread->chan->lock, flags); pm_runtime_mark_last_busy(cmdq->mbox.dev); - pm_runtime_put_autosuspend(cmdq->mbox.dev); + __pm_runtime_put_autosuspend(cmdq->mbox.dev); return 0; @@ -550,7 +550,7 @@ wait: return -EFAULT; } pm_runtime_mark_last_busy(cmdq->mbox.dev); - pm_runtime_put_autosuspend(cmdq->mbox.dev); + __pm_runtime_put_autosuspend(cmdq->mbox.dev); return 0; } @@ -584,7 +584,7 @@ static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq) struct clk_bulk_data *clks; cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num, - sizeof(cmdq->clocks), GFP_KERNEL); + sizeof(*cmdq->clocks), GFP_KERNEL); if (!cmdq->clocks) return -ENOMEM; @@ -796,7 +796,7 @@ MODULE_DEVICE_TABLE(of, cmdq_of_ids); static struct platform_driver cmdq_drv = { .probe = 
cmdq_probe, - .remove_new = cmdq_remove, + .remove = cmdq_remove, .driver = { .name = "mtk_cmdq", .pm = &cmdq_pm_ops, diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 94885e411085..82102a4c5d68 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -269,6 +269,35 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan) return !!val; } +static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan) +{ + struct acpi_pcct_ext_pcc_shared_memory pcc_hdr; + + if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) + return; + /* If the memory region has not been mapped, we cannot + * determine if we need to send the message, but we still + * need to set the cmd_update flag before returning. + */ + if (pchan->chan.shmem == NULL) { + pcc_chan_reg_read_modify_write(&pchan->cmd_update); + return; + } + memcpy_fromio(&pcc_hdr, pchan->chan.shmem, + sizeof(struct acpi_pcct_ext_pcc_shared_memory)); + /* + * The PCC slave subspace channel needs to set the command complete bit + * after processing message. If the PCC_ACK_FLAG is set, it should also + * ring the doorbell. + * + * The PCC master subspace channel clears chan_in_use to free channel. + */ + if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK) + pcc_send_data(chan, NULL); + else + pcc_chan_reg_read_modify_write(&pchan->cmd_update); +} + /** * pcc_mbox_irq - PCC mailbox interrupt handler * @irq: interrupt number @@ -306,14 +335,7 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p) mbox_chan_received_data(chan, NULL); - /* - * The PCC slave subspace channel needs to set the command complete bit - * and ring doorbell after processing message. - * - * The PCC master subspace channel clears chan_in_use to free channel. - */ - if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) - pcc_send_data(chan, NULL); + check_and_ack(pchan, chan); pchan->chan_in_use = false; return IRQ_HANDLED; @@ -365,14 +387,37 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan) { struct mbox_chan *chan = pchan->mchan; + struct pcc_chan_info *pchan_info; + struct pcc_mbox_chan *pcc_mbox_chan; if (!chan || !chan->cl) return; + pchan_info = chan->con_priv; + pcc_mbox_chan = &pchan_info->chan; + if (pcc_mbox_chan->shmem) { + iounmap(pcc_mbox_chan->shmem); + pcc_mbox_chan->shmem = NULL; + } mbox_free_channel(chan); } EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); +int pcc_mbox_ioremap(struct mbox_chan *chan) +{ + struct pcc_chan_info *pchan_info; + struct pcc_mbox_chan *pcc_mbox_chan; + + if (!chan || !chan->cl) + return -1; + pchan_info = chan->con_priv; + pcc_mbox_chan = &pchan_info->chan; + pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr, + pcc_mbox_chan->shmem_size); + return 0; +} +EXPORT_SYMBOL_GPL(pcc_mbox_ioremap); + /** * pcc_send_data - Called from Mailbox Controller code. Used * here only to ring the channel doorbell. 
The PCC client diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c index 7d91e7c016ba..f0d1fc0fb9ff 100644 --- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c @@ -167,7 +167,7 @@ MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match); static struct platform_driver qcom_apcs_ipc_driver = { .probe = qcom_apcs_ipc_probe, - .remove_new = qcom_apcs_ipc_remove, + .remove = qcom_apcs_ipc_remove, .driver = { .name = "qcom_apcs_ipc", .of_match_table = qcom_apcs_ipc_of_match, diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index d537cc9c4d4b..14c7907c6632 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -346,7 +346,7 @@ static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = { static struct platform_driver qcom_ipcc_driver = { .probe = qcom_ipcc_probe, - .remove_new = qcom_ipcc_remove, + .remove = qcom_ipcc_remove, .driver = { .name = "qcom-ipcc", .of_match_table = qcom_ipcc_of_match, diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c index 1442f275782b..4f63f1a14ca6 100644 --- a/drivers/mailbox/stm32-ipcc.c +++ b/drivers/mailbox/stm32-ipcc.c @@ -379,7 +379,7 @@ static struct platform_driver stm32_ipcc_driver = { .of_match_table = stm32_ipcc_of_match, }, .probe = stm32_ipcc_probe, - .remove_new = stm32_ipcc_remove, + .remove = stm32_ipcc_remove, }; module_platform_driver(stm32_ipcc_driver); diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c index 3dcc54dc83b2..6ba6920f4645 100644 --- a/drivers/mailbox/sun6i-msgbox.c +++ b/drivers/mailbox/sun6i-msgbox.c @@ -307,8 +307,8 @@ static struct platform_driver sun6i_msgbox_driver = { .name = "sun6i-msgbox", .of_match_table = sun6i_msgbox_of_match, }, - .probe = sun6i_msgbox_probe, - .remove_new = sun6i_msgbox_remove, + .probe = sun6i_msgbox_probe, + .remove = sun6i_msgbox_remove, }; module_platform_driver(sun6i_msgbox_driver); diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index 19ef56cbcfd3..8d5e2d7dc03b 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -951,7 +951,7 @@ static struct platform_driver tegra_hsp_driver = { .pm = &tegra_hsp_pm_ops, }, .probe = tegra_hsp_probe, - .remove_new = tegra_hsp_remove, + .remove = tegra_hsp_remove, }; static int __init tegra_hsp_init(void) diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c index 9d2d4ff6cda4..8eb8df8d95a4 100644 --- a/drivers/mailbox/ti-msgmgr.c +++ b/drivers/mailbox/ti-msgmgr.c @@ -920,7 +920,7 @@ static struct platform_driver ti_msgmgr_driver = { .probe = ti_msgmgr_probe, .driver = { .name = "ti-msgmgr", - .of_match_table = of_match_ptr(ti_msgmgr_of_match), + .of_match_table = ti_msgmgr_of_match, .pm = &ti_msgmgr_pm_ops, }, }; diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index 521d08b9ab47..aa5249da59b2 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -940,10 +940,10 @@ static int zynqmp_ipi_probe(struct platform_device *pdev) pdata->num_mboxes = num_mboxes; mbox = pdata->ipi_mboxes; - mbox->setup_ipi_fn = ipi_fn; - for_each_available_child_of_node(np, nc) { mbox->pdata = pdata; + mbox->setup_ipi_fn = ipi_fn; + ret = zynqmp_ipi_mbox_probe(mbox, nc); if (ret) { of_node_put(nc); @@ -1015,7 +1015,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match); static struct platform_driver zynqmp_ipi_driver = { .probe = zynqmp_ipi_probe, - .remove_new = zynqmp_ipi_remove, + .remove = 
zynqmp_ipi_remove, .driver = { .name = "zynqmp-ipi", .of_match_table = of_match_ptr(zynqmp_ipi_of_match), diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index b2d10063d35f..d4697e79d5a3 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -5,6 +5,7 @@ config BCACHE select BLOCK_HOLDER_DEPRECATED if SYSFS select CRC64 select CLOSURES + select MIN_HEAP help Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index da50f6661bae..8998e61efa40 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -189,23 +189,16 @@ static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args) return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs); } -static inline void new_bucket_swap(void *l, void *r, void __always_unused *args) -{ - struct bucket **lhs = l, **rhs = r; - - swap(*lhs, *rhs); -} - static void invalidate_buckets_lru(struct cache *ca) { struct bucket *b; const struct min_heap_callbacks bucket_max_cmp_callback = { .less = new_bucket_max_cmp, - .swp = new_bucket_swap, + .swp = NULL, }; const struct min_heap_callbacks bucket_min_cmp_callback = { .less = new_bucket_min_cmp, - .swp = new_bucket_swap, + .swp = NULL, }; ca->heap.nr = 0; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index bd97d8626887..68258a16e125 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1093,14 +1093,6 @@ static inline bool new_btree_iter_cmp(const void *l, const void *r, void __alway return bkey_cmp(_l->k, _r->k) <= 0; } -static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args) -{ - struct btree_iter_set *_iter1 = iter1; - struct btree_iter_set *_iter2 = iter2; - - swap(*_iter1, *_iter2); -} - static inline bool btree_iter_end(struct btree_iter *iter) { return !iter->heap.nr; @@ -1111,7 +1103,7 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, { const struct min_heap_callbacks callbacks = { .less = new_btree_iter_cmp, - .swp = new_btree_iter_swap, + .swp = NULL, }; if (k != end) @@ -1157,7 +1149,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, struct bkey *ret = NULL; const struct min_heap_callbacks callbacks = { .less = cmp, - .swp = new_btree_iter_swap, + .swp = NULL, }; if (!btree_iter_end(iter)) { @@ -1231,7 +1223,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out, : bch_ptr_invalid; const struct min_heap_callbacks callbacks = { .less = b->ops->sort_cmp, - .swp = new_btree_iter_swap, + .swp = NULL, }; /* Heapify the iterator, using our comparison function */ diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index a7221e5dbe81..4b84fda1530a 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -266,20 +266,12 @@ static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_ return !(c ? 
c > 0 : _l->k < _r->k); } -static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args) -{ - struct btree_iter_set *_iter1 = iter1; - struct btree_iter_set *_iter2 = iter2; - - swap(*_iter1, *_iter2); -} - static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter, struct bkey *tmp) { const struct min_heap_callbacks callbacks = { .less = new_bch_extent_sort_cmp, - .swp = new_btree_iter_swap, + .swp = NULL, }; while (iter->heap.nr > 1) { struct btree_iter_set *top = iter->heap.data, *i = top + 1; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7f482729c56d..ef6abf33f926 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -190,14 +190,6 @@ static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *a return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r); } -static void new_bucket_swap(void *l, void *r, void __always_unused *args) -{ - struct bucket **_l = l; - struct bucket **_r = r; - - swap(*_l, *_r); -} - static unsigned int bucket_heap_top(struct cache *ca) { struct bucket *b; @@ -212,7 +204,7 @@ void bch_moving_gc(struct cache_set *c) unsigned long sectors_to_move, reserve_sectors; const struct min_heap_callbacks callbacks = { .less = new_bucket_cmp, - .swp = new_bucket_swap, + .swp = NULL, }; if (!c->copy_gc_enabled) diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c index bca0f39e15b8..b4d1c4329df3 100644 --- a/drivers/md/dm-bio-prison-v1.c +++ b/drivers/md/dm-bio-prison-v1.c @@ -198,15 +198,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, } EXPORT_SYMBOL_GPL(dm_bio_detain); -int dm_get_cell(struct dm_bio_prison *prison, - struct dm_cell_key *key, - struct dm_bio_prison_cell *cell_prealloc, - struct dm_bio_prison_cell **cell_result) -{ - return bio_detain(prison, key, NULL, cell_prealloc, cell_result); -} -EXPORT_SYMBOL_GPL(dm_get_cell); - /* * @inmates must have been initialised prior to this call */ @@ -288,32 +279,6 @@ void dm_cell_visit_release(struct dm_bio_prison *prison, } EXPORT_SYMBOL_GPL(dm_cell_visit_release); -static int __promote_or_release(struct rb_root *root, - struct dm_bio_prison_cell *cell) -{ - if (bio_list_empty(&cell->bios)) { - rb_erase(&cell->node, root); - return 1; - } - - cell->holder = bio_list_pop(&cell->bios); - return 0; -} - -int dm_cell_promote_or_release(struct dm_bio_prison *prison, - struct dm_bio_prison_cell *cell) -{ - int r; - unsigned l = lock_nr(&cell->key, prison->num_locks); - - spin_lock_irq(&prison->regions[l].lock); - r = __promote_or_release(&prison->regions[l].cell, cell); - spin_unlock_irq(&prison->regions[l].lock); - - return r; -} -EXPORT_SYMBOL_GPL(dm_cell_promote_or_release); - /*----------------------------------------------------------------*/ #define DEFERRED_SET_SIZE 64 diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h index 2a097ed0d85e..d39706c48447 100644 --- a/drivers/md/dm-bio-prison-v1.h +++ b/drivers/md/dm-bio-prison-v1.h @@ -73,17 +73,6 @@ void dm_bio_prison_free_cell(struct dm_bio_prison *prison, struct dm_bio_prison_cell *cell); /* - * Creates, or retrieves a cell that overlaps the given key. - * - * Returns 1 if pre-existing cell returned, zero if new cell created using - * @cell_prealloc. - */ -int dm_get_cell(struct dm_bio_prison *prison, - struct dm_cell_key *key, - struct dm_bio_prison_cell *cell_prealloc, - struct dm_bio_prison_cell **cell_result); - -/* * Returns false if key is beyond BIO_PRISON_MAX_RANGE or spans a boundary. 
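A note on the bcache hunks above: every deleted new_*_swap() helper was just swap(*l, *r), and with the MIN_HEAP library these conversions rely on, a NULL .swp callback selects the built-in byte-wise swap, so the helpers can be dropped outright. A minimal, hedged sketch of the pattern follows; the example_* names are invented, and it assumes the same min_heap API the bcache code here uses.

	#include <linux/min_heap.h>

	DEFINE_MIN_HEAP(u32, example_min_heap);

	static bool example_less(const void *l, const void *r, void __always_unused *args)
	{
		return *(const u32 *)l < *(const u32 *)r;
	}

	static const struct min_heap_callbacks example_callbacks = {
		.less = example_less,
		.swp = NULL,	/* NULL selects the default built-in swap */
	};

	/* Pop the smallest element; assumes the heap is non-empty. */
	static u32 example_pop_min(struct example_min_heap *heap)
	{
		u32 min = *(u32 *)min_heap_peek(heap);

		min_heap_pop(heap, &example_callbacks, NULL);
		return min;
	}
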
*/ bool dm_cell_key_has_valid_range(struct dm_cell_key *key); @@ -117,19 +106,6 @@ void dm_cell_visit_release(struct dm_bio_prison *prison, void (*visit_fn)(void *, struct dm_bio_prison_cell *), void *context, struct dm_bio_prison_cell *cell); -/* - * Rather than always releasing the prisoners in a cell, the client may - * want to promote one of them to be the new holder. There is a race here - * though between releasing an empty cell, and other threads adding new - * inmates. So this function makes the decision with its lock held. - * - * This function can have two outcomes: - * i) An inmate is promoted to be the holder of the cell (return value of 0). - * ii) The cell has no inmate for promotion and is released (return value of 1). - */ -int dm_cell_promote_or_release(struct dm_bio_prison *prison, - struct dm_bio_prison_cell *cell); - /*----------------------------------------------------------------*/ /* diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 23e0b71b991e..aab8240429b0 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -318,9 +318,10 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con */ enum data_mode { DATA_MODE_SLAB = 0, - DATA_MODE_GET_FREE_PAGES = 1, - DATA_MODE_VMALLOC = 2, - DATA_MODE_LIMIT = 3 + DATA_MODE_KMALLOC = 1, + DATA_MODE_GET_FREE_PAGES = 2, + DATA_MODE_VMALLOC = 3, + DATA_MODE_LIMIT = 4 }; struct dm_buffer { @@ -1062,6 +1063,7 @@ static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; static unsigned long dm_bufio_peak_allocated; static unsigned long dm_bufio_allocated_kmem_cache; +static unsigned long dm_bufio_allocated_kmalloc; static unsigned long dm_bufio_allocated_get_free_pages; static unsigned long dm_bufio_allocated_vmalloc; static unsigned long dm_bufio_current_allocated; @@ -1104,6 +1106,7 @@ static void adjust_total_allocated(struct dm_buffer *b, bool unlink) static unsigned long * const class_ptr[DATA_MODE_LIMIT] = { &dm_bufio_allocated_kmem_cache, + &dm_bufio_allocated_kmalloc, &dm_bufio_allocated_get_free_pages, &dm_bufio_allocated_vmalloc, }; @@ -1181,6 +1184,11 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, return kmem_cache_alloc(c->slab_cache, gfp_mask); } + if (unlikely(c->block_size < PAGE_SIZE)) { + *data_mode = DATA_MODE_KMALLOC; + return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE); + } + if (c->block_size <= KMALLOC_MAX_SIZE && gfp_mask & __GFP_NORETRY) { *data_mode = DATA_MODE_GET_FREE_PAGES; @@ -1204,6 +1212,10 @@ static void free_buffer_data(struct dm_bufio_client *c, kmem_cache_free(c->slab_cache, data); break; + case DATA_MODE_KMALLOC: + kfree(data); + break; + case DATA_MODE_GET_FREE_PAGES: free_pages((unsigned long)data, c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); @@ -2519,8 +2531,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign goto bad_dm_io; } - if (block_size <= KMALLOC_MAX_SIZE && - (block_size < PAGE_SIZE || !is_power_of_2(block_size))) { + if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) { unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE); snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u", @@ -2902,6 +2913,7 @@ static int __init dm_bufio_init(void) __u64 mem; dm_bufio_allocated_kmem_cache = 0; + dm_bufio_allocated_kmalloc = 0; dm_bufio_allocated_get_free_pages = 0; dm_bufio_allocated_vmalloc = 0; dm_bufio_current_allocated = 0; @@ -2990,6 +3002,9 @@ MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the 
maximum allocated memory"); module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444); MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc"); +module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444); +MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc_alloc"); + module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444); MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages"); diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c index f3051bd7d2df..b4165f172d62 100644 --- a/drivers/md/dm-cache-background-tracker.c +++ b/drivers/md/dm-cache-background-tracker.c @@ -143,12 +143,6 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in } } -unsigned int btracker_nr_writebacks_queued(struct background_tracker *b) -{ - return atomic_read(&b->pending_writebacks); -} -EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued); - unsigned int btracker_nr_demotions_queued(struct background_tracker *b) { return atomic_read(&b->pending_demotes); diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h index 09c8fc59f7bb..47156c14a44a 100644 --- a/drivers/md/dm-cache-background-tracker.h +++ b/drivers/md/dm-cache-background-tracker.h @@ -50,7 +50,6 @@ struct background_tracker *btracker_create(unsigned int max_work); */ void btracker_destroy(struct background_tracker *b); -unsigned int btracker_nr_writebacks_queued(struct background_tracker *b); unsigned int btracker_nr_demotions_queued(struct background_tracker *b); /* diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 24cd87fddf75..a9a1ab284076 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1218,15 +1218,6 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd, return r; } -int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result) -{ - READ_LOCK(cmd); - *result = cmd->cache_blocks; - READ_UNLOCK(cmd); - - return 0; -} - static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock) { int r; @@ -1507,30 +1498,6 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd, return r; } -static int __dump_mapping(void *context, uint64_t cblock, void *leaf) -{ - __le64 value; - dm_oblock_t oblock; - unsigned int flags; - - memcpy(&value, leaf, sizeof(value)); - unpack_value(value, &oblock, &flags); - - return 0; -} - -static int __dump_mappings(struct dm_cache_metadata *cmd) -{ - return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL); -} - -void dm_cache_dump(struct dm_cache_metadata *cmd) -{ - READ_LOCK_VOID(cmd); - __dump_mappings(cmd); - READ_UNLOCK(cmd); -} - int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd) { int r; diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index 57afc7047947..5f77890207fe 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -71,7 +71,6 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd); * origin blocks to map to. 
*/ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size); -int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result); int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, sector_t discard_block_size, @@ -123,8 +122,6 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd, int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd, dm_block_t *result); -void dm_cache_dump(struct dm_cache_metadata *cmd); - /* * The policy is invited to save a 32bit hint value for every cblock (eg, * for a hit count). These are stored against the policy name. If diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f299ff393a6a..d42eac944eb5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1912,7 +1912,7 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user, if ((kernel_params->version[0] != DM_VERSION_MAJOR) || (kernel_params->version[1] > DM_VERSION_MINOR)) { - DMERR("ioctl interface mismatch: kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DMERR_LIMIT("ioctl interface mismatch: kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, kernel_params->version[0], @@ -1961,7 +1961,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern if (unlikely(param_kernel->data_size < minimum_data_size) || unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) { - DMERR("Invalid data size in the ioctl structure: %u", + DMERR_LIMIT("Invalid data size in the ioctl structure: %u", param_kernel->data_size); return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index dbd39b9722b9..bd8b796ae683 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1033,11 +1033,6 @@ struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) return NULL; } -bool dm_table_bio_based(struct dm_table *t) -{ - return __table_type_bio_based(dm_table_get_type(t)); -} - bool dm_table_request_based(struct dm_table *t) { return __table_type_request_based(dm_table_get_type(t)); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 9095f19a84f3..bf0f9dddd146 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2484,6 +2484,7 @@ static void pool_work_wait(struct pool_work *pw, struct pool *pool, init_completion(&pw->complete); queue_work(pool->wq, &pw->worker); wait_for_completion(&pw->complete); + destroy_work_on_stack(&pw->worker); } /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-vdo/Kconfig b/drivers/md/dm-vdo/Kconfig index 111ecd2c2a24..2400b2bc4bc7 100644 --- a/drivers/md/dm-vdo/Kconfig +++ b/drivers/md/dm-vdo/Kconfig @@ -7,6 +7,7 @@ config DM_VDO select DM_BUFIO select LZ4_COMPRESS select LZ4_DECOMPRESS + select MIN_HEAP help This device mapper target presents a block device with deduplication, compression and thin-provisioning. diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index a0a7c1bd634e..89cb7942ec5c 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -209,8 +209,6 @@ static int initialize_info(struct vdo_page_cache *cache) /** * allocate_cache_components() - Allocate components of the cache which require their own * allocation. - * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be - * written out. * * The caller is responsible for all clean up on errors. 
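Several of the dm-vdo hunks in this stretch only correct kernel-doc: the @param lines must name the actual parameters of the signature, otherwise a W=1 build (or scripts/kernel-doc) warns with "Excess function parameter ... description" and "Function parameter or struct member ... not described". For reference, a well-formed block looks like the sketch below; the names are invented, not taken from vdo.

	/**
	 * example_resize_table() - Grow a table to a new entry count.
	 * @table: The table to resize.
	 * @new_size: The requested number of entries.
	 *
	 * The caller is responsible for all clean up on errors.
	 *
	 * Return: VDO_SUCCESS or an error code.
	 */
	static int example_resize_table(struct example_table *table, u64 new_size);
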
* diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 0d502f6a86ad..810002747091 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -327,8 +327,9 @@ static u32 __must_check pack_status(struct data_vio_compression_status status) /** * set_data_vio_compression_status() - Set the compression status of a data_vio. - * @state: The expected current status of the data_vio. - * @new_state: The status to set. + * @data_vio: The data_vio to change. + * @status: The expected current status of the data_vio. + * @new_status: The status to set. * * Return: true if the new status was set, false if the data_vio's compression status did not * match the expected state, and so was left unchanged. @@ -836,7 +837,7 @@ static void destroy_data_vio(struct data_vio *data_vio) * @vdo: The vdo to which the pool will belong. * @pool_size: The number of data_vios in the pool. * @discard_limit: The maximum number of data_vios which may be used for discards. - * @pool: A pointer to hold the newly allocated pool. + * @pool_ptr: A pointer to hold the newly allocated pool. */ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr) @@ -1074,35 +1075,6 @@ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios) spin_unlock(&pool->lock); } -data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool) -{ - return READ_ONCE(pool->discard_limiter.busy); -} - -data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool) -{ - return READ_ONCE(pool->discard_limiter.limit); -} - -data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool) -{ - return READ_ONCE(pool->discard_limiter.max_busy); -} - -int set_data_vio_pool_discard_limit(struct data_vio_pool *pool, data_vio_count_t limit) -{ - if (get_data_vio_pool_request_limit(pool) < limit) { - // The discard limit may not be higher than the data_vio limit. 
- return -EINVAL; - } - - spin_lock(&pool->lock); - pool->discard_limiter.limit = limit; - spin_unlock(&pool->lock); - - return VDO_SUCCESS; -} - data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool) { return READ_ONCE(pool->limiter.busy); diff --git a/drivers/md/dm-vdo/data-vio.h b/drivers/md/dm-vdo/data-vio.h index 25926b6cd98b..067b983bb291 100644 --- a/drivers/md/dm-vdo/data-vio.h +++ b/drivers/md/dm-vdo/data-vio.h @@ -336,11 +336,6 @@ void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *comp void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion); void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios); -data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool); -data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool); -data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool); -int __must_check set_data_vio_pool_discard_limit(struct data_vio_pool *pool, - data_vio_count_t limit); data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool); data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool); data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool); diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 80628ae93fba..b6f8e2dc7729 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -565,7 +565,7 @@ static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio) * @waiter: The data_vio's waiter link. * @context: Not used. */ -static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused) +static void abort_waiter(struct vdo_waiter *waiter, void __always_unused *context) { write_data_vio(vdo_waiter_as_data_vio(waiter)); } @@ -1727,7 +1727,7 @@ static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *dat /** * vdo_continue_hash_lock() - Continue the processing state after writing, compressing, or * deduplicating. - * @data_vio: The data_vio to continue processing in its hash lock. + * @completion: The data_vio completion to continue processing in its hash lock. * * Asynchronously continue processing a data_vio in its hash lock after it has finished writing, * compressing, or deduplicating, so it can share the result with any data_vios waiting in the hash @@ -1825,7 +1825,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio /** * vdo_acquire_hash_lock() - Acquire or share a lock on a record name. - * @data_vio: The data_vio acquiring a lock on its record name. + * @completion: The data_vio completion acquiring a lock on its record name. * * Acquire or share a lock on the hash (record name) of the data in a data_vio, updating the * data_vio to reference the lock. This must only be called in the correct thread for the zone. In @@ -2679,7 +2679,8 @@ static void get_index_statistics(struct hash_zones *zones, /** * vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index. 
- * @hash_zones: The hash zones to query + * @zones: The hash zones to query + * @stats: A structure to store the statistics * * Return: The sum of the hash lock statistics from all hash zones plus the statistics from the UDS * index diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index a34ea0229d53..100e92f8f866 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -858,7 +858,7 @@ static int __must_check make_partition(struct layout *layout, enum partition_id /** * vdo_initialize_layout() - Lay out the partitions of a vdo. * @size: The entire size of the vdo. - * @origin: The start of the layout on the underlying storage in blocks. + * @offset: The start of the layout on the underlying storage in blocks. * @block_map_blocks: The size of the block map partition. * @journal_blocks: The size of the journal partition. * @summary_blocks: The size of the slab summary partition. diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c index 627adc24af3b..af8fab83b0f3 100644 --- a/drivers/md/dm-vdo/indexer/index-layout.c +++ b/drivers/md/dm-vdo/indexer/index-layout.c @@ -248,32 +248,6 @@ static int __must_check compute_sizes(const struct uds_configuration *config, return UDS_SUCCESS; } -int uds_compute_index_size(const struct uds_parameters *parameters, u64 *index_size) -{ - int result; - struct uds_configuration *index_config; - struct save_layout_sizes sizes; - - if (index_size == NULL) { - vdo_log_error("Missing output size pointer"); - return -EINVAL; - } - - result = uds_make_configuration(parameters, &index_config); - if (result != UDS_SUCCESS) { - vdo_log_error_strerror(result, "cannot compute index size"); - return uds_status_to_errno(result); - } - - result = compute_sizes(index_config, &sizes); - uds_free_configuration(index_config); - if (result != UDS_SUCCESS) - return uds_status_to_errno(result); - - *index_size = sizes.total_size; - return UDS_SUCCESS; -} - /* Create unique data using the current time and a pseudorandom number. */ static void create_unique_nonce_data(u8 *buffer) { diff --git a/drivers/md/dm-vdo/indexer/indexer.h b/drivers/md/dm-vdo/indexer/indexer.h index 3744aaf625b0..183a94eb7e92 100644 --- a/drivers/md/dm-vdo/indexer/indexer.h +++ b/drivers/md/dm-vdo/indexer/indexer.h @@ -283,10 +283,6 @@ struct uds_request { enum uds_index_region location; }; -/* Compute the number of bytes needed to store an index. */ -int __must_check uds_compute_index_size(const struct uds_parameters *parameters, - u64 *index_size); - /* A session is required for most index operations. */ int __must_check uds_create_index_session(struct uds_index_session **session); diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c index f6fe58e437b3..aeb690415dbd 100644 --- a/drivers/md/dm-vdo/int-map.c +++ b/drivers/md/dm-vdo/int-map.c @@ -70,7 +70,7 @@ * it's crucial to keep the hop fields near the buckets that use them so they'll tend to share * cache lines. */ -struct __packed bucket { +struct bucket { /** * @first_hop: The biased offset of the first entry in the hop list of the neighborhood * that hashes to this bucket. @@ -82,7 +82,7 @@ struct __packed bucket { u64 key; /** @value: The value stored in this bucket (NULL if empty). */ void *value; -}; +} __packed; /** * struct int_map - The concrete definition of the opaque int_map type.
@@ -310,7 +310,6 @@ static struct bucket *select_bucket(const struct int_map *map, u64 key) /** * search_hop_list() - Search the hop list associated with the given hash bucket for a given search * key. - * @map: The map being searched. * @bucket: The map bucket to search for the key. * @key: The mapping key. * @previous_ptr: Output. If not NULL, a pointer in which to store the bucket in the list preceding @@ -321,9 +320,7 @@ static struct bucket *select_bucket(const struct int_map *map, u64 key) * * Return: An entry that matches the key, or NULL if not found. */ -static struct bucket *search_hop_list(struct int_map *map __always_unused, - struct bucket *bucket, - u64 key, +static struct bucket *search_hop_list(struct bucket *bucket, u64 key, struct bucket **previous_ptr) { struct bucket *previous = NULL; @@ -357,7 +354,7 @@ static struct bucket *search_hop_list(struct int_map *map __always_unused, */ void *vdo_int_map_get(struct int_map *map, u64 key) { - struct bucket *match = search_hop_list(map, select_bucket(map, key), key, NULL); + struct bucket *match = search_hop_list(select_bucket(map, key), key, NULL); return ((match != NULL) ? match->value : NULL); } @@ -443,7 +440,7 @@ find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_p /** * move_empty_bucket() - Move an empty bucket closer to the start of the bucket array. - * @map: The map containing the bucket. * @hole: The empty bucket to fill with an entry that precedes it in one of its enclosing * neighborhoods. * @@ -454,8 +450,7 @@ find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_p * Return: The bucket that was vacated by moving its entry to the provided hole, or NULL if no * entry could be moved. */ -static struct bucket *move_empty_bucket(struct int_map *map __always_unused, - struct bucket *hole) +static struct bucket *move_empty_bucket(struct bucket *hole) { /* * Examine every neighborhood that the empty bucket is part of, starting with the one in @@ -516,7 +511,6 @@ static struct bucket *move_empty_bucket(struct int_map *map __always_unused, /** * update_mapping() - Find and update any existing mapping for a given key, returning the value * associated with the key in the provided pointer. - * @map: The int_map to attempt to modify. * @neighborhood: The first bucket in the neighborhood that would contain the search key * @key: The key with which to associate the new value. * @new_value: The value to be associated with the key. @@ -525,10 +519,10 @@ static struct bucket *move_empty_bucket(struct int_map *map __always_unused, * * Return: true if the map contains a mapping for the key, false if it does not. */ -static bool update_mapping(struct int_map *map, struct bucket *neighborhood, - u64 key, void *new_value, bool update, void **old_value_ptr) +static bool update_mapping(struct bucket *neighborhood, u64 key, void *new_value, + bool update, void **old_value_ptr) { - struct bucket *bucket = search_hop_list(map, neighborhood, key, NULL); + struct bucket *bucket = search_hop_list(neighborhood, key, NULL); if (bucket == NULL) { /* There is no bucket containing the key in the neighborhood. */ @@ -584,7 +578,7 @@ static struct bucket *find_or_make_vacancy(struct int_map *map, * The nearest empty bucket isn't within the neighborhood that must contain the new * entry, so try to swap it with a bucket that is closer.
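[Editor's note] The search_hop_list() refactor above drops a parameter the function never used; the walk needs only the bucket array itself. For readers unfamiliar with hopscotch hashing, here is a minimal sketch of that walk, assuming the bucket layout shown in the int-map.c hunk above (first_hop and next_hop are offsets biased by one, so zero can serve as the end-of-list sentinel). The function name and the bare zero sentinel are illustrative, not identifiers from the driver:

    static struct bucket *sketch_search_hop_list(struct bucket *bucket, u64 key)
    {
            /* Hop offsets are biased by one so that zero means "no entry". */
            unsigned int next_hop = bucket->first_hop;

            while (next_hop != 0) {
                    /* Offsets are relative to the neighborhood's first bucket. */
                    struct bucket *entry = &bucket[next_hop - 1];

                    if ((entry->key == key) && (entry->value != NULL))
                            return entry;
                    next_hop = entry->next_hop;
            }

            return NULL;
    }

Keeping first_hop and next_hop inside the buckets themselves, rather than in a side table, is what the earlier comment about sharing cache lines refers to: a lookup touches only the neighborhood's own buckets.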
*/ - hole = move_empty_bucket(map, hole); + hole = move_empty_bucket(hole); } return NULL; @@ -625,7 +619,7 @@ int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update, * Check whether the neighborhood already contains an entry for the key, in which case we * optionally update it, returning the old value. */ - if (update_mapping(map, neighborhood, key, new_value, update, old_value_ptr)) + if (update_mapping(neighborhood, key, new_value, update, old_value_ptr)) return VDO_SUCCESS; /* @@ -679,7 +673,7 @@ void *vdo_int_map_remove(struct int_map *map, u64 key) /* Select the bucket to search and search it for an existing entry. */ struct bucket *bucket = select_bucket(map, key); struct bucket *previous; - struct bucket *victim = search_hop_list(map, bucket, key, &previous); + struct bucket *victim = search_hop_list(bucket, key, &previous); if (victim == NULL) { /* There is no matching entry to remove. */ diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index ab62abe18827..421e5436c32c 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -367,7 +367,7 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical, * completions. * @max_requests_active: Number of bios for merge tracking. * @vdo: The vdo which will use this submitter. - * @io_submitter: pointer to the new data structure. + * @io_submitter_ptr: pointer to the new data structure. * * Return: VDO_SUCCESS or an error. */ diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c index 13008b089206..b0b0587d85f3 100644 --- a/drivers/md/dm-vdo/murmurhash3.c +++ b/drivers/md/dm-vdo/murmurhash3.c @@ -44,14 +44,11 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out) u64 *hash_out = out; /* body */ - - const u64 *blocks = (const u64 *)(data); - int i; for (i = 0; i < nblocks; i++) { - u64 k1 = get_unaligned_le64(&blocks[i * 2]); - u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]); + u64 k1 = get_unaligned_le64(&data[i * 16]); + u64 k2 = get_unaligned_le64(&data[i * 16 + 8]); k1 *= c1; k1 = ROTL64(k1, 31); diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index 16cf29b4c90a..f70f5edabc10 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -250,7 +250,6 @@ static void abort_packing(struct data_vio *data_vio) /** * release_compressed_write_waiter() - Update a data_vio for which a successful compressed write * has completed and send it on its way. - * @data_vio: The data_vio to release. * @allocation: The allocation to which the compressed block was written. */ @@ -383,7 +382,7 @@ static void initialize_compressed_block(struct compressed_block *block, u16 size * @compression: The agent's compression_state to pack in to. * @data_vio: The data_vio to pack. * @offset: The offset into the compressed block at which to pack the fragment. - * @compressed_block: The compressed block which will be written out when batch is fully packed. + * @block: The compressed block which will be written out when batch is fully packed. * * Return: The new amount of space used. */ diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index 2fee3a7c1191..a43b5c45fab7 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -517,7 +517,7 @@ static int allocate_and_lock_block(struct allocation *allocation) * @waiter: The allocating_vio that was waiting to allocate. * @context: The context (unused). 
*/ -static void retry_allocation(struct vdo_waiter *waiter, void *context __always_unused) +static void retry_allocation(struct vdo_waiter *waiter, void __always_unused *context) { struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index ee6321a3e523..de58184f538f 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -1365,7 +1365,7 @@ static void add_queued_recovery_entries(struct recovery_journal_block *block) * * Implements waiter_callback_fn. */ -static void write_block(struct vdo_waiter *waiter, void *context __always_unused) +static void write_block(struct vdo_waiter *waiter, void __always_unused *context) { struct recovery_journal_block *block = container_of(waiter, struct recovery_journal_block, write_waiter); diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index ffff2c999518..8c006fb3afcf 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -166,7 +166,7 @@ static void swap_mappings(void *item1, void *item2, void __always_unused *args) static const struct min_heap_callbacks repair_min_heap = { .less = mapping_is_less_than, - .swp = swap_mappings, + .swp = NULL, }; static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair) diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 274f9ccd072f..8f0a35c63af6 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -1287,7 +1287,7 @@ static struct reference_block * __must_check get_reference_block(struct vdo_slab * slab_block_number_from_pbn() - Determine the index within the slab of a particular physical * block number. * @slab: The slab. - * @physical_block_number: The physical block number. + * @pbn: The physical block number. * @slab_block_number_ptr: A pointer to the slab block number. * * Return: VDO_SUCCESS or an error code. @@ -1459,7 +1459,6 @@ static int increment_for_data(struct vdo_slab *slab, struct reference_block *blo * @block_number: The block to update. * @old_status: The reference status of the data block before this decrement. * @updater: The reference updater doing this operation in case we need to look up the pbn lock. - * @lock: The pbn_lock associated with the block being decremented (may be NULL). * @counter_ptr: A pointer to the count for the data block (in, out). * @adjust_block_count: Whether to update the allocator's free block count. * @@ -3232,8 +3231,7 @@ int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator, /** * vdo_modify_reference_count() - Modify the reference count of a block by first making a slab * journal entry and then updating the reference counter. - * - * @data_vio: The data_vio for which to add the entry. + * @completion: The data_vio completion for which to add the entry. * @updater: Which of the data_vio's reference updaters is being submitted. 
*/ void vdo_modify_reference_count(struct vdo_completion *completion, @@ -3301,17 +3299,9 @@ static bool slab_status_is_less_than(const void *item1, const void *item2, return info1->slab_number < info2->slab_number; } -static void swap_slab_statuses(void *item1, void *item2, void __always_unused *args) -{ - struct slab_status *info1 = item1; - struct slab_status *info2 = item2; - - swap(*info1, *info2); -} - static const struct min_heap_callbacks slab_status_min_heap = { .less = slab_status_is_less_than, - .swp = swap_slab_statuses, + .swp = NULL, }; /* Inform the slab actor that an action has finished on some slab; used by apply_to_slabs(). */ @@ -4750,8 +4740,7 @@ void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent) /** * stop_scrubbing() - Tell the scrubber to stop scrubbing after it finishes the slab it is * currently working on. - * @scrubber: The scrubber to stop. - * @parent: The completion to notify when scrubbing has stopped. + * @allocator: The block allocator owning the scrubber to stop. */ static void stop_scrubbing(struct block_allocator *allocator) { diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index fff847767755..a7e32baab4af 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -643,7 +643,7 @@ static void finish_vdo(struct vdo *vdo) /** * free_listeners() - Free the list of read-only listeners associated with a thread. - * @thread_data: The thread holding the list to free. + * @thread: The thread holding the list to free. */ static void free_listeners(struct vdo_thread *thread) { @@ -852,7 +852,7 @@ int vdo_synchronous_flush(struct vdo *vdo) /** * vdo_get_state() - Get the current state of the vdo. * @vdo: The vdo. - + * * Context: This method may be called from any thread. * * Return: The current state of the vdo.
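[Editor's note] The two min_heap_callbacks conversions above (repair_min_heap in repair.c and slab_status_min_heap here) follow one pattern: assuming the min-heap library falls back to a built-in element swap when .swp is NULL, a callback that merely swaps the two elements wholesale adds no information and can be deleted. A minimal sketch of the resulting shape, with a hypothetical element type:

    struct sketch_status {
            u64 number;
    };

    static bool sketch_is_less_than(const void *item1, const void *item2,
                                    void __always_unused *args)
    {
            const struct sketch_status *info1 = item1;
            const struct sketch_status *info2 = item2;

            return info1->number < info2->number;
    }

    static const struct min_heap_callbacks sketch_min_heap = {
            .less = sketch_is_less_than,
            .swp = NULL,    /* use the library's default element swap */
    };

A custom .swp remains worthwhile only when a swap has side effects, such as fixing up back-references into the heap.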
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c index b291578f726f..e710f3c5a972 100644 --- a/drivers/md/dm-vdo/vio.c +++ b/drivers/md/dm-vdo/vio.c @@ -202,6 +202,7 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback, if (data == NULL) return VDO_SUCCESS; + bio->bi_ioprio = 0; bio->bi_io_vec = bio->bi_inline_vecs; bio->bi_max_vecs = vio->block_count + 1; len = VDO_BLOCK_SIZE * vio->block_count; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index c142ec5458b7..47d595f6a76e 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -93,7 +93,7 @@ static void dm_bufio_alloc_callback(struct dm_buffer *buf) */ static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector) { - return v->data_start + dm_target_offset(v->ti, bi_sector); + return dm_target_offset(v->ti, bi_sector); } /* @@ -952,7 +952,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev *bdev = v->data_dev->bdev; - if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev)) + if (ti->len != bdev_nr_sectors(v->data_dev->bdev)) return 1; return 0; } @@ -962,7 +962,7 @@ static int verity_iterate_devices(struct dm_target *ti, { struct dm_verity *v = ti->private; - return fn(ti, v->data_dev, v->data_start, ti->len, data); + return fn(ti, v->data_dev, 0, ti->len, data); } static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index c996140bda94..8cbb57862ae1 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -50,7 +50,6 @@ struct dm_verity { unsigned int sig_size; /* root digest signature size */ #endif /* CONFIG_SECURITY */ unsigned int salt_size; - sector_t data_start; /* data offset in 512-byte sectors */ sector_t hash_start; /* hash start in blocks */ sector_t data_blocks; /* the number of data blocks */ sector_t hash_blocks; /* the number of hash blocks */ diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 8156881a31de..deff22ecccbb 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -245,11 +245,6 @@ unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd) return zmd->zone_nr_blocks; } -unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd) -{ - return zmd->zone_nr_blocks_shift; -} - unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd) { return zmd->zone_nr_sectors; @@ -3005,48 +3000,3 @@ void dmz_dtr_metadata(struct dmz_metadata *zmd) dmz_cleanup_metadata(zmd); kfree(zmd); } - -/* - * Check zone information on resume. 
- */ -int dmz_resume_metadata(struct dmz_metadata *zmd) -{ - struct dm_zone *zone; - sector_t wp_block; - unsigned int i; - int ret; - - /* Check zones */ - for (i = 0; i < zmd->nr_zones; i++) { - zone = dmz_get(zmd, i); - if (!zone) { - dmz_zmd_err(zmd, "Unable to get zone %u", i); - return -EIO; - } - wp_block = zone->wp_block; - - ret = dmz_update_zone(zmd, zone); - if (ret) { - dmz_zmd_err(zmd, "Broken zone %u", i); - return ret; - } - - if (dmz_is_offline(zone)) { - dmz_zmd_warn(zmd, "Zone %u is offline", i); - continue; - } - - /* Check write pointer */ - if (!dmz_is_seq(zone)) - zone->wp_block = 0; - else if (zone->wp_block != wp_block) { - dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)", - i, (u64)zone->wp_block, (u64)wp_block); - zone->wp_block = wp_block; - dmz_invalidate_blocks(zmd, zone, zone->wp_block, - zmd->zone_nr_blocks - zone->wp_block); - } - } - - return 0; -} diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h index 265494d3f711..59ba0aaa9531 100644 --- a/drivers/md/dm-zoned.h +++ b/drivers/md/dm-zoned.h @@ -192,7 +192,6 @@ enum { int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev, struct dmz_metadata **zmd, const char *devname); void dmz_dtr_metadata(struct dmz_metadata *zmd); -int dmz_resume_metadata(struct dmz_metadata *zmd); void dmz_lock_map(struct dmz_metadata *zmd); void dmz_unlock_map(struct dmz_metadata *zmd); @@ -230,7 +229,6 @@ unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx); unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx); unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx); unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd); -unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd); unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd); unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 19230404d8c2..12ecf07a3841 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2517,12 +2517,6 @@ void dm_unlock_md_type(struct mapped_device *md) mutex_unlock(&md->type_lock); } -void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) -{ - BUG_ON(!mutex_is_locked(&md->type_lock)); - md->type = type; -} - enum dm_queue_mode dm_get_md_type(struct mapped_device *md) { return md->type; @@ -3349,6 +3343,59 @@ void dm_free_md_mempools(struct dm_md_mempools *pools) kfree(pools); } +struct dm_blkdev_id { + u8 *id; + enum blk_unique_id type; +}; + +static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_blkdev_id *dm_id = data; + const struct block_device_operations *fops = dev->bdev->bd_disk->fops; + + if (!fops->get_unique_id) + return 0; + + return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type); +} + +/* + * Allow access to get_unique_id() for the first device returning a + * non-zero result. Reasonable use expects all devices to have the + * same unique id. 
+ */ +static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id, + enum blk_unique_id type) +{ + struct mapped_device *md = disk->private_data; + struct dm_table *table; + struct dm_target *ti; + int ret = 0, srcu_idx; + + struct dm_blkdev_id dm_id = { + .id = id, + .type = type, + }; + + table = dm_get_live_table(md, &srcu_idx); + if (!table || !dm_table_get_size(table)) + goto out; + + /* We only support devices that have a single target */ + if (table->num_targets != 1) + goto out; + ti = dm_table_get_target(table, 0); + + if (!ti->type->iterate_devices) + goto out; + + ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id); +out: + dm_put_live_table(md, srcu_idx); + return ret; +} + struct dm_pr { u64 old_key; u64 new_key; @@ -3674,6 +3721,7 @@ static const struct block_device_operations dm_blk_dops = { .ioctl = dm_blk_ioctl, .getgeo = dm_blk_getgeo, .report_zones = dm_blk_report_zones, + .get_unique_id = dm_blk_get_unique_id, .pr_ops = &dm_pr_ops, .owner = THIS_MODULE }; @@ -3683,6 +3731,7 @@ static const struct block_device_operations dm_rq_blk_dops = { .release = dm_blk_close, .ioctl = dm_blk_ioctl, .getgeo = dm_blk_getgeo, + .get_unique_id = dm_blk_get_unique_id, .pr_ops = &dm_pr_ops, .owner = THIS_MODULE }; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 8ad782249af8..a0a8ff119815 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -71,12 +71,10 @@ enum dm_queue_mode dm_table_get_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); struct dm_target *dm_table_get_immutable_target(struct dm_table *t); struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); -bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); -void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type); enum dm_queue_mode dm_get_md_type(struct mapped_device *md); struct target_type *dm_get_immutable_target_type(struct mapped_device *md); diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 3a19124ee279..22a551c407da 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -51,7 +51,7 @@ static int index_check(const struct dm_block_validator *v, block_size - sizeof(__le32), INDEX_CSUM_XOR)); if (csum_disk != mi_le->csum) { - DMERR_LIMIT("i%s failed: csum %u != wanted %u", __func__, + DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__, le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum)); return -EILSEQ; } diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h index a1ec7e84d6fe..40b34f670065 100644 --- a/drivers/message/fusion/mptlan.h +++ b/drivers/message/fusion/mptlan.h @@ -51,10 +51,7 @@ #define LINUX_MPTLAN_H_INCLUDED /*****************************************************************************/ -#if !defined(__GENKSYMS__) #include <linux/module.h> -#endif - #include <linux/netdevice.h> #include <linux/errno.h> // #include <linux/etherdevice.h> diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index a0bcb0864ecd..a798e26c6402 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4231,10 +4231,8 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num, static void mptsas_reprobe_lun(struct scsi_device *sdev, void *data) { - int rc; - sdev->no_uld_attach = 
data ? 1 : 0; - rc = scsi_device_reprobe(sdev); + WARN_ON(scsi_device_reprobe(sdev)); } static void diff --git a/drivers/mfd/88pm886.c b/drivers/mfd/88pm886.c index dbe9efc027d2..891fdce5d8c1 100644 --- a/drivers/mfd/88pm886.c +++ b/drivers/mfd/88pm886.c @@ -37,6 +37,7 @@ static struct resource pm886_onkey_resources[] = { static struct mfd_cell pm886_devs[] = { MFD_CELL_RES("88pm886-onkey", pm886_onkey_resources), MFD_CELL_NAME("88pm886-regulator"), + MFD_CELL_NAME("88pm886-rtc"), }; static int pm886_power_off_handler(struct sys_off_data *sys_off_data) diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 03c1e4e3eea4..ae23b317a64e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -25,7 +25,7 @@ config MFD_ADP5585 select MFD_CORE select REGMAP_I2C depends on I2C - depends on OF || COMPILE_TEST + depends on OF help Say yes here to add support for the Analog Devices ADP5585 GPIO expander, PWM and keypad controller. This includes the I2C driver and diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c index 8f3ebe651eea..b6b44e2e3198 100644 --- a/drivers/mfd/ab8500-sysctrl.c +++ b/drivers/mfd/ab8500-sysctrl.c @@ -159,7 +159,7 @@ static struct platform_driver ab8500_sysctrl_driver = { .of_match_table = ab8500_sysctrl_match, }, .probe = ab8500_sysctrl_probe, - .remove_new = ab8500_sysctrl_remove, + .remove = ab8500_sysctrl_remove, }; static int __init ab8500_sysctrl_init(void) diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c index b52f7ffdad35..d5df5466eaf5 100644 --- a/drivers/mfd/atmel-flexcom.c +++ b/drivers/mfd/atmel-flexcom.c @@ -95,7 +95,7 @@ static int __maybe_unused atmel_flexcom_resume_noirq(struct device *dev) if (err) return err; - val = FLEX_MR_OPMODE(ddata->opmode), + val = FLEX_MR_OPMODE(ddata->opmode); writel(val, ddata->base + FLEX_MR); clk_disable_unprepare(ddata->clk); diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c index e560066e5885..4628ca14e766 100644 --- a/drivers/mfd/atmel-smc.c +++ b/drivers/mfd/atmel-smc.c @@ -255,8 +255,8 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_apply); /** * atmel_hsmc_cs_conf_apply - apply an SMC CS conf * @regmap: the HSMC regmap - * @cs: the CS id * @layout: the layout of registers + * @cs: the CS id * @conf: the SMC CS conf to apply * * Applies an SMC CS configuration. @@ -296,8 +296,8 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_get); /** * atmel_hsmc_cs_conf_get - retrieve the current SMC CS conf * @regmap: the HSMC regmap - * @cs: the CS id * @layout: the layout of registers + * @cs: the CS id * @conf: the SMC CS conf object to store the current conf * * Retrieve the SMC CS configuration. 
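[Editor's note] The .remove_new to .remove renames that recur through the drivers/mfd hunks (ab8500-sysctrl above, and many more below) are mechanical: .remove_new was the transitional name for the platform-driver remove callback while its return type changed from int to void, and with the tree-wide conversion finished the callback reverts to the .remove name with the new signature. A minimal sketch of the converted form, with hypothetical driver names:

    #include <linux/platform_device.h>

    static int sketch_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void sketch_remove(struct platform_device *pdev)
    {
            /* Returns void: remove callbacks can no longer report failure. */
    }

    static struct platform_driver sketch_driver = {
            .driver = {
                    .name = "sketch",
            },
            .probe = sketch_probe,
            .remove = sketch_remove,    /* was .remove_new during the transition */
    };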
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c index 791a0b4cb64b..5c93136f977e 100644 --- a/drivers/mfd/axp20x-i2c.c +++ b/drivers/mfd/axp20x-i2c.c @@ -65,6 +65,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = { { .compatible = "x-powers,axp221", .data = (void *)AXP221_ID }, { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID }, { .compatible = "x-powers,axp313a", .data = (void *)AXP313A_ID }, + { .compatible = "x-powers,axp323", .data = (void *)AXP323_ID }, { .compatible = "x-powers,axp717", .data = (void *)AXP717_ID }, { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID }, { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID }, diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index 4051551757f2..251465a656d0 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -34,20 +34,21 @@ #define AXP806_REG_ADDR_EXT_ADDR_SLAVE_MODE BIT(4) static const char * const axp20x_model_names[] = { - "AXP152", - "AXP192", - "AXP202", - "AXP209", - "AXP221", - "AXP223", - "AXP288", - "AXP313a", - "AXP717", - "AXP803", - "AXP806", - "AXP809", - "AXP813", - "AXP15060", + [AXP152_ID] = "AXP152", + [AXP192_ID] = "AXP192", + [AXP202_ID] = "AXP202", + [AXP209_ID] = "AXP209", + [AXP221_ID] = "AXP221", + [AXP223_ID] = "AXP223", + [AXP288_ID] = "AXP288", + [AXP313A_ID] = "AXP313a", + [AXP323_ID] = "AXP323", + [AXP717_ID] = "AXP717", + [AXP803_ID] = "AXP803", + [AXP806_ID] = "AXP806", + [AXP809_ID] = "AXP809", + [AXP813_ID] = "AXP813", + [AXP15060_ID] = "AXP15060", }; static const struct regmap_range axp152_writeable_ranges[] = { @@ -193,6 +194,10 @@ static const struct regmap_range axp313a_writeable_ranges[] = { regmap_reg_range(AXP313A_ON_INDICATE, AXP313A_IRQ_STATE), }; +static const struct regmap_range axp323_writeable_ranges[] = { + regmap_reg_range(AXP313A_ON_INDICATE, AXP323_DCDC_MODE_CTRL2), +}; + static const struct regmap_range axp313a_volatile_ranges[] = { regmap_reg_range(AXP313A_SHUTDOWN_CTRL, AXP313A_SHUTDOWN_CTRL), regmap_reg_range(AXP313A_IRQ_STATE, AXP313A_IRQ_STATE), @@ -203,6 +208,11 @@ static const struct regmap_access_table axp313a_writeable_table = { .n_yes_ranges = ARRAY_SIZE(axp313a_writeable_ranges), }; +static const struct regmap_access_table axp323_writeable_table = { + .yes_ranges = axp323_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(axp323_writeable_ranges), +}; + static const struct regmap_access_table axp313a_volatile_table = { .yes_ranges = axp313a_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(axp313a_volatile_ranges), @@ -433,6 +443,15 @@ static const struct regmap_config axp313a_regmap_config = { .cache_type = REGCACHE_MAPLE, }; +static const struct regmap_config axp323_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .wr_table = &axp323_writeable_table, + .volatile_table = &axp313a_volatile_table, + .max_register = AXP323_DCDC_MODE_CTRL2, + .cache_type = REGCACHE_MAPLE, +}; + static const struct regmap_config axp717_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -1221,6 +1240,7 @@ static int axp20x_power_off(struct sys_off_data *data) unsigned int shutdown_reg; switch (axp20x->variant) { + case AXP323_ID: case AXP313A_ID: shutdown_reg = AXP313A_SHUTDOWN_CTRL; break; @@ -1289,6 +1309,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x) axp20x->regmap_cfg = &axp313a_regmap_config; axp20x->regmap_irq_chip = &axp313a_regmap_irq_chip; break; + case AXP323_ID: + axp20x->nr_cells = ARRAY_SIZE(axp313a_cells); + axp20x->cells = axp313a_cells; + axp20x->regmap_cfg = &axp323_regmap_config; + 
axp20x->regmap_irq_chip = &axp313a_regmap_irq_chip; + break; case AXP717_ID: axp20x->nr_cells = ARRAY_SIZE(axp717_cells); axp20x->cells = axp717_cells; @@ -1345,7 +1371,7 @@ int axp20x_match_device(struct axp20x_dev *axp20x) axp20x->regmap_irq_chip = &axp15060_regmap_irq_chip; break; default: - dev_err(dev, "unsupported AXP20X ID %lu\n", axp20x->variant); + dev_err(dev, "unsupported AXP20X ID %u\n", axp20x->variant); return -EINVAL; } @@ -1419,7 +1445,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x) } } - ret = mfd_add_devices(axp20x->dev, -1, axp20x->cells, + ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_AUTO, axp20x->cells, axp20x->nr_cells, NULL, 0, NULL); if (ret) { diff --git a/drivers/mfd/cgbc-core.c b/drivers/mfd/cgbc-core.c index 93004a6b29c1..85283c8dde25 100644 --- a/drivers/mfd/cgbc-core.c +++ b/drivers/mfd/cgbc-core.c @@ -321,9 +321,18 @@ static int cgbc_init_device(struct cgbc_device_data *cgbc) ret = cgbc_get_version(cgbc); if (ret) - return ret; + goto release_session; + + ret = mfd_add_devices(cgbc->dev, -1, cgbc_devs, ARRAY_SIZE(cgbc_devs), + NULL, 0, NULL); + if (ret) + goto release_session; - return mfd_add_devices(cgbc->dev, -1, cgbc_devs, ARRAY_SIZE(cgbc_devs), NULL, 0, NULL); + return 0; + +release_session: + cgbc_session_release(cgbc); + return ret; } static int cgbc_probe(struct platform_device *pdev) @@ -364,7 +373,7 @@ static struct platform_driver cgbc_driver = { .dev_groups = cgbc_groups, }, .probe = cgbc_probe, - .remove_new = cgbc_remove, + .remove = cgbc_remove, }; static const struct dmi_system_id cgbc_dmi_table[] __initconst = { diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index f3dc812b359f..9f84a52b48d6 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -108,6 +108,10 @@ static const struct mfd_cell cros_ec_keyboard_leds_cells[] = { { .name = "cros-keyboard-leds", }, }; +static const struct mfd_cell cros_ec_ucsi_cells[] = { + { .name = "cros_ec_ucsi", }, +}; + static const struct cros_feature_to_cells cros_subdevices[] = { { .id = EC_FEATURE_CEC, @@ -125,9 +129,9 @@ static const struct cros_feature_to_cells cros_subdevices[] = { .num_cells = ARRAY_SIZE(cros_ec_rtc_cells), }, { - .id = EC_FEATURE_USB_PD, - .mfd_cells = cros_usbpd_charger_cells, - .num_cells = ARRAY_SIZE(cros_usbpd_charger_cells), + .id = EC_FEATURE_UCSI_PPM, + .mfd_cells = cros_ec_ucsi_cells, + .num_cells = ARRAY_SIZE(cros_ec_ucsi_cells), }, { .id = EC_FEATURE_HANG_DETECT, @@ -253,6 +257,21 @@ static int ec_device_probe(struct platform_device *pdev) } /* + * UCSI provides power supply information so we don't need to separately + * load the cros_usbpd_charger driver. + */ + if (cros_ec_check_features(ec, EC_FEATURE_USB_PD) && + !cros_ec_check_features(ec, EC_FEATURE_UCSI_PPM)) { + retval = mfd_add_hotplug_devices(ec->dev, + cros_usbpd_charger_cells, + ARRAY_SIZE(cros_usbpd_charger_cells)); + + if (retval) + dev_warn(ec->dev, "failed to add usbpd-charger: %d\n", + retval); + } + + /* * Lightbar is a special case. Newer devices support autodetection, * but older ones do not. 
*/ @@ -346,7 +365,7 @@ static struct platform_driver cros_ec_dev_driver = { }, .id_table = cros_ec_id, .probe = ec_device_probe, - .remove_new = ec_device_remove, + .remove = ec_device_remove, }; static int __init cros_ec_dev_init(void) diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c index ae8fd37afb75..e5f17fc430e4 100644 --- a/drivers/mfd/cs42l43.c +++ b/drivers/mfd/cs42l43.c @@ -967,7 +967,6 @@ static void cs42l43_boot_work(struct work_struct *work) err: pm_runtime_put_sync(cs42l43->dev); - cs42l43_dev_remove(cs42l43); } static int cs42l43_power_up(struct cs42l43 *cs42l43) @@ -1101,6 +1100,8 @@ EXPORT_SYMBOL_NS_GPL(cs42l43_dev_probe, MFD_CS42L43); void cs42l43_dev_remove(struct cs42l43 *cs42l43) { + cancel_work_sync(&cs42l43->boot_work); + cs42l43_power_down(cs42l43); } EXPORT_SYMBOL_NS_GPL(cs42l43_dev_remove, MFD_CS42L43); @@ -1108,16 +1109,39 @@ EXPORT_SYMBOL_NS_GPL(cs42l43_dev_remove, MFD_CS42L43); static int cs42l43_suspend(struct device *dev) { struct cs42l43 *cs42l43 = dev_get_drvdata(dev); + static const struct reg_sequence mask_all[] = { + { CS42L43_DECIM_MASK, 0xFFFFFFFF, }, + { CS42L43_EQ_MIX_MASK, 0xFFFFFFFF, }, + { CS42L43_ASP_MASK, 0xFFFFFFFF, }, + { CS42L43_PLL_MASK, 0xFFFFFFFF, }, + { CS42L43_SOFT_MASK, 0xFFFFFFFF, }, + { CS42L43_SWIRE_MASK, 0xFFFFFFFF, }, + { CS42L43_MSM_MASK, 0xFFFFFFFF, }, + { CS42L43_ACC_DET_MASK, 0xFFFFFFFF, }, + { CS42L43_I2C_TGT_MASK, 0xFFFFFFFF, }, + { CS42L43_SPI_MSTR_MASK, 0xFFFFFFFF, }, + { CS42L43_SW_TO_SPI_BRIDGE_MASK, 0xFFFFFFFF, }, + { CS42L43_OTP_MASK, 0xFFFFFFFF, }, + { CS42L43_CLASS_D_AMP_MASK, 0xFFFFFFFF, }, + { CS42L43_GPIO_INT_MASK, 0xFFFFFFFF, }, + { CS42L43_ASRC_MASK, 0xFFFFFFFF, }, + { CS42L43_HPOUT_MASK, 0xFFFFFFFF, }, + }; int ret; - /* - * Don't care about being resumed here, but the driver does want - * force_resume to always trigger an actual resume, so that register - * state for the MCU/GPIOs is returned as soon as possible after system - * resume. force_resume will resume if the reference count is resumed on - * suspend hence the get_noresume. 
- */ - pm_runtime_get_noresume(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) { + dev_err(cs42l43->dev, "Failed to resume for suspend: %d\n", ret); + return ret; + } + + /* The IRQs will be re-enabled on resume by the cache sync */ + ret = regmap_multi_reg_write_bypassed(cs42l43->regmap, + mask_all, ARRAY_SIZE(mask_all)); + if (ret) { + dev_err(cs42l43->dev, "Failed to mask IRQs: %d\n", ret); + return ret; + } ret = pm_runtime_force_suspend(dev); if (ret) { @@ -1132,6 +1156,26 @@ static int cs42l43_suspend(struct device *dev) if (ret) return ret; + disable_irq(cs42l43->irq); + + return 0; +} + +static int cs42l43_suspend_noirq(struct device *dev) +{ + struct cs42l43 *cs42l43 = dev_get_drvdata(dev); + + enable_irq(cs42l43->irq); + + return 0; +} + +static int cs42l43_resume_noirq(struct device *dev) +{ + struct cs42l43 *cs42l43 = dev_get_drvdata(dev); + + disable_irq(cs42l43->irq); + return 0; } @@ -1144,6 +1188,8 @@ static int cs42l43_resume(struct device *dev) if (ret) return ret; + enable_irq(cs42l43->irq); + ret = pm_runtime_force_resume(dev); if (ret) { dev_err(cs42l43->dev, "Failed to force resume: %d\n", ret); @@ -1211,6 +1257,7 @@ err: EXPORT_NS_GPL_DEV_PM_OPS(cs42l43_pm_ops, MFD_CS42L43) = { SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume) + NOIRQ_SYSTEM_SLEEP_PM_OPS(cs42l43_suspend_noirq, cs42l43_resume_noirq) RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL) }; diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c index be5f2b34e18a..80fc5c0cac2f 100644 --- a/drivers/mfd/da9052-spi.c +++ b/drivers/mfd/da9052-spi.c @@ -37,7 +37,7 @@ static int da9052_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, da9052); config = da9052_regmap_config; - config.read_flag_mask = 1; + config.write_flag_mask = 1; config.reg_bits = 7; config.pad_bits = 1; config.val_bits = 8; diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c index e58990c85ed8..6a585173230b 100644 --- a/drivers/mfd/exynos-lpass.c +++ b/drivers/mfd/exynos-lpass.c @@ -179,13 +179,13 @@ static const struct of_device_id exynos_lpass_of_match[] = { MODULE_DEVICE_TABLE(of, exynos_lpass_of_match); static struct platform_driver exynos_lpass_driver = { - .driver = { + .driver = { .name = "exynos-lpass", .pm = &lpass_pm_ops, .of_match_table = exynos_lpass_of_match, }, .probe = exynos_lpass_probe, - .remove_new = exynos_lpass_remove, + .remove = exynos_lpass_remove, }; module_platform_driver(exynos_lpass_driver); diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index 2e4ab2404154..6fe388da6fb6 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -211,7 +211,7 @@ static struct platform_driver mx25_tsadc_driver = { .of_match_table = mx25_tsadc_ids, }, .probe = mx25_tsadc_probe, - .remove_new = mx25_tsadc_remove, + .remove = mx25_tsadc_remove, }; module_platform_driver(mx25_tsadc_driver); diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c index 5f61909c85e9..3b4ffcbbee20 100644 --- a/drivers/mfd/hi655x-pmic.c +++ b/drivers/mfd/hi655x-pmic.c @@ -159,12 +159,12 @@ static const struct of_device_id hi655x_pmic_match[] = { MODULE_DEVICE_TABLE(of, hi655x_pmic_match); static struct platform_driver hi655x_pmic_driver = { - .driver = { - .name = "hi655x-pmic", + .driver = { + .name = "hi655x-pmic", .of_match_table = hi655x_pmic_match, }, - .probe = hi655x_pmic_probe, - .remove_new = hi655x_pmic_remove, + .probe = hi655x_pmic_probe, + .remove = hi655x_pmic_remove, }; module_platform_driver(hi655x_pmic_driver); diff 
--git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c index 2a83f8678f1d..557061856e58 100644 --- a/drivers/mfd/intel-lpss-acpi.c +++ b/drivers/mfd/intel-lpss-acpi.c @@ -208,7 +208,7 @@ static void intel_lpss_acpi_remove(struct platform_device *pdev) static struct platform_driver intel_lpss_acpi_driver = { .probe = intel_lpss_acpi_probe, - .remove_new = intel_lpss_acpi_remove, + .remove = intel_lpss_acpi_remove, .driver = { .name = "intel-lpss", .acpi_match_table = intel_lpss_acpi_ids, diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index ccd76800d8e4..9d89171d83f9 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c @@ -6,16 +6,27 @@ */ #include <linux/acpi.h> +#include <linux/array_size.h> #include <linux/bits.h> #include <linux/delay.h> +#include <linux/device.h> #include <linux/err.h> +#include <linux/errno.h> +#include <linux/gfp_types.h> #include <linux/interrupt.h> -#include <linux/kernel.h> +#include <linux/ioport.h> +#include <linux/kstrtox.h> #include <linux/mfd/core.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/mfd/intel_soc_pmic_bxtwc.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_data/x86/intel_scu_ipc.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/regmap.h> +#include <linux/sysfs.h> +#include <linux/types.h> /* PMIC device registers */ #define REG_ADDR_MASK GENMASK(15, 8) @@ -148,6 +159,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip = { static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { .name = "bxtwc_irq_chip_pwrbtn", + .domain_suffix = "PWRBTN", .status_base = BXTWC_PWRBTNIRQ, .mask_base = BXTWC_MPWRBTNIRQ, .irqs = bxtwc_regmap_irqs_pwrbtn, @@ -157,6 +169,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { .name = "bxtwc_irq_chip_tmu", + .domain_suffix = "TMU", .status_base = BXTWC_TMUIRQ, .mask_base = BXTWC_MTMUIRQ, .irqs = bxtwc_regmap_irqs_tmu, @@ -166,6 +179,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = { .name = "bxtwc_irq_chip_bcu", + .domain_suffix = "BCU", .status_base = BXTWC_BCUIRQ, .mask_base = BXTWC_MBCUIRQ, .irqs = bxtwc_regmap_irqs_bcu, @@ -175,6 +189,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = { static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = { .name = "bxtwc_irq_chip_adc", + .domain_suffix = "ADC", .status_base = BXTWC_ADCIRQ, .mask_base = BXTWC_MADCIRQ, .irqs = bxtwc_regmap_irqs_adc, @@ -184,6 +199,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = { static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = { .name = "bxtwc_irq_chip_chgr", + .domain_suffix = "CHGR", .status_base = BXTWC_CHGR0IRQ, .mask_base = BXTWC_MCHGR0IRQ, .irqs = bxtwc_regmap_irqs_chgr, @@ -193,6 +209,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = { static const struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = { .name = "bxtwc_irq_chip_crit", + .domain_suffix = "CRIT", .status_base = BXTWC_CRITIRQ, .mask_base = BXTWC_MCRITIRQ, .irqs = bxtwc_regmap_irqs_crit, @@ -231,43 +248,54 @@ static const struct resource tmu_resources[] = { static struct mfd_cell bxt_wc_dev[] = { { - .name = "bxt_wcove_gpadc", - .num_resources = ARRAY_SIZE(adc_resources), - .resources = adc_resources, - }, - { .name = "bxt_wcove_thermal", 
.num_resources = ARRAY_SIZE(thermal_resources), .resources = thermal_resources, }, { - .name = "bxt_wcove_usbc", - .num_resources = ARRAY_SIZE(usbc_resources), - .resources = usbc_resources, + .name = "bxt_wcove_gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = gpio_resources, }, { - .name = "bxt_wcove_ext_charger", - .num_resources = ARRAY_SIZE(charger_resources), - .resources = charger_resources, + .name = "bxt_wcove_region", }, +}; + +static const struct mfd_cell bxt_wc_tmu_dev[] = { + { + .name = "bxt_wcove_tmu", + .num_resources = ARRAY_SIZE(tmu_resources), + .resources = tmu_resources, + }, +}; + +static const struct mfd_cell bxt_wc_bcu_dev[] = { { .name = "bxt_wcove_bcu", .num_resources = ARRAY_SIZE(bcu_resources), .resources = bcu_resources, }, +}; + +static const struct mfd_cell bxt_wc_adc_dev[] = { { - .name = "bxt_wcove_tmu", - .num_resources = ARRAY_SIZE(tmu_resources), - .resources = tmu_resources, + .name = "bxt_wcove_gpadc", + .num_resources = ARRAY_SIZE(adc_resources), + .resources = adc_resources, }, +}; +static struct mfd_cell bxt_wc_chgr_dev[] = { { - .name = "bxt_wcove_gpio", - .num_resources = ARRAY_SIZE(gpio_resources), - .resources = gpio_resources, + .name = "bxt_wcove_usbc", + .num_resources = ARRAY_SIZE(usbc_resources), + .resources = usbc_resources, }, { - .name = "bxt_wcove_region", + .name = "bxt_wcove_ext_charger", + .num_resources = ARRAY_SIZE(charger_resources), + .resources = charger_resources, }, }; @@ -347,6 +375,7 @@ static ssize_t addr_store(struct device *dev, return count; } +static DEVICE_ATTR_ADMIN_RW(addr); static ssize_t val_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -383,23 +412,14 @@ static ssize_t val_store(struct device *dev, } return count; } - -static DEVICE_ATTR_ADMIN_RW(addr); static DEVICE_ATTR_ADMIN_RW(val); + static struct attribute *bxtwc_attrs[] = { &dev_attr_addr.attr, &dev_attr_val.attr, NULL }; - -static const struct attribute_group bxtwc_group = { - .attrs = bxtwc_attrs, -}; - -static const struct attribute_group *bxtwc_groups[] = { - &bxtwc_group, - NULL -}; +ATTRIBUTE_GROUPS(bxtwc); static const struct regmap_config bxtwc_regmap_config = { .reg_bits = 16, @@ -414,15 +434,39 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { - int irq; + struct device *dev = pmic->dev; + int irq, ret; irq = regmap_irq_get_virq(pdata, pirq); if (irq < 0) - return dev_err_probe(pmic->dev, irq, "Failed to get parent vIRQ(%d) for chip %s\n", + return dev_err_probe(dev, irq, "Failed to get parent vIRQ(%d) for chip %s\n", pirq, chip->name); - return devm_regmap_add_irq_chip(pmic->dev, pmic->regmap, irq, irq_flags, - 0, chip, data); + ret = devm_regmap_add_irq_chip(dev, pmic->regmap, irq, irq_flags, 0, chip, data); + if (ret) + return dev_err_probe(dev, ret, "Failed to add %s IRQ chip\n", chip->name); + + return 0; +} + +static int bxtwc_add_chained_devices(struct intel_soc_pmic *pmic, + const struct mfd_cell *cells, int n_devs, + struct regmap_irq_chip_data *pdata, + int pirq, int irq_flags, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) +{ + struct device *dev = pmic->dev; + struct irq_domain *domain; + int ret; + + ret = bxtwc_add_chained_irq_chip(pmic, pdata, pirq, irq_flags, chip, data); + if (ret) + return ret; + + domain = regmap_irq_get_domain(*data); + + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, cells, n_devs, NULL, 0, domain); } static int bxtwc_probe(struct 
platform_device *pdev) @@ -466,48 +510,49 @@ static int bxtwc_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Failed to add IRQ chip\n"); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_tmu_dev, ARRAY_SIZE(bxt_wc_tmu_dev), + pmic->irq_chip_data, + BXTWC_TMU_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_tmu, + &pmic->irq_chip_data_tmu); + if (ret) + return ret; + ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, BXTWC_PWRBTN_LVL1_IRQ, IRQF_ONESHOT, &bxtwc_regmap_irq_chip_pwrbtn, &pmic->irq_chip_data_pwrbtn); if (ret) - return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n"); - - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_TMU_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_tmu, - &pmic->irq_chip_data_tmu); - if (ret) - return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n"); + return ret; - /* Add chained IRQ handler for BCU IRQs */ - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_BCU_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_bcu, - &pmic->irq_chip_data_bcu); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_bcu_dev, ARRAY_SIZE(bxt_wc_bcu_dev), + pmic->irq_chip_data, + BXTWC_BCU_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_bcu, + &pmic->irq_chip_data_bcu); if (ret) - return dev_err_probe(dev, ret, "Failed to add BUC IRQ chip\n"); + return ret; - /* Add chained IRQ handler for ADC IRQs */ - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_ADC_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_adc, - &pmic->irq_chip_data_adc); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_adc_dev, ARRAY_SIZE(bxt_wc_adc_dev), + pmic->irq_chip_data, + BXTWC_ADC_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_adc, + &pmic->irq_chip_data_adc); if (ret) - return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n"); + return ret; - /* Add chained IRQ handler for CHGR IRQs */ - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_CHGR_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_chgr, - &pmic->irq_chip_data_chgr); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_chgr_dev, ARRAY_SIZE(bxt_wc_chgr_dev), + pmic->irq_chip_data, + BXTWC_CHGR_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_chgr, + &pmic->irq_chip_data_chgr); if (ret) - return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n"); + return ret; /* Add chained IRQ handler for CRIT IRQs */ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, @@ -516,7 +561,7 @@ static int bxtwc_probe(struct platform_device *pdev) &bxtwc_regmap_irq_chip_crit, &pmic->irq_chip_data_crit); if (ret) - return dev_err_probe(dev, ret, "Failed to add CRIT IRQ chip\n"); + return ret; ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, bxt_wc_dev, ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL); @@ -571,7 +616,7 @@ static struct platform_driver bxtwc_driver = { .probe = bxtwc_probe, .shutdown = bxtwc_shutdown, .driver = { - .name = "BXTWC PMIC", + .name = "intel_soc_pmic_bxtwc", .pm = pm_sleep_ptr(&bxtwc_pm_ops), .acpi_match_table = bxtwc_acpi_ids, .dev_groups = bxtwc_groups, diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c index 2a83f540d4c9..aa71a7d83fcd 100644 --- a/drivers/mfd/intel_soc_pmic_chtwc.c +++ b/drivers/mfd/intel_soc_pmic_chtwc.c @@ -267,7 +267,7 @@ static const struct acpi_device_id cht_wc_acpi_ids[] = { static struct i2c_driver cht_wc_driver = { .driver = { - .name = "CHT Whiskey Cove PMIC", + .name = "intel_soc_pmic_chtwc", .pm = pm_sleep_ptr(&cht_wc_pm_ops), 
.acpi_match_table = cht_wc_acpi_ids, }, diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c index 876d017f74fe..879fbf5cd162 100644 --- a/drivers/mfd/intel_soc_pmic_crc.c +++ b/drivers/mfd/intel_soc_pmic_crc.c @@ -259,12 +259,19 @@ static const struct acpi_device_id crystal_cove_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, crystal_cove_acpi_match); +static const struct i2c_device_id crystal_cove_i2c_match[] = { + { "intel_soc_pmic_crc" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, crystal_cove_i2c_match); + static struct i2c_driver crystal_cove_i2c_driver = { .driver = { - .name = "crystal_cove_i2c", + .name = "intel_soc_pmic_crc", .pm = pm_sleep_ptr(&crystal_cove_pm_ops), .acpi_match_table = crystal_cove_acpi_match, }, + .id_table = crystal_cove_i2c_match, .probe = crystal_cove_i2c_probe, .remove = crystal_cove_i2c_remove, .shutdown = crystal_cove_shutdown, diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c index c964ea6539aa..2370b44e2214 100644 --- a/drivers/mfd/ipaq-micro.c +++ b/drivers/mfd/ipaq-micro.c @@ -130,6 +130,7 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data) default: dev_err(micro->dev, "unknown msg %d [%d] %*ph\n", id, len, len, data); + break; } spin_unlock(&micro->lock); } diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index 8a332852bf97..c5bfb6440a93 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -486,7 +486,7 @@ static struct platform_driver kempld_driver = { .dev_groups = pld_groups, }, .probe = kempld_probe, - .remove_new = kempld_remove, + .remove = kempld_remove, }; static const struct dmi_system_id kempld_dmi_table[] __initconst = { diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c index 3883e472b739..228c4a2f4c1f 100644 --- a/drivers/mfd/mcp-sa11x0.c +++ b/drivers/mfd/mcp-sa11x0.c @@ -286,7 +286,7 @@ static const struct dev_pm_ops mcp_sa11x0_pm_ops = { static struct platform_driver mcp_sa11x0_driver = { .probe = mcp_sa11x0_probe, - .remove_new = mcp_sa11x0_remove, + .remove = mcp_sa11x0_remove, .driver = { .name = DRIVER_NAME, .pm = pm_sleep_ptr(&mcp_sa11x0_pm_ops), diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c index c2939e785818..0e5d59ae064a 100644 --- a/drivers/mfd/mt6397-core.c +++ b/drivers/mfd/mt6397-core.c @@ -13,12 +13,14 @@ #include <linux/regmap.h> #include <linux/mfd/core.h> #include <linux/mfd/mt6323/core.h> +#include <linux/mfd/mt6328/core.h> #include <linux/mfd/mt6331/core.h> #include <linux/mfd/mt6357/core.h> #include <linux/mfd/mt6358/core.h> #include <linux/mfd/mt6359/core.h> #include <linux/mfd/mt6397/core.h> #include <linux/mfd/mt6323/registers.h> +#include <linux/mfd/mt6328/registers.h> #include <linux/mfd/mt6331/registers.h> #include <linux/mfd/mt6357/registers.h> #include <linux/mfd/mt6358/registers.h> @@ -87,6 +89,13 @@ static const struct resource mt6323_keys_resources[] = { DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"), }; +static const struct resource mt6328_keys_resources[] = { + DEFINE_RES_IRQ_NAMED(MT6328_IRQ_STATUS_PWRKEY, "powerkey"), + DEFINE_RES_IRQ_NAMED(MT6328_IRQ_STATUS_HOMEKEY, "homekey"), + DEFINE_RES_IRQ_NAMED(MT6328_IRQ_STATUS_PWRKEY_R, "powerkey_r"), + DEFINE_RES_IRQ_NAMED(MT6328_IRQ_STATUS_HOMEKEY_R, "homekey_r"), +}; + static const struct resource mt6357_keys_resources[] = { DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY, "powerkey"), DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY, "homekey"), @@ -133,6 +142,18 @@ static const struct mfd_cell mt6323_devs[] = { }, }; +static const
struct mfd_cell mt6328_devs[] = { + { + .name = "mt6328-regulator", + .of_compatible = "mediatek,mt6328-regulator" + }, { + .name = "mtk-pmic-keys", + .num_resources = ARRAY_SIZE(mt6328_keys_resources), + .resources = mt6328_keys_resources, + .of_compatible = "mediatek,mt6328-keys" + }, +}; + static const struct mfd_cell mt6357_devs[] = { { .name = "mt6359-auxadc", @@ -262,6 +283,14 @@ static const struct chip_data mt6323_core = { .irq_init = mt6397_irq_init, }; +static const struct chip_data mt6328_core = { + .cid_addr = MT6328_HWCID, + .cid_shift = 0, + .cells = mt6328_devs, + .cell_size = ARRAY_SIZE(mt6328_devs), + .irq_init = mt6397_irq_init, +}; + static const struct chip_data mt6357_core = { .cid_addr = MT6357_SWCID, .cid_shift = 8, @@ -361,6 +390,9 @@ static const struct of_device_id mt6397_of_match[] = { .compatible = "mediatek,mt6323", .data = &mt6323_core, }, { + .compatible = "mediatek,mt6328", + .data = &mt6328_core, + }, { .compatible = "mediatek,mt6331", .data = &mt6331_mt6332_core, }, { diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c index 886745b5b607..1310665200ed 100644 --- a/drivers/mfd/mt6397-irq.c +++ b/drivers/mfd/mt6397-irq.c @@ -11,6 +11,8 @@ #include <linux/suspend.h> #include <linux/mfd/mt6323/core.h> #include <linux/mfd/mt6323/registers.h> +#include <linux/mfd/mt6328/core.h> +#include <linux/mfd/mt6328/registers.h> #include <linux/mfd/mt6331/core.h> #include <linux/mfd/mt6331/registers.h> #include <linux/mfd/mt6397/core.h> @@ -31,6 +33,9 @@ static void mt6397_irq_sync_unlock(struct irq_data *data) mt6397->irq_masks_cur[0]); regmap_write(mt6397->regmap, mt6397->int_con[1], mt6397->irq_masks_cur[1]); + if (mt6397->int_con[2]) + regmap_write(mt6397->regmap, mt6397->int_con[2], + mt6397->irq_masks_cur[2]); mutex_unlock(&mt6397->irqlock); } @@ -105,6 +110,8 @@ static irqreturn_t mt6397_irq_thread(int irq, void *data) mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0); mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16); + if (mt6397->int_status[2]) + mt6397_irq_handle_reg(mt6397, mt6397->int_status[2], 32); return IRQ_HANDLED; } @@ -138,6 +145,9 @@ static int mt6397_irq_pm_notifier(struct notifier_block *notifier, chip->int_con[0], chip->wake_mask[0]); regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]); + if (chip->int_con[2]) + regmap_write(chip->regmap, + chip->int_con[2], chip->wake_mask[2]); enable_irq_wake(chip->irq); break; @@ -146,6 +156,9 @@ static int mt6397_irq_pm_notifier(struct notifier_block *notifier, chip->int_con[0], chip->irq_masks_cur[0]); regmap_write(chip->regmap, chip->int_con[1], chip->irq_masks_cur[1]); + if (chip->int_con[2]) + regmap_write(chip->regmap, + chip->int_con[2], chip->irq_masks_cur[2]); disable_irq_wake(chip->irq); break; @@ -169,6 +182,14 @@ int mt6397_irq_init(struct mt6397_chip *chip) chip->int_status[0] = MT6323_INT_STATUS0; chip->int_status[1] = MT6323_INT_STATUS1; break; + case MT6328_CHIP_ID: + chip->int_con[0] = MT6328_INT_CON0; + chip->int_con[1] = MT6328_INT_CON1; + chip->int_con[2] = MT6328_INT_CON2; + chip->int_status[0] = MT6328_INT_STATUS0; + chip->int_status[1] = MT6328_INT_STATUS1; + chip->int_status[2] = MT6328_INT_STATUS2; + break; case MT6331_CHIP_ID: chip->int_con[0] = MT6331_INT_CON0; chip->int_con[1] = MT6331_INT_CON1; @@ -191,6 +212,8 @@ int mt6397_irq_init(struct mt6397_chip *chip) /* Mask all interrupt sources */ regmap_write(chip->regmap, chip->int_con[0], 0x0); regmap_write(chip->regmap, chip->int_con[1], 0x0); + if (chip->int_con[2]) + regmap_write(chip->regmap, 
chip->int_con[2], 0x0); chip->pm_nb.notifier_call = mt6397_irq_pm_notifier; chip->irq_domain = irq_domain_add_linear(chip->dev->of_node, diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c index b2ebb5433121..64afc7631790 100644 --- a/drivers/mfd/mxs-lradc.c +++ b/drivers/mfd/mxs-lradc.c @@ -243,7 +243,7 @@ static struct platform_driver mxs_lradc_driver = { .of_match_table = mxs_lradc_dt_ids, }, .probe = mxs_lradc_probe, - .remove_new = mxs_lradc_remove, + .remove = mxs_lradc_remove, }; module_platform_driver(mxs_lradc_driver); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 6de7ba752345..a77b6fc790f2 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -843,7 +843,7 @@ static struct platform_driver usbhs_omap_driver = { .of_match_table = usbhs_omap_dt_ids, }, .probe = usbhs_omap_probe, - .remove_new = usbhs_omap_remove, + .remove = usbhs_omap_remove, }; MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>"); diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 5f25ac514ff2..0f7fdb99c809 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -301,7 +301,7 @@ static struct platform_driver usbtll_omap_driver = { .of_match_table = usbtll_omap_dt_ids, }, .probe = usbtll_omap_probe, - .remove_new = usbtll_omap_remove, + .remove = usbtll_omap_remove, }; int omap_tll_init(struct usbhs_omap_platform_data *pdata) diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c index ab55906f91f9..1fbba0e666d5 100644 --- a/drivers/mfd/pcf50633-adc.c +++ b/drivers/mfd/pcf50633-adc.c @@ -243,7 +243,7 @@ static struct platform_driver pcf50633_adc_driver = { .name = "pcf50633-adc", }, .probe = pcf50633_adc_probe, - .remove_new = pcf50633_adc_remove, + .remove = pcf50633_adc_remove, }; module_platform_driver(pcf50633_adc_driver); diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c index 8b6285f687da..f9ebdf5845b8 100644 --- a/drivers/mfd/qcom-pm8xxx.c +++ b/drivers/mfd/qcom-pm8xxx.c @@ -595,7 +595,7 @@ static void pm8xxx_remove(struct platform_device *pdev) static struct platform_driver pm8xxx_driver = { .probe = pm8xxx_probe, - .remove_new = pm8xxx_remove, + .remove = pm8xxx_remove, .driver = { .name = "pm8xxx-core", .of_match_table = pm8xxx_id_table, diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c index 39ab114ea669..71c2b80a4678 100644 --- a/drivers/mfd/rk8xx-core.c +++ b/drivers/mfd/rk8xx-core.c @@ -618,7 +618,7 @@ static int rk808_power_off(struct sys_off_data *data) bit = DEV_OFF; break; case RK808_ID: - reg = RK808_DEVCTRL_REG, + reg = RK808_DEVCTRL_REG; bit = DEV_OFF_RST; break; case RK809_ID: @@ -785,8 +785,8 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap if (ret) return dev_err_probe(dev, ret, "failed to add MFD devices\n"); - if (device_property_read_bool(dev, "rockchip,system-power-controller") || - device_property_read_bool(dev, "system-power-controller")) { + if (device_property_read_bool(dev, "system-power-controller") || + device_property_read_bool(dev, "rockchip,system-power-controller")) { ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE, SYS_OFF_PRIO_HIGH, &rk808_power_off, rk808); diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c index 39f7514aa3d8..738d8b3b9ffe 100644 --- a/drivers/mfd/rohm-bd71828.c +++ b/drivers/mfd/rohm-bd71828.c @@ -32,15 +32,15 @@ static struct gpio_keys_platform_data bd71828_powerkey_data = { }; static const struct resource 
bd71815_rtc_irqs[] = { - DEFINE_RES_IRQ_NAMED(BD71815_INT_RTC0, "bd71815-rtc-alm-0"), - DEFINE_RES_IRQ_NAMED(BD71815_INT_RTC1, "bd71815-rtc-alm-1"), - DEFINE_RES_IRQ_NAMED(BD71815_INT_RTC2, "bd71815-rtc-alm-2"), + DEFINE_RES_IRQ_NAMED(BD71815_INT_RTC0, "bd70528-rtc-alm-0"), + DEFINE_RES_IRQ_NAMED(BD71815_INT_RTC1, "bd70528-rtc-alm-1"), + DEFINE_RES_IRQ_NAMED(BD71815_INT_RTC2, "bd70528-rtc-alm-2"), }; static const struct resource bd71828_rtc_irqs[] = { - DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC0, "bd71828-rtc-alm-0"), - DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC1, "bd71828-rtc-alm-1"), - DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC2, "bd71828-rtc-alm-2"), + DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC0, "bd70528-rtc-alm-0"), + DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC1, "bd70528-rtc-alm-1"), + DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC2, "bd70528-rtc-alm-2"), }; static struct resource bd71815_power_irqs[] = { diff --git a/drivers/mfd/rohm-bd96801.c b/drivers/mfd/rohm-bd96801.c index 714f08ed544b..60ec8db790a7 100644 --- a/drivers/mfd/rohm-bd96801.c +++ b/drivers/mfd/rohm-bd96801.c @@ -5,13 +5,9 @@ * ROHM BD96801 PMIC driver * * This version of the "BD86801 scalable PMIC"'s driver supports only very - * basic set of the PMIC features. Most notably, there is no support for - * the ERRB interrupt and the configurations which should be done when the - * PMIC is in STBY mode. - * - * Supporting the ERRB interrupt would require dropping the regmap-IRQ - * usage or working around (or accepting a presense of) a naming conflict - * in debugFS IRQs. + * basic set of the PMIC features. + * Most notably, there is no support for the configurations which should + * be done when the PMIC is in STBY mode. * * Being able to reliably do the configurations like changing the * regulator safety limits (like limits for the over/under -voltages, over @@ -23,16 +19,14 @@ * be the need to configure these safety limits. Hence it's not simple to * come up with a generic solution. * - * Users who require the ERRB handling and STBY state configurations can - * have a look at the original RFC: + * Users who require the STBY state configurations can have a look at the + * original RFC: * https://lore.kernel.org/all/cover.1712920132.git.mazziesaccount@gmail.com/ - * which implements a workaround to debugFS naming conflict and some of - * the safety limit configurations - but leaves the state change handling - * and synchronization to be implemented. + * which implements some of the safety limit configurations - but leaves the + * state change handling and synchronization to be implemented. * * It would be great to hear (and receive a patch!) 
if you implement the - * STBY configuration support or a proper fix to the debugFS naming - * conflict in your downstream driver ;) + * STBY configuration support or a proper fix in your downstream driver ;) */ #include <linux/i2c.h> @@ -46,6 +40,64 @@ #include <linux/mfd/rohm-bd96801.h> #include <linux/mfd/rohm-generic.h> +static const struct resource regulator_errb_irqs[] = { + DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "bd96801-otp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "bd96801-dbist-err"), + DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "bd96801-eep-err"), + DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "bd96801-abist-err"), + DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "bd96801-prstb-err"), + DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "bd96801-drmoserr1"), + DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "bd96801-drmoserr2"), + DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "bd96801-slave-err"), + DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "bd96801-vref-err"), + DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "bd96801-tsd"), + DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "bd96801-uvlo-err"), + DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "bd96801-ovlo-err"), + DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "bd96801-osc-err"), + DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "bd96801-pon-err"), + DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "bd96801-poff-err"), + DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "bd96801-cmd-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_INT_PRSTB_WDT_ERR, "bd96801-prstb-wdt-err"), + DEFINE_RES_IRQ_NAMED(BD96801_INT_CHIP_IF_ERR, "bd96801-chip-if-err"), + DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "bd96801-int-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "bd96801-buck1-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "bd96801-buck1-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "bd96801-buck1-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "bd96801-buck1-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "bd96801-buck2-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "bd96801-buck2-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "bd96801-buck2-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "bd96801-buck2-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "bd96801-buck3-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "bd96801-buck3-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "bd96801-buck3-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "bd96801-buck3-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "bd96801-buck4-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "bd96801-buck4-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "bd96801-buck4-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "bd96801-buck4-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "bd96801-ldo5-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "bd96801-ldo5-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "bd96801-ldo5-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "bd96801-ldo5-shdn-err"), + + DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "bd96801-ldo6-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "bd96801-ldo6-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "bd96801-ldo6-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "bd96801-ldo6-shdn-err"), 
+ + DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "bd96801-ldo7-pvin-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "bd96801-ldo7-ovp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "bd96801-ldo7-uvp-err"), + DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "bd96801-ldo7-shdn-err"), +}; + static const struct resource regulator_intb_irqs[] = { DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "bd96801-core-thermal"), @@ -90,20 +142,14 @@ static const struct resource regulator_intb_irqs[] = { DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "bd96801-ldo7-undervolt"), }; -static const struct resource wdg_intb_irqs[] = { - DEFINE_RES_IRQ_NAMED(BD96801_WDT_ERR_STAT, "bd96801-wdg"), +enum { + WDG_CELL = 0, + REGULATOR_CELL, }; static struct mfd_cell bd96801_cells[] = { - { - .name = "bd96801-wdt", - .resources = wdg_intb_irqs, - .num_resources = ARRAY_SIZE(wdg_intb_irqs), - }, { - .name = "bd96801-regulator", - .resources = regulator_intb_irqs, - .num_resources = ARRAY_SIZE(regulator_intb_irqs), - }, + [WDG_CELL] = { .name = "bd96801-wdt", }, + [REGULATOR_CELL] = { .name = "bd96801-regulator", }, }; static const struct regmap_range bd96801_volatile_ranges[] = { @@ -128,6 +174,91 @@ static const struct regmap_access_table volatile_regs = { .n_yes_ranges = ARRAY_SIZE(bd96801_volatile_ranges), }; +/* + * For ERRB we need main register bit mapping as bit(0) indicates active IRQ + * in one of the first 3 sub IRQ registers, For INTB we can use default 1 to 1 + * mapping. + */ +static unsigned int bit0_offsets[] = {0, 1, 2}; /* System stat, 3 registers */ +static unsigned int bit1_offsets[] = {3}; /* Buck 1 stat */ +static unsigned int bit2_offsets[] = {4}; /* Buck 2 stat */ +static unsigned int bit3_offsets[] = {5}; /* Buck 3 stat */ +static unsigned int bit4_offsets[] = {6}; /* Buck 4 stat */ +static unsigned int bit5_offsets[] = {7}; /* LDO 5 stat */ +static unsigned int bit6_offsets[] = {8}; /* LDO 6 stat */ +static unsigned int bit7_offsets[] = {9}; /* LDO 7 stat */ + +static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = { + REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit3_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit4_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit5_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit6_offsets), + REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets), +}; + +static const struct regmap_irq bd96801_errb_irqs[] = { + /* Reg 0x52 Fatal ERRB1 */ + REGMAP_IRQ_REG(BD96801_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_DBIST_ERR_STAT, 0, BD96801_DBIST_ERR_MASK), + REGMAP_IRQ_REG(BD96801_EEP_ERR_STAT, 0, BD96801_EEP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_ABIST_ERR_STAT, 0, BD96801_ABIST_ERR_MASK), + REGMAP_IRQ_REG(BD96801_PRSTB_ERR_STAT, 0, BD96801_PRSTB_ERR_MASK), + REGMAP_IRQ_REG(BD96801_DRMOS1_ERR_STAT, 0, BD96801_DRMOS1_ERR_MASK), + REGMAP_IRQ_REG(BD96801_DRMOS2_ERR_STAT, 0, BD96801_DRMOS2_ERR_MASK), + REGMAP_IRQ_REG(BD96801_SLAVE_ERR_STAT, 0, BD96801_SLAVE_ERR_MASK), + /* 0x53 Fatal ERRB2 */ + REGMAP_IRQ_REG(BD96801_VREF_ERR_STAT, 1, BD96801_VREF_ERR_MASK), + REGMAP_IRQ_REG(BD96801_TSD_ERR_STAT, 1, BD96801_TSD_ERR_MASK), + REGMAP_IRQ_REG(BD96801_UVLO_ERR_STAT, 1, BD96801_UVLO_ERR_MASK), + REGMAP_IRQ_REG(BD96801_OVLO_ERR_STAT, 1, BD96801_OVLO_ERR_MASK), + REGMAP_IRQ_REG(BD96801_OSC_ERR_STAT, 1, BD96801_OSC_ERR_MASK), + REGMAP_IRQ_REG(BD96801_PON_ERR_STAT, 1, BD96801_PON_ERR_MASK), + REGMAP_IRQ_REG(BD96801_POFF_ERR_STAT, 1, BD96801_POFF_ERR_MASK), + 
REGMAP_IRQ_REG(BD96801_CMD_SHDN_ERR_STAT, 1, BD96801_CMD_SHDN_ERR_MASK), + /* 0x54 Fatal INTB shadowed to ERRB */ + REGMAP_IRQ_REG(BD96801_INT_PRSTB_WDT_ERR, 2, BD96801_INT_PRSTB_WDT_ERR_MASK), + REGMAP_IRQ_REG(BD96801_INT_CHIP_IF_ERR, 2, BD96801_INT_CHIP_IF_ERR_MASK), + REGMAP_IRQ_REG(BD96801_INT_SHDN_ERR_STAT, 2, BD96801_INT_SHDN_ERR_MASK), + /* Reg 0x55 BUCK1 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_BUCK1_PVIN_ERR_STAT, 3, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK1_OVP_ERR_STAT, 3, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK1_UVP_ERR_STAT, 3, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK1_SHDN_ERR_STAT, 3, BD96801_OUT_SHDN_ERR_MASK), + /* Reg 0x56 BUCK2 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_BUCK2_PVIN_ERR_STAT, 4, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK2_OVP_ERR_STAT, 4, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK2_UVP_ERR_STAT, 4, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK2_SHDN_ERR_STAT, 4, BD96801_OUT_SHDN_ERR_MASK), + /* Reg 0x57 BUCK3 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_BUCK3_PVIN_ERR_STAT, 5, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK3_OVP_ERR_STAT, 5, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK3_UVP_ERR_STAT, 5, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK3_SHDN_ERR_STAT, 5, BD96801_OUT_SHDN_ERR_MASK), + /* Reg 0x58 BUCK4 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_BUCK4_PVIN_ERR_STAT, 6, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK4_OVP_ERR_STAT, 6, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK4_UVP_ERR_STAT, 6, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_BUCK4_SHDN_ERR_STAT, 6, BD96801_OUT_SHDN_ERR_MASK), + /* Reg 0x59 LDO5 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_LDO5_PVIN_ERR_STAT, 7, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO5_OVP_ERR_STAT, 7, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO5_UVP_ERR_STAT, 7, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO5_SHDN_ERR_STAT, 7, BD96801_OUT_SHDN_ERR_MASK), + /* Reg 0x5a LDO6 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_LDO6_PVIN_ERR_STAT, 8, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO6_OVP_ERR_STAT, 8, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO6_UVP_ERR_STAT, 8, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO6_SHDN_ERR_STAT, 8, BD96801_OUT_SHDN_ERR_MASK), + /* Reg 0x5b LDO7 ERR IRQs */ + REGMAP_IRQ_REG(BD96801_LDO7_PVIN_ERR_STAT, 9, BD96801_OUT_PVIN_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO7_OVP_ERR_STAT, 9, BD96801_OUT_OVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO7_UVP_ERR_STAT, 9, BD96801_OUT_UVP_ERR_MASK), + REGMAP_IRQ_REG(BD96801_LDO7_SHDN_ERR_STAT, 9, BD96801_OUT_SHDN_ERR_MASK), +}; + static const struct regmap_irq bd96801_intb_irqs[] = { /* STATUS SYSTEM INTB */ REGMAP_IRQ_REG(BD96801_TW_STAT, 0, BD96801_TW_STAT_MASK), @@ -176,8 +307,25 @@ static const struct regmap_irq bd96801_intb_irqs[] = { REGMAP_IRQ_REG(BD96801_LDO7_UVD_STAT, 7, BD96801_LDO_UVD_STAT_MASK), }; -static struct regmap_irq_chip bd96801_irq_chip_intb = { +static const struct regmap_irq_chip bd96801_irq_chip_errb = { + .name = "bd96801-irq-errb", + .domain_suffix = "errb", + .main_status = BD96801_REG_INT_MAIN, + .num_main_regs = 1, + .irqs = &bd96801_errb_irqs[0], + .num_irqs = ARRAY_SIZE(bd96801_errb_irqs), + .status_base = BD96801_REG_INT_SYS_ERRB1, + .mask_base = BD96801_REG_MASK_SYS_ERRB, + .ack_base = BD96801_REG_INT_SYS_ERRB1, + .init_ack_masked = true, + .num_regs = 10, + .irq_reg_stride = 1, + .sub_reg_offsets = &errb_sub_irq_offsets[0], +}; + +static const 
struct regmap_irq_chip bd96801_irq_chip_intb = { .name = "bd96801-irq-intb", + .domain_suffix = "intb", .main_status = BD96801_REG_INT_MAIN, .num_main_regs = 1, .irqs = &bd96801_intb_irqs[0], @@ -194,16 +342,20 @@ static const struct regmap_config bd96801_regmap_config = { .reg_bits = 8, .val_bits = 8, .volatile_table = &volatile_regs, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, }; static int bd96801_i2c_probe(struct i2c_client *i2c) { - struct regmap_irq_chip_data *intb_irq_data; + struct regmap_irq_chip_data *intb_irq_data, *errb_irq_data; + struct irq_domain *intb_domain, *errb_domain; const struct fwnode_handle *fwnode; - struct irq_domain *intb_domain; + struct resource *regulator_res; + struct resource wdg_irq; struct regmap *regmap; - int ret, intb_irq; + int intb_irq, errb_irq, num_intb, num_errb = 0; + int num_regu_irqs, wdg_irq_no; + int i, ret; fwnode = dev_fwnode(&i2c->dev); if (!fwnode) @@ -213,6 +365,23 @@ static int bd96801_i2c_probe(struct i2c_client *i2c) if (intb_irq < 0) return dev_err_probe(&i2c->dev, intb_irq, "INTB IRQ not configured\n"); + num_intb = ARRAY_SIZE(regulator_intb_irqs); + + /* ERRB may be omitted if processor is powered by the PMIC */ + errb_irq = fwnode_irq_get_byname(fwnode, "errb"); + if (errb_irq < 0) + errb_irq = 0; + + if (errb_irq) + num_errb = ARRAY_SIZE(regulator_errb_irqs); + + num_regu_irqs = num_intb + num_errb; + + regulator_res = devm_kcalloc(&i2c->dev, num_regu_irqs, + sizeof(*regulator_res), GFP_KERNEL); + if (!regulator_res) + return -ENOMEM; + regmap = devm_regmap_init_i2c(i2c, &bd96801_regmap_config); if (IS_ERR(regmap)) return dev_err_probe(&i2c->dev, PTR_ERR(regmap), @@ -230,12 +399,50 @@ static int bd96801_i2c_probe(struct i2c_client *i2c) intb_domain = regmap_irq_get_domain(intb_irq_data); - ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, - bd96801_cells, - ARRAY_SIZE(bd96801_cells), NULL, 0, - intb_domain); + /* + * MFD core code is built to handle only one IRQ domain. BD96801 + * has two domains so we do IRQ mapping here and provide the + * already mapped IRQ numbers to sub-devices. 
+ */ + for (i = 0; i < num_intb; i++) { + struct resource *res = &regulator_res[i]; + + *res = regulator_intb_irqs[i]; + res->start = res->end = irq_create_mapping(intb_domain, + res->start); + } + + wdg_irq_no = irq_create_mapping(intb_domain, BD96801_WDT_ERR_STAT); + wdg_irq = DEFINE_RES_IRQ_NAMED(wdg_irq_no, "bd96801-wdg"); + bd96801_cells[WDG_CELL].resources = &wdg_irq; + bd96801_cells[WDG_CELL].num_resources = 1; + + if (!num_errb) + goto skip_errb; + + ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, errb_irq, IRQF_ONESHOT, + 0, &bd96801_irq_chip_errb, &errb_irq_data); + if (ret) + return dev_err_probe(&i2c->dev, ret, + "Failed to add ERRB IRQ chip\n"); + + errb_domain = regmap_irq_get_domain(errb_irq_data); + + for (i = 0; i < num_errb; i++) { + struct resource *res = &regulator_res[num_intb + i]; + + *res = regulator_errb_irqs[i]; + res->start = res->end = irq_create_mapping(errb_domain, res->start); + } + +skip_errb: + bd96801_cells[REGULATOR_CELL].resources = regulator_res; + bd96801_cells[REGULATOR_CELL].num_resources = num_regu_irqs; + + ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, bd96801_cells, + ARRAY_SIZE(bd96801_cells), NULL, 0, NULL); if (ret) - dev_err(&i2c->dev, "Failed to create subdevices\n"); + dev_err_probe(&i2c->dev, ret, "Failed to create subdevices\n"); return ret; } diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c index 7e23ab3d5842..84ebc96f58e4 100644 --- a/drivers/mfd/rt5033.c +++ b/drivers/mfd/rt5033.c @@ -81,8 +81,8 @@ static int rt5033_i2c_probe(struct i2c_client *i2c) chip_rev = dev_id & RT5033_CHIP_REV_MASK; dev_info(&i2c->dev, "Device found (rev. %d)\n", chip_rev); - ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + ret = devm_regmap_add_irq_chip(rt5033->dev, rt5033->regmap, + rt5033->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0, &rt5033_irq_chip, &rt5033->irq_data); if (ret) { dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index a6b0d7300b2d..cdfe738e1d76 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -34,6 +34,10 @@ static const struct mfd_cell s5m8767_devs[] = { }, }; +static const struct mfd_cell s2dos05_devs[] = { + { .name = "s2dos05-regulator", }, +}; + static const struct mfd_cell s2mps11_devs[] = { { .name = "s2mps11-regulator", }, { .name = "s2mps14-rtc", }, @@ -84,6 +88,9 @@ static const struct of_device_id sec_dt_match[] = { .compatible = "samsung,s5m8767-pmic", .data = (void *)S5M8767X, }, { + .compatible = "samsung,s2dos05", + .data = (void *)S2DOS05, + }, { .compatible = "samsung,s2mps11-pmic", .data = (void *)S2MPS11X, }, { @@ -339,6 +346,10 @@ static int sec_pmic_probe(struct i2c_client *i2c) sec_devs = s5m8767_devs; num_sec_devs = ARRAY_SIZE(s5m8767_devs); break; + case S2DOS05: + sec_devs = s2dos05_devs; + num_sec_devs = ARRAY_SIZE(s2dos05_devs); + break; case S2MPA01: sec_devs = s2mpa01_devs; num_sec_devs = ARRAY_SIZE(s2mpa01_devs); diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index b3592982a83b..0469e85d72cf 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1705,7 +1705,7 @@ static struct platform_driver sm501_plat_driver = { .of_match_table = of_sm501_match_tbl, }, .probe = sm501_plat_probe, - .remove_new = sm501_plat_remove, + .remove = sm501_plat_remove, .suspend = pm_sleep_ptr(sm501_plat_suspend), .resume = pm_sleep_ptr(sm501_plat_resume), }; diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c index 9fd13d88950c..650724e19b88 100644 ---
a/drivers/mfd/stm32-timers.c +++ b/drivers/mfd/stm32-timers.c @@ -326,7 +326,7 @@ MODULE_DEVICE_TABLE(of, stm32_timers_of_match); static struct platform_driver stm32_timers_driver = { .probe = stm32_timers_probe, - .remove_new = stm32_timers_remove, + .remove = stm32_timers_remove, .driver = { .name = "stm32-timers", .of_match_table = stm32_timers_of_match, diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 2ce15f60eb10..3e1d699ba934 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -108,6 +108,8 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res) syscon_config.reg_stride = reg_io_width; syscon_config.val_bits = reg_io_width * 8; syscon_config.max_register = resource_size(&res) - reg_io_width; + if (!syscon_config.max_register) + syscon_config.max_register_is_0 = true; regmap = regmap_init_mmio(NULL, base, &syscon_config); kfree(syscon_config.name); @@ -357,6 +359,9 @@ static int syscon_probe(struct platform_device *pdev) return -ENOMEM; syscon_config.max_register = resource_size(res) - 4; + if (!syscon_config.max_register) + syscon_config.max_register_is_0 = true; + if (pdata) syscon_config.name = pdata->label; syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config); diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index 0c1364d88469..068c25401c6c 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -377,7 +377,7 @@ static struct platform_driver ti_tscadc_driver = { .of_match_table = ti_tscadc_dt_ids, }, .probe = ti_tscadc_probe, - .remove_new = ti_tscadc_remove, + .remove = ti_tscadc_remove, }; diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index 2b9105295f30..710364435b6b 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -544,17 +544,13 @@ static int tps65010_probe(struct i2c_client *client) */ if (client->irq > 0) { status = request_irq(client->irq, tps65010_irq, - IRQF_TRIGGER_FALLING, DRIVER_NAME, tps); + IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN, + DRIVER_NAME, tps); if (status < 0) { dev_dbg(&client->dev, "can't get IRQ %d, err %d\n", client->irq, status); return status; } - /* annoying race here, ideally we'd have an option - * to claim the irq now and enable it later. - * FIXME genirq IRQF_NOAUTOEN now solves that ... 
- */ - disable_irq(client->irq); set_bit(FLAG_IRQ_ENABLE, &tps->flags); } else dev_warn(&client->dev, "IRQ not configured!\n"); diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c index f206a9c50e9d..7098712ea008 100644 --- a/drivers/mfd/tps65911-comparator.c +++ b/drivers/mfd/tps65911-comparator.c @@ -154,7 +154,7 @@ static struct platform_driver tps65911_comparator_driver = { .name = "tps65911-comparator", }, .probe = tps65911_comparator_probe, - .remove_new = tps65911_comparator_remove, + .remove = tps65911_comparator_remove, }; static int __init tps65911_comparator_init(void) diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c index fac02875fe7d..1cba3b67b0fb 100644 --- a/drivers/mfd/tqmx86.c +++ b/drivers/mfd/tqmx86.c @@ -35,11 +35,14 @@ #define TQMX86_REG_BOARD_ID_E39C2 7 #define TQMX86_REG_BOARD_ID_70EB 8 #define TQMX86_REG_BOARD_ID_80UC 9 +#define TQMX86_REG_BOARD_ID_120UC 10 #define TQMX86_REG_BOARD_ID_110EB 11 #define TQMX86_REG_BOARD_ID_E40M 12 #define TQMX86_REG_BOARD_ID_E40S 13 #define TQMX86_REG_BOARD_ID_E40C1 14 #define TQMX86_REG_BOARD_ID_E40C2 15 +#define TQMX86_REG_BOARD_ID_130UC 16 +#define TQMX86_REG_BOARD_ID_E41S 19 #define TQMX86_REG_BOARD_REV 0x01 #define TQMX86_REG_IO_EXT_INT 0x06 #define TQMX86_REG_IO_EXT_INT_NONE 0 @@ -47,6 +50,7 @@ #define TQMX86_REG_IO_EXT_INT_9 2 #define TQMX86_REG_IO_EXT_INT_12 3 #define TQMX86_REG_IO_EXT_INT_MASK 0x3 +#define TQMX86_REG_IO_EXT_INT_I2C1_SHIFT 0 #define TQMX86_REG_IO_EXT_INT_GPIO_SHIFT 4 #define TQMX86_REG_SAUC 0x17 @@ -55,23 +59,36 @@ static uint gpio_irq; module_param(gpio_irq, uint, 0); -MODULE_PARM_DESC(gpio_irq, "GPIO IRQ number (7, 9, 12)"); +MODULE_PARM_DESC(gpio_irq, "GPIO IRQ number (valid parameters: 7, 9, 12)"); -static const struct resource tqmx_i2c_soft_resources[] = { - DEFINE_RES_IO(TQMX86_IOBASE_I2C, TQMX86_IOSIZE_I2C), +static uint i2c1_irq; +module_param(i2c1_irq, uint, 0); +MODULE_PARM_DESC(i2c1_irq, "I2C1 IRQ number (valid parameters: 7, 9, 12)"); + +enum tqmx86_i2c1_resource_type { + TQMX86_I2C1_IO, + TQMX86_I2C1_IRQ, +}; + +static struct resource tqmx_i2c_soft_resources[] = { + [TQMX86_I2C1_IO] = DEFINE_RES_IO(TQMX86_IOBASE_I2C, TQMX86_IOSIZE_I2C), + /* Placeholder for IRQ resource */ + [TQMX86_I2C1_IRQ] = {}, }; static const struct resource tqmx_watchdog_resources[] = { DEFINE_RES_IO(TQMX86_IOBASE_WATCHDOG, TQMX86_IOSIZE_WATCHDOG), }; -/* - * The IRQ resource must be first, since it is updated with the - * configured IRQ in the probe function. 
- */ +enum tqmx86_gpio_resource_type { + TQMX86_GPIO_IO, + TQMX86_GPIO_IRQ, +}; + static struct resource tqmx_gpio_resources[] = { - DEFINE_RES_IRQ(0), - DEFINE_RES_IO(TQMX86_IOBASE_GPIO, TQMX86_IOSIZE_GPIO), + [TQMX86_GPIO_IO] = DEFINE_RES_IO(TQMX86_IOBASE_GPIO, TQMX86_IOSIZE_GPIO), + /* Placeholder for IRQ resource */ + [TQMX86_GPIO_IRQ] = {}, }; static struct i2c_board_info tqmx86_i2c_devices[] = { @@ -132,6 +149,8 @@ static const char *tqmx86_board_id_to_name(u8 board_id, u8 sauc) return "TQMx70EB"; case TQMX86_REG_BOARD_ID_80UC: return "TQMx80UC"; + case TQMX86_REG_BOARD_ID_120UC: + return "TQMx120UC"; case TQMX86_REG_BOARD_ID_110EB: return "TQMx110EB"; case TQMX86_REG_BOARD_ID_E40M: @@ -142,6 +161,10 @@ static const char *tqmx86_board_id_to_name(u8 board_id, u8 sauc) return "TQMxE40C1"; case TQMX86_REG_BOARD_ID_E40C2: return "TQMxE40C2"; + case TQMX86_REG_BOARD_ID_130UC: + return "TQMx130UC"; + case TQMX86_REG_BOARD_ID_E41S: + return "TQMxE41S"; default: return "Unknown"; } @@ -154,11 +177,14 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id) case TQMX86_REG_BOARD_ID_60EB: case TQMX86_REG_BOARD_ID_70EB: case TQMX86_REG_BOARD_ID_80UC: + case TQMX86_REG_BOARD_ID_120UC: case TQMX86_REG_BOARD_ID_110EB: case TQMX86_REG_BOARD_ID_E40M: case TQMX86_REG_BOARD_ID_E40S: case TQMX86_REG_BOARD_ID_E40C1: case TQMX86_REG_BOARD_ID_E40C2: + case TQMX86_REG_BOARD_ID_130UC: + case TQMX86_REG_BOARD_ID_E41S: return 24000; case TQMX86_REG_BOARD_ID_E39MS: case TQMX86_REG_BOARD_ID_E39C1: @@ -174,33 +200,52 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id) } } -static int tqmx86_probe(struct platform_device *pdev) +static int tqmx86_setup_irq(struct device *dev, const char *label, u8 irq, + void __iomem *io_base, u8 reg_shift) { - u8 board_id, sauc, rev, i2c_det, io_ext_int_val; - struct device *dev = &pdev->dev; - u8 gpio_irq_cfg, readback; - const char *board_name; - void __iomem *io_base; - int err; + u8 val, readback; + int irq_cfg; - switch (gpio_irq) { + switch (irq) { case 0: - gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_NONE; + irq_cfg = TQMX86_REG_IO_EXT_INT_NONE; break; case 7: - gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_7; + irq_cfg = TQMX86_REG_IO_EXT_INT_7; break; case 9: - gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_9; + irq_cfg = TQMX86_REG_IO_EXT_INT_9; break; case 12: - gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_12; + irq_cfg = TQMX86_REG_IO_EXT_INT_12; break; default: - pr_err("tqmx86: Invalid GPIO IRQ (%d)\n", gpio_irq); + dev_err(dev, "invalid %s IRQ (%d)\n", label, irq); return -EINVAL; } + val = ioread8(io_base + TQMX86_REG_IO_EXT_INT); + val &= ~(TQMX86_REG_IO_EXT_INT_MASK << reg_shift); + val |= (irq_cfg & TQMX86_REG_IO_EXT_INT_MASK) << reg_shift; + + iowrite8(val, io_base + TQMX86_REG_IO_EXT_INT); + readback = ioread8(io_base + TQMX86_REG_IO_EXT_INT); + if (readback != val) { + dev_warn(dev, "%s interrupts not supported\n", label); + return -EINVAL; + } + + return 0; +} + +static int tqmx86_probe(struct platform_device *pdev) +{ + u8 board_id, sauc, rev, i2c_det; + struct device *dev = &pdev->dev; + const char *board_name; + void __iomem *io_base; + int err; + io_base = devm_ioport_map(dev, TQMX86_IOBASE, TQMX86_IOSIZE); if (!io_base) return -ENOMEM; @@ -221,25 +266,23 @@ static int tqmx86_probe(struct platform_device *pdev) */ i2c_det = inb(TQMX86_REG_I2C_DETECT); - if (gpio_irq_cfg) { - io_ext_int_val = - gpio_irq_cfg << TQMX86_REG_IO_EXT_INT_GPIO_SHIFT; - iowrite8(io_ext_int_val, io_base + TQMX86_REG_IO_EXT_INT); - readback = ioread8(io_base + 
TQMX86_REG_IO_EXT_INT); - if (readback != io_ext_int_val) { - dev_warn(dev, "GPIO interrupts not supported.\n"); - return -EINVAL; - } - - /* Assumes the IRQ resource is first. */ - tqmx_gpio_resources[0].start = gpio_irq; - } else { - tqmx_gpio_resources[0].flags = 0; + if (gpio_irq) { + err = tqmx86_setup_irq(dev, "GPIO", gpio_irq, io_base, + TQMX86_REG_IO_EXT_INT_GPIO_SHIFT); + if (!err) + tqmx_gpio_resources[TQMX86_GPIO_IRQ] = DEFINE_RES_IRQ(gpio_irq); } ocores_platform_data.clock_khz = tqmx86_board_id_to_clk_rate(dev, board_id); if (i2c_det == TQMX86_REG_I2C_DETECT_SOFT) { + if (i2c1_irq) { + err = tqmx86_setup_irq(dev, "I2C1", i2c1_irq, io_base, + TQMX86_REG_IO_EXT_INT_I2C1_SHIFT); + if (!err) + tqmx_i2c_soft_resources[TQMX86_I2C1_IRQ] = DEFINE_RES_IRQ(i2c1_irq); + } + err = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, tqmx86_i2c_soft_dev, ARRAY_SIZE(tqmx86_i2c_soft_dev), diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index c130ffef182f..f89eda4a17fe 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -711,6 +711,10 @@ static struct of_dev_auxdata twl_auxdata_lookup[] = { { /* sentinel */ }, }; +static const struct mfd_cell twl6030_cells[] = { + { .name = "twl6030-clk" }, +}; + static const struct mfd_cell twl6032_cells[] = { { .name = "twl6032-clk" }, }; @@ -861,17 +865,23 @@ twl_probe(struct i2c_client *client) TWL4030_DCDC_GLOBAL_CFG); } - if (id->driver_data == (TWL6030_CLASS | TWL6032_SUBCLASS)) { - status = devm_mfd_add_devices(&client->dev, - PLATFORM_DEVID_NONE, - twl6032_cells, - ARRAY_SIZE(twl6032_cells), - NULL, 0, NULL); + if (twl_class_is_6030()) { + const struct mfd_cell *cells; + int num_cells; + + if (id->driver_data & TWL6032_SUBCLASS) { + cells = twl6032_cells; + num_cells = ARRAY_SIZE(twl6032_cells); + } else { + cells = twl6030_cells; + num_cells = ARRAY_SIZE(twl6030_cells); + } + + status = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE, + cells, num_cells, NULL, 0, NULL); if (status < 0) goto free; - } - if (twl_class_is_6030()) { if (of_device_is_system_power_controller(node)) { if (!pm_power_off) pm_power_off = twl6030_power_off; diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index d436ddf661da..a4a550bafb3c 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c @@ -276,7 +276,7 @@ static struct platform_driver twl4030_audio_driver = { .of_match_table = twl4030_audio_of_match, }, .probe = twl4030_audio_probe, - .remove_new = twl4030_audio_remove, + .remove = twl4030_audio_remove, }; module_platform_driver(twl4030_audio_driver); diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c index fcd182d51981..3c3080e8c8cf 100644 --- a/drivers/mfd/wcd934x.c +++ b/drivers/mfd/wcd934x.c @@ -284,6 +284,7 @@ static const struct slim_device_id wcd934x_slim_id[] = { SLIM_DEV_IDX_WCD9340, SLIM_DEV_INSTANCE_ID_WCD9340 }, {} }; +MODULE_DEVICE_TABLE(slim, wcd934x_slim_id); static struct slim_driver wcd934x_slim_driver = { .driver = { @@ -298,5 +299,4 @@ static struct slim_driver wcd934x_slim_driver = { module_slim_driver(wcd934x_slim_driver); MODULE_DESCRIPTION("WCD934X slim driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("slim:217:250:*"); MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index ca872e3465ed..0a7c7f29406c 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -207,6 +207,8 @@ AT24_CHIP_DATA(at24_data_24cs64, 16, AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | 
AT24_FLAG_READONLY); AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16); +/* M24256E Additional Write lockable page (M24256E-F order codes) */ +AT24_CHIP_DATA(at24_data_24256e_wlp, 64, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA_BS(at24_data_24c1025, 1048576 / 8, AT24_FLAG_ADDR16, 2); @@ -240,6 +242,7 @@ static const struct i2c_device_id at24_ids[] = { { "24cs64", (kernel_ulong_t)&at24_data_24cs64 }, { "24c128", (kernel_ulong_t)&at24_data_24c128 }, { "24c256", (kernel_ulong_t)&at24_data_24c256 }, + { "24256e-wl", (kernel_ulong_t)&at24_data_24256e_wlp }, { "24c512", (kernel_ulong_t)&at24_data_24c512 }, { "24c1024", (kernel_ulong_t)&at24_data_24c1024 }, { "24c1025", (kernel_ulong_t)&at24_data_24c1025 }, @@ -278,6 +281,7 @@ static const struct of_device_id __maybe_unused at24_of_match[] = { { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 }, { .compatible = "microchip,24aa025e48", .data = &at24_data_24aa025e48 }, { .compatible = "microchip,24aa025e64", .data = &at24_data_24aa025e64 }, + { .compatible = "st,24256e-wl", .data = &at24_data_24256e_wlp }, { /* END OF LIST */ }, }; MODULE_DEVICE_TABLE(of, at24_of_match); diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 62ba01525479..376047beea3d 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -445,7 +445,7 @@ static void lkdtm_FAM_BOUNDS(void) pr_err("FAIL: survived access of invalid flexible array member index!\n"); - if (!__has_attribute(__counted_by__)) + if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY)) pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n", lkdtm_kernel_info); else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS)) diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 9f2223d3e8e1..7c91429a670b 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1779,10 +1779,8 @@ static int __xipram do_write_oneword_retry(struct map_info *map, map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ - if (++retry_cnt <= MAX_RETRIES) { - ret = 0; + if (++retry_cnt <= MAX_RETRIES) goto retry; - } } xip_enable(map, chip, adr); diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c index 74f559bf8dfb..2edc46741774 100644 --- a/drivers/mtd/devices/bcm47xxsflash.c +++ b/drivers/mtd/devices/bcm47xxsflash.c @@ -367,7 +367,7 @@ static void bcm47xxsflash_bcma_remove(struct platform_device *pdev) static struct platform_driver bcma_sflash_driver = { .probe = bcm47xxsflash_bcma_probe, - .remove_new = bcm47xxsflash_bcma_remove, + .remove = bcm47xxsflash_bcma_remove, .driver = { .name = "bcma_sflash", }, diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index a2b643af7019..c93769c233d9 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -2075,7 +2075,7 @@ static struct platform_driver g3_driver = { }, .suspend = docg3_suspend, .resume = docg3_resume, - .remove_new = docg3_release, + .remove = docg3_release, }; module_platform_driver_probe(g3_driver, docg3_probe); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 1bf192f229d7..f756c60a4931 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -399,7 +399,7 @@ static void 
phram_remove(struct platform_device *pdev) static struct platform_driver phram_driver = { .probe = phram_probe, - .remove_new = phram_remove, + .remove = phram_remove, .driver = { .name = "phram", .of_match_table = of_match_ptr(phram_of_match), diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 10cd1d9b4885..a12427d9e20f 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -286,7 +286,7 @@ static struct platform_driver powernv_flash_driver = { .name = "powernv_flash", .of_match_table = powernv_flash_match, }, - .remove_new = powernv_flash_release, + .remove = powernv_flash_release, .probe = powernv_flash_probe, }; diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 1574296d47e2..f02f96bff450 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -1093,7 +1093,7 @@ static struct platform_driver spear_smi_driver = { .pm = &spear_smi_pm_ops, }, .probe = spear_smi_probe, - .remove_new = spear_smi_remove, + .remove = spear_smi_remove, }; module_platform_driver(spear_smi_driver); diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 3268de5fc780..dba584fa2a53 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -2132,7 +2132,7 @@ MODULE_DEVICE_TABLE(of, stfsm_match); static struct platform_driver stfsm_driver = { .probe = stfsm_probe, - .remove_new = stfsm_remove, + .remove = stfsm_remove, .driver = { .name = "st-spi-fsm", .of_match_table = stfsm_match, diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c index dbe3eb361cca..217f4e69233f 100644 --- a/drivers/mtd/hyperbus/hbmc-am654.c +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -254,7 +254,7 @@ MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids); static struct platform_driver am654_hbmc_platform_driver = { .probe = am654_hbmc_probe, - .remove_new = am654_hbmc_remove, + .remove = am654_hbmc_remove, .driver = { .name = "hbmc-am654", .of_match_table = am654_hbmc_dt_ids, diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index b22aa57119f2..f448e23f9260 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -163,9 +163,16 @@ static void rpcif_hb_remove(struct platform_device *pdev) pm_runtime_disable(hyperbus->rpc.dev); } +static const struct platform_device_id rpc_if_hyperflash_id_table[] = { + { .name = "rpc-if-hyperflash" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, rpc_if_hyperflash_id_table); + static struct platform_driver rpcif_platform_driver = { .probe = rpcif_hb_probe, - .remove_new = rpcif_hb_remove, + .remove = rpcif_hb_remove, + .id_table = rpc_if_hyperflash_id_table, .driver = { .name = "rpc-if-hyperflash", }, diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c index 9169e1155dbb..565b71f7fdd5 100644 --- a/drivers/mtd/lpddr/lpddr2_nvm.c +++ b/drivers/mtd/lpddr/lpddr2_nvm.c @@ -487,7 +487,7 @@ static struct platform_driver lpddr2_nvm_drv = { .name = "lpddr2_nvm", }, .probe = lpddr2_nvm_probe, - .remove_new = lpddr2_nvm_remove, + .remove = lpddr2_nvm_remove, }; module_platform_driver(lpddr2_nvm_drv); diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index 124b13c5d747..80d467eba92a 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c @@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match); static struct platform_driver ltq_mtd_driver = { .probe = 
ltq_mtd_probe, - .remove_new = ltq_mtd_remove, + .remove = ltq_mtd_remove, .driver = { .name = "ltq-nor", .of_match_table = ltq_mtd_match, diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c index 96eb2e782c38..2bd7a1af898c 100644 --- a/drivers/mtd/maps/physmap-core.c +++ b/drivers/mtd/maps/physmap-core.c @@ -621,7 +621,7 @@ static void physmap_flash_shutdown(struct platform_device *dev) static struct platform_driver physmap_flash_driver = { .probe = physmap_flash_probe, - .remove_new = physmap_flash_remove, + .remove = physmap_flash_remove, .shutdown = physmap_flash_shutdown, .driver = { .name = "physmap-flash", diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 8b736f029f81..1c541eaf477a 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -205,7 +205,7 @@ MODULE_ALIAS("platform:mtd-ram"); static struct platform_driver platram_driver = { .probe = platram_probe, - .remove_new = platram_remove, + .remove = platram_remove, .driver = { .name = "mtd-ram", }, diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index f2a2d4706f1f..f27c25db6778 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -128,7 +128,7 @@ static struct platform_driver pxa2xx_flash_driver = { .name = "pxa2xx-flash", }, .probe = pxa2xx_flash_probe, - .remove_new = pxa2xx_flash_remove, + .remove = pxa2xx_flash_remove, .shutdown = pxa2xx_flash_shutdown, }; diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index ac8a0a19a021..6a54a84d0d9c 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -293,7 +293,7 @@ static void sa1100_mtd_remove(struct platform_device *pdev) static struct platform_driver sa1100_mtd_driver = { .probe = sa1100_mtd_probe, - .remove_new = sa1100_mtd_remove, + .remove = sa1100_mtd_remove, .driver = { .name = "sa1100-mtd", }, diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index b69dade3f7ad..ea3aa026b55b 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -149,7 +149,7 @@ static struct platform_driver uflash_driver = { .of_match_table = uflash_match, }, .probe = uflash_probe, - .remove_new = uflash_remove, + .remove = uflash_remove, }; module_platform_driver(uflash_driver); diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 47e10945b8d2..56b56f726b99 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -723,21 +723,21 @@ static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand, return ret; } -static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = { +static const struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = { .init_ctx = mxic_ecc_init_ctx_external, .cleanup_ctx = mxic_ecc_cleanup_ctx, .prepare_io_req = mxic_ecc_prepare_io_req_external, .finish_io_req = mxic_ecc_finish_io_req_external, }; -static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = { +static const struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = { .init_ctx = mxic_ecc_init_ctx_pipelined, .cleanup_ctx = mxic_ecc_cleanup_ctx, .prepare_io_req = mxic_ecc_prepare_io_req_pipelined, .finish_io_req = mxic_ecc_finish_io_req_pipelined, }; -struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) +const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) { return &mxic_ecc_engine_pipelined_ops; } @@ -869,7 +869,7 @@ static struct platform_driver mxic_ecc_driver = { .of_match_table = 
mxic_ecc_of_ids, }, .probe = mxic_ecc_probe, - .remove_new = mxic_ecc_remove, + .remove = mxic_ecc_remove, }; module_platform_driver(mxic_ecc_driver); diff --git a/drivers/mtd/nand/ecc-sw-bch.c b/drivers/mtd/nand/ecc-sw-bch.c index 405552d014a8..0d9310dd6f52 100644 --- a/drivers/mtd/nand/ecc-sw-bch.c +++ b/drivers/mtd/nand/ecc-sw-bch.c @@ -384,7 +384,7 @@ static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand, return max_bitflips; } -static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = { +static const struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = { .init_ctx = nand_ecc_sw_bch_init_ctx, .cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx, .prepare_io_req = nand_ecc_sw_bch_prepare_io_req, diff --git a/drivers/mtd/nand/ecc-sw-hamming.c b/drivers/mtd/nand/ecc-sw-hamming.c index 254db2e7f8bb..f2d0effad9d2 100644 --- a/drivers/mtd/nand/ecc-sw-hamming.c +++ b/drivers/mtd/nand/ecc-sw-hamming.c @@ -638,7 +638,7 @@ static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand, return max_bitflips; } -static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = { +static const struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = { .init_ctx = nand_ecc_sw_hamming_init_ctx, .cleanup_ctx = nand_ecc_sw_hamming_cleanup_ctx, .prepare_io_req = nand_ecc_sw_hamming_prepare_io_req, diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c index 4e7de48f07a6..4e6fd1c34484 100644 --- a/drivers/mtd/nand/onenand/generic.c +++ b/drivers/mtd/nand/onenand/generic.c @@ -104,7 +104,7 @@ static struct platform_driver generic_onenand_driver = { .name = DRIVER_NAME, }, .probe = generic_onenand_probe, - .remove_new = generic_onenand_remove, + .remove = generic_onenand_remove, }; module_platform_driver(generic_onenand_driver); diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c index a12f8f3efd07..f9a386b69050 100644 --- a/drivers/mtd/nand/onenand/onenand_omap2.c +++ b/drivers/mtd/nand/onenand/onenand_omap2.c @@ -593,7 +593,7 @@ MODULE_DEVICE_TABLE(of, omap2_onenand_id_table); static struct platform_driver omap2_onenand_driver = { .probe = omap2_onenand_probe, - .remove_new = omap2_onenand_remove, + .remove = omap2_onenand_remove, .shutdown = omap2_onenand_shutdown, .driver = { .name = DRIVER_NAME, diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c index fd6890a03d55..f37a6138e461 100644 --- a/drivers/mtd/nand/onenand/onenand_samsung.c +++ b/drivers/mtd/nand/onenand/onenand_samsung.c @@ -991,7 +991,7 @@ static struct platform_driver s3c_onenand_driver = { }, .id_table = s3c_onenand_driver_ids, .probe = s3c_onenand_probe, - .remove_new = s3c_onenand_remove, + .remove = s3c_onenand_remove, }; module_platform_driver(s3c_onenand_driver); diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 919816a7aca7..fb2b7db70297 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -432,7 +432,7 @@ MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table); static struct platform_driver gpio_nand_driver = { .probe = gpio_nand_probe, - .remove_new = gpio_nand_remove, + .remove = gpio_nand_remove, .id_table = gpio_nand_plat_id_table, .driver = { .name = "ams-delta-nand", diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 5436ec4a8fde..db42aa0c7b6b 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c 
@@ -1500,7 +1500,7 @@ static struct platform_driver anfc_driver = { .of_match_table = anfc_ids, }, .probe = anfc_probe, - .remove_new = anfc_remove, + .remove = anfc_remove, }; module_platform_driver(anfc_driver); diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index f9ccfd02e804..dedcca87defc 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -2663,7 +2663,7 @@ static struct platform_driver atmel_nand_controller_driver = { .pm = &atmel_nand_controller_pm_ops, }, .probe = atmel_nand_controller_probe, - .remove_new = atmel_nand_controller_remove, + .remove = atmel_nand_controller_remove, }; module_platform_driver(atmel_nand_controller_driver); diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 4d7dc8a9c373..a22aab4ed4e8 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -362,7 +362,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, size = ALIGN(size, sizeof(s32)); size += (req->ecc.strength + 1) * sizeof(s32) * 3; - user = kzalloc(size, GFP_KERNEL); + user = devm_kzalloc(pmecc->dev, size, GFP_KERNEL); if (!user) return ERR_PTR(-ENOMEM); @@ -408,12 +408,6 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, } EXPORT_SYMBOL_GPL(atmel_pmecc_create_user); -void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user) -{ - kfree(user); -} -EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user); - static int get_strength(struct atmel_pmecc_user *user) { const int *strengths = user->pmecc->caps->strengths; diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h index 7851c05126cf..cc0c5af1f4f1 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.h +++ b/drivers/mtd/nand/raw/atmel/pmecc.h @@ -55,8 +55,6 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev); struct atmel_pmecc_user * atmel_pmecc_create_user(struct atmel_pmecc *pmecc, struct atmel_pmecc_user_req *req); -void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); - void atmel_pmecc_reset(struct atmel_pmecc *pmecc); int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); void atmel_pmecc_disable(struct atmel_pmecc_user *user); diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 063a5e0b8d4b..04d64724c400 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -357,7 +357,7 @@ static struct platform_driver au1550nd_driver = { .name = "au1550-nand", }, .probe = au1550nd_probe, - .remove_new = au1550nd_remove, + .remove = au1550nd_remove, }; module_platform_driver(au1550nd_driver); diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c index ebcf508e0606..4d4e185c22e5 100644 --- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c +++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c @@ -70,7 +70,7 @@ static void bcm47xxnflash_remove(struct platform_device *pdev) static struct platform_driver bcm47xxnflash_driver = { .probe = bcm47xxnflash_probe, - .remove_new = bcm47xxnflash_remove, + .remove = bcm47xxnflash_remove, .driver = { .name = "bcma_nflash", }, diff --git a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c index 05b7b653bdf3..a06cd87f839a 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c @@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match); static struct platform_driver bcm6368_nand_driver = { 
.probe = bcm6368_nand_probe, - .remove_new = brcmnand_remove, + .remove = brcmnand_remove, .driver = { .name = "bcm6368_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c index 4e7e435ba339..dd27977919fb 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c @@ -119,7 +119,7 @@ static int brcmnand_bcma_nand_probe(struct platform_device *pdev) static struct platform_driver brcmnand_bcma_nand_driver = { .probe = brcmnand_bcma_nand_probe, - .remove_new = brcmnand_remove, + .remove = brcmnand_remove, .driver = { .name = "bcma_brcmnand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c index ea534850b97a..c31d7f37dc52 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c @@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(of, bcmbca_nand_of_match); static struct platform_driver bcmbca_nand_driver = { .probe = bcmbca_nand_probe, - .remove_new = brcmnand_remove, + .remove = brcmnand_remove, .driver = { .name = "bcmbca_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 1b2ec0fec60c..9c253a511e45 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1561,7 +1561,7 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, (oob[j + 2] << 8) | (oob[j + 3] << 0)); - /* handle the remaing bytes */ + /* handle the remaining bytes */ while (j < tbytes) plast[k++] = oob[j++]; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c index 558f083b92e9..950923d977b7 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c @@ -23,7 +23,7 @@ static int brcmstb_nand_probe(struct platform_device *pdev) static struct platform_driver brcmstb_nand_driver = { .probe = brcmstb_nand_probe, - .remove_new = brcmnand_remove, + .remove = brcmnand_remove, .driver = { .name = "brcmstb_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c index bf46c8b85898..089c70fc6edf 100644 --- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c @@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(of, iproc_nand_of_match); static struct platform_driver iproc_nand_driver = { .probe = iproc_nand_probe, - .remove_new = brcmnand_remove, + .remove = brcmnand_remove, .driver = { .name = "iproc_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 3bc89b356963..8d1d710e439d 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -1891,7 +1891,7 @@ static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl, int len_in_words = (data_dma_width == 4) ? 
len >> 2 : len >> 3; - /* read alingment data */ + /* read alignment data */ if (data_dma_width == 4) ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words); #ifdef CONFIG_64BIT @@ -3055,7 +3055,7 @@ static void cadence_nand_dt_remove(struct platform_device *ofdev) static struct platform_driver cadence_nand_dt_driver = { .probe = cadence_nand_dt_probe, - .remove_new = cadence_nand_dt_remove, + .remove = cadence_nand_dt_remove, .driver = { .name = "cadence-nand-controller", .of_match_table = cadence_nand_dt_ids, diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index f0a15717cf05..341318024a19 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -26,7 +26,7 @@ #define NR_CS553X_CONTROLLERS 4 -#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilitiies */ +#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilities */ #define CAP_CS5535 0x2df000ULL #define CAP_CS5536 0x5df500ULL diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 392678143a36..1f8354acfb50 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -10,15 +10,15 @@ * Dirk Behme <Dirk.Behme@gmail.com> */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> #include <linux/err.h> #include <linux/iopoll.h> -#include <linux/mtd/rawnand.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/mtd/partitions.h> +#include <linux/mtd/rawnand.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/slab.h> -#include <linux/of.h> #define NRCSR_OFFSET 0x00 #define NANDFCR_OFFSET 0x60 @@ -487,10 +487,10 @@ static const struct of_device_id davinci_nand_of_match[] = { }; MODULE_DEVICE_TABLE(of, davinci_nand_of_match); -static struct davinci_nand_pdata - *nand_davinci_get_pdata(struct platform_device *pdev) +static struct davinci_nand_pdata * +nand_davinci_get_pdata(struct platform_device *pdev) { - if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) { + if (!dev_get_platdata(&pdev->dev)) { struct davinci_nand_pdata *pdata; const char *mode; u32 prop; @@ -501,23 +501,24 @@ static struct davinci_nand_pdata pdev->dev.platform_data = pdata; if (!pdata) return ERR_PTR(-ENOMEM); - if (!of_property_read_u32(pdev->dev.of_node, - "ti,davinci-chipselect", &prop)) + if (!device_property_read_u32(&pdev->dev, + "ti,davinci-chipselect", &prop)) pdata->core_chipsel = prop; else return ERR_PTR(-EINVAL); - if (!of_property_read_u32(pdev->dev.of_node, - "ti,davinci-mask-ale", &prop)) + if (!device_property_read_u32(&pdev->dev, + "ti,davinci-mask-ale", &prop)) pdata->mask_ale = prop; - if (!of_property_read_u32(pdev->dev.of_node, - "ti,davinci-mask-cle", &prop)) + if (!device_property_read_u32(&pdev->dev, + "ti,davinci-mask-cle", &prop)) pdata->mask_cle = prop; - if (!of_property_read_u32(pdev->dev.of_node, - "ti,davinci-mask-chipsel", &prop)) + if (!device_property_read_u32(&pdev->dev, + "ti,davinci-mask-chipsel", &prop)) pdata->mask_chipsel = prop; - if (!of_property_read_string(pdev->dev.of_node, - "ti,davinci-ecc-mode", &mode)) { + if (!device_property_read_string(&pdev->dev, + "ti,davinci-ecc-mode", + &mode)) { if (!strncmp("none", mode, 4)) pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE; if (!strncmp("soft", mode, 4)) @@ -525,16 +526,17 @@ static struct davinci_nand_pdata if (!strncmp("hw", mode, 2)) pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; } - if (!of_property_read_u32(pdev->dev.of_node, - "ti,davinci-ecc-bits", 
&prop)) + if (!device_property_read_u32(&pdev->dev, + "ti,davinci-ecc-bits", &prop)) pdata->ecc_bits = prop; - if (!of_property_read_u32(pdev->dev.of_node, - "ti,davinci-nand-buswidth", &prop) && prop == 16) + if (!device_property_read_u32(&pdev->dev, + "ti,davinci-nand-buswidth", + &prop) && prop == 16) pdata->options |= NAND_BUSWIDTH_16; - if (of_property_read_bool(pdev->dev.of_node, - "ti,davinci-nand-use-bbt")) + if (device_property_read_bool(&pdev->dev, + "ti,davinci-nand-use-bbt")) pdata->bbt_options = NAND_BBT_USE_FLASH; /* @@ -548,17 +550,15 @@ static struct davinci_nand_pdata * then use "ti,davinci-nand" as the compatible in your * device-tree file. */ - if (of_device_is_compatible(pdev->dev.of_node, - "ti,keystone-nand")) { + if (device_is_compatible(&pdev->dev, "ti,keystone-nand")) pdata->options |= NAND_NO_SUBPAGE_WRITE; - } } return dev_get_platdata(&pdev->dev); } #else -static struct davinci_nand_pdata - *nand_davinci_get_pdata(struct platform_device *pdev) +static struct davinci_nand_pdata * +nand_davinci_get_pdata(struct platform_device *pdev) { return dev_get_platdata(&pdev->dev); } @@ -901,7 +901,7 @@ static void nand_davinci_remove(struct platform_device *pdev) static struct platform_driver nand_davinci_driver = { .probe = nand_davinci_probe, - .remove_new = nand_davinci_remove, + .remove = nand_davinci_remove, .driver = { .name = "davinci_nand", .of_match_table = of_match_ptr(davinci_nand_of_match), diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index 2f5666511fda..e0dd59bba4bd 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -225,7 +225,7 @@ static void denali_dt_remove(struct platform_device *pdev) static struct platform_driver denali_dt_driver = { .probe = denali_dt_probe, - .remove_new = denali_dt_remove, + .remove = denali_dt_remove, .driver = { .name = "denali-nand-dt", .of_match_table = denali_nand_dt_ids, diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index df6a0d5c86bb..03dbe37df021 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -999,7 +999,7 @@ static struct platform_driver fsl_elbc_nand_driver = { .of_match_table = fsl_elbc_nand_match, }, .probe = fsl_elbc_nand_probe, - .remove_new = fsl_elbc_nand_remove, + .remove = fsl_elbc_nand_remove, }; module_platform_driver(fsl_elbc_nand_driver); diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index f0e2318ce088..7be95d0be248 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -1130,7 +1130,7 @@ static struct platform_driver fsl_ifc_nand_driver = { .of_match_table = fsl_ifc_nand_match, }, .probe = fsl_ifc_nand_probe, - .remove_new = fsl_ifc_nand_remove, + .remove = fsl_ifc_nand_remove, }; module_platform_driver(fsl_ifc_nand_driver); diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 315e9d2b573d..f4dc990a8da1 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -259,7 +259,7 @@ static struct platform_driver of_fun_driver = { .of_match_table = of_fun_match, }, .probe = fun_probe, - .remove_new = fun_remove, + .remove = fun_remove, }; module_platform_driver(of_fun_driver); diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 811982da3557..d579d5dd60d6 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -1221,7 +1221,7 @@ static const struct 
of_device_id fsmc_nand_id_table[] = { MODULE_DEVICE_TABLE(of, fsmc_nand_id_table); static struct platform_driver fsmc_nand_driver = { - .remove_new = fsmc_nand_remove, + .remove = fsmc_nand_remove, .driver = { .name = "fsmc-nand", .of_match_table = fsmc_nand_id_table, diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c index d6cc2cb65214..69e5e43532a4 100644 --- a/drivers/mtd/nand/raw/gpio.c +++ b/drivers/mtd/nand/raw/gpio.c @@ -392,7 +392,7 @@ out_ce: static struct platform_driver gpio_nand_driver = { .probe = gpio_nand_probe, - .remove_new = gpio_nand_remove, + .remove = gpio_nand_remove, .driver = { .name = "gpio-nand", .of_match_table = of_match_ptr(gpio_nand_id_table), diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index e1b515304e3c..d76802944453 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -15,6 +15,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/pinctrl/consumer.h> #include <linux/dma/mxs-dma.h> #include "gpmi-nand.h" #include "gpmi-regs.h" @@ -737,9 +738,8 @@ static int bch_set_geometry(struct gpmi_nand_data *this) if (ret) return ret; - ret = pm_runtime_get_sync(this->dev); + ret = pm_runtime_resume_and_get(this->dev); if (ret < 0) { - pm_runtime_put_autosuspend(this->dev); return ret; } @@ -2761,15 +2761,9 @@ static int gpmi_nand_probe(struct platform_device *pdev) if (ret) goto exit_acquire_resources; - ret = __gpmi_enable_clk(this, true); - if (ret) - goto exit_acquire_resources; - + pm_runtime_enable(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); ret = gpmi_init(this); if (ret) @@ -2779,15 +2773,12 @@ static int gpmi_nand_probe(struct platform_device *pdev) if (ret) goto exit_nfc_init; - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); - dev_info(this->dev, "driver registered.\n"); return 0; exit_nfc_init: - pm_runtime_put(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); release_resources(this); exit_acquire_resources: @@ -2801,23 +2792,23 @@ static void gpmi_nand_remove(struct platform_device *pdev) struct nand_chip *chip = &this->nand; int ret; - pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); - ret = mtd_device_unregister(nand_to_mtd(chip)); WARN_ON(ret); nand_cleanup(chip); gpmi_free_dma_buffer(this); release_resources(this); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); } -#ifdef CONFIG_PM_SLEEP static int gpmi_pm_suspend(struct device *dev) { - struct gpmi_nand_data *this = dev_get_drvdata(dev); + int ret; - release_dma_channels(this); - return 0; + pinctrl_pm_select_sleep_state(dev); + ret = pm_runtime_force_suspend(dev); + + return ret; } static int gpmi_pm_resume(struct device *dev) @@ -2825,9 +2816,13 @@ static int gpmi_pm_resume(struct device *dev) struct gpmi_nand_data *this = dev_get_drvdata(dev); int ret; - ret = acquire_dma_channels(this); - if (ret < 0) + ret = pm_runtime_force_resume(dev); + if (ret) { + dev_err(this->dev, "Error in resume %d\n", ret); return ret; + } + + pinctrl_pm_select_default_state(dev); /* re-init the GPMI registers */ ret = gpmi_init(this); @@ -2849,35 +2844,45 @@ static int gpmi_pm_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM_SLEEP */ 
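The gpmi-nand power-management rework here and just below drops the CONFIG_PM_SLEEP #ifdef and the __maybe_unused annotations in favour of the always-compiled SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() initializers and a pm_ptr()-wrapped ops pointer, so the callbacks are referenced unconditionally and simply discarded by the compiler when power management is configured out. A minimal sketch of that idiom, with a hypothetical "foo" driver standing in for gpmi-nand (the callback bodies are placeholders, not code from this patch):

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks, release anything held only while active. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Ungate clocks again. */
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* Reuse the runtime PM path for system sleep. */
	return pm_runtime_force_suspend(dev);
}

static int foo_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* pm_ptr() evaluates to NULL when CONFIG_PM is off. */
		.pm = pm_ptr(&foo_pm_ops),
	},
};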
-static int __maybe_unused gpmi_runtime_suspend(struct device *dev) +#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true) +#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false) + +static int gpmi_runtime_suspend(struct device *dev) { struct gpmi_nand_data *this = dev_get_drvdata(dev); - return __gpmi_enable_clk(this, false); + gpmi_disable_clk(this); + + return 0; } -static int __maybe_unused gpmi_runtime_resume(struct device *dev) +static int gpmi_runtime_resume(struct device *dev) { struct gpmi_nand_data *this = dev_get_drvdata(dev); + int ret; + + ret = gpmi_enable_clk(this); + if (ret) + return ret; + + return 0; - return __gpmi_enable_clk(this, true); } static const struct dev_pm_ops gpmi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume) - SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume) + RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL) }; static struct platform_driver gpmi_nand_driver = { .driver = { .name = "gpmi-nand", - .pm = &gpmi_pm_ops, + .pm = pm_ptr(&gpmi_pm_ops), .of_match_table = gpmi_nand_id_table, }, - .probe = gpmi_nand_probe, - .remove_new = gpmi_nand_remove, + .probe = gpmi_nand_probe, + .remove = gpmi_nand_remove, }; module_platform_driver(gpmi_nand_driver); diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c index fe291a2e5c77..d97270ec1185 100644 --- a/drivers/mtd/nand/raw/hisi504_nand.c +++ b/drivers/mtd/nand/raw/hisi504_nand.c @@ -858,7 +858,7 @@ static struct platform_driver hisi_nfc_driver = { .pm = &hisi_nfc_pm_ops, }, .probe = hisi_nfc_probe, - .remove_new = hisi_nfc_remove, + .remove = hisi_nfc_remove, }; module_platform_driver(hisi_nfc_driver); diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 0e7dd9ca4b2b..47dc3efcee92 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -549,7 +549,7 @@ MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match); static struct platform_driver ingenic_nand_driver = { .probe = ingenic_nand_probe, - .remove_new = ingenic_nand_remove, + .remove = ingenic_nand_remove, .driver = { .name = DRV_NAME, .of_match_table = ingenic_nand_dt_match, diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c index f0f0522b2fa2..01cefdaf115d 100644 --- a/drivers/mtd/nand/raw/intel-nand-controller.c +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -728,7 +728,7 @@ MODULE_DEVICE_TABLE(of, ebu_nand_match); static struct platform_driver ebu_nand_driver = { .probe = ebu_nand_probe, - .remove_new = ebu_nand_remove, + .remove = ebu_nand_remove, .driver = { .name = "intel-nand-controller", .of_match_table = ebu_nand_match, diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index b9c3adc54c01..19b13ae536d4 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -891,7 +891,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match); static struct platform_driver lpc32xx_nand_driver = { .probe = lpc32xx_nand_probe, - .remove_new = lpc32xx_nand_remove, + .remove = lpc32xx_nand_remove, .resume = pm_ptr(lpc32xx_nand_resume), .suspend = pm_ptr(lpc32xx_nand_suspend), .driver = { diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c index ade971e4cc3b..b54d76547ffb 100644 --- a/drivers/mtd/nand/raw/lpc32xx_slc.c +++ b/drivers/mtd/nand/raw/lpc32xx_slc.c @@ 
-1010,7 +1010,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match); static struct platform_driver lpc32xx_nand_driver = { .probe = lpc32xx_nand_probe, - .remove_new = lpc32xx_nand_remove, + .remove = lpc32xx_nand_remove, .resume = pm_ptr(lpc32xx_nand_resume), .suspend = pm_ptr(lpc32xx_nand_suspend), .driver = { diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index aa113a5d88c8..303b3016a070 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -3183,7 +3183,7 @@ static struct platform_driver marvell_nfc_driver = { }, .id_table = marvell_nfc_platform_ids, .probe = marvell_nfc_probe, - .remove_new = marvell_nfc_remove, + .remove = marvell_nfc_remove, }; module_platform_driver(marvell_nfc_driver); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index fbb06aa305cb..b8834aa96e81 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1621,7 +1621,7 @@ static void meson_nfc_remove(struct platform_device *pdev) static struct platform_driver meson_nfc_driver = { .probe = meson_nfc_probe, - .remove_new = meson_nfc_remove, + .remove = meson_nfc_remove, .driver = { .name = "meson-nand", .of_match_table = meson_nfc_id_table, diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index 215610f808f1..97b4e7f3e1bb 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -835,7 +835,7 @@ MODULE_DEVICE_TABLE(of, mpc5121_nfc_match); static struct platform_driver mpc5121_nfc_driver = { .probe = mpc5121_nfc_probe, - .remove_new = mpc5121_nfc_remove, + .remove = mpc5121_nfc_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc5121_nfc_match, diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 586868b4139f..21c7e1102746 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1640,7 +1640,7 @@ static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume); static struct platform_driver mtk_nfc_driver = { .probe = mtk_nfc_probe, - .remove_new = mtk_nfc_remove, + .remove = mtk_nfc_remove, .driver = { .name = MTK_NAME, .of_match_table = mtk_nfc_id_table, diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 736808150e74..8c56b685bf91 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -1824,7 +1824,7 @@ static struct platform_driver mxcnd_driver = { .of_match_table = mxcnd_dt_ids, }, .probe = mxcnd_probe, - .remove_new = mxcnd_remove, + .remove = mxcnd_remove, }; module_platform_driver(mxcnd_driver); diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index be8050e84b4f..92de26697359 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -574,7 +574,7 @@ MODULE_DEVICE_TABLE(of, mxic_nfc_of_ids); static struct platform_driver mxic_nfc_driver = { .probe = mxic_nfc_probe, - .remove_new = mxic_nfc_remove, + .remove = mxic_nfc_remove, .driver = { .name = "mxic-nfc", .of_match_table = mxic_nfc_of_ids, diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index e229de32ff50..03237310852c 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -113,7 +113,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip) rand_otp = of_property_read_bool(dn, "mxic,enable-randomizer-otp"); mxic = (struct nand_onfi_vendor_macronix 
*)p->onfi->vendor; - /* Subpage write is prohibited in randomizer operatoin */ + /* Subpage write is prohibited in randomizer operation */ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE && mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) { if (p->supports_set_get_features) { diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c index 3bb32a7c6d67..13365128194d 100644 --- a/drivers/mtd/nand/raw/ndfc.c +++ b/drivers/mtd/nand/raw/ndfc.c @@ -266,7 +266,7 @@ static struct platform_driver ndfc_driver = { .of_match_table = ndfc_match, }, .probe = ndfc_probe, - .remove_new = ndfc_remove, + .remove = ndfc_remove, }; module_platform_driver(ndfc_driver); diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index cf76afc6c0ed..d9141f3c0dd1 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -2291,7 +2291,7 @@ MODULE_DEVICE_TABLE(of, omap_nand_ids); static struct platform_driver omap_nand_driver = { .probe = omap_nand_probe, - .remove_new = omap_nand_remove, + .remove = omap_nand_remove, .driver = { .name = DRIVER_NAME, .of_match_table = omap_nand_ids, diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 4a97d4a76454..c1d19b8e6715 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -560,7 +560,7 @@ static struct platform_driver elm_driver = { .pm = &elm_pm_ops, }, .probe = elm_probe, - .remove_new = elm_remove, + .remove = elm_remove, }; module_platform_driver(elm_driver); diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index 2951d81614fd..47e80d5e58c5 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(of, orion_nand_of_match_table); #endif static struct platform_driver orion_nand_driver = { - .remove_new = orion_nand_remove, + .remove = orion_nand_remove, .driver = { .name = "orion_nand", .of_match_table = of_match_ptr(orion_nand_of_match_table), diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 19b2c9d25863..0b1f7670660e 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -237,7 +237,7 @@ static struct platform_driver pasemi_nand_driver = .of_match_table = pasemi_nand_match, }, .probe = pasemi_nand_probe, - .remove_new = pasemi_nand_remove, + .remove = pasemi_nand_remove, }; module_platform_driver(pasemi_nand_driver); diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c index 2570fd0beea0..09440ed4652e 100644 --- a/drivers/mtd/nand/raw/pl35x-nand-controller.c +++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c @@ -187,7 +187,7 @@ static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = { .free = pl35x_ecc_ooblayout16_free, }; -/* Generic flash bbt decriptors */ +/* Generic flash bbt descriptors */ static u8 bbt_pattern[] = { 'B', 'b', 't', '0' }; static u8 mirror_pattern[] = { '1', 't', 'b', 'B' }; @@ -1184,7 +1184,7 @@ MODULE_DEVICE_TABLE(of, pl35x_nand_of_match); static struct platform_driver pl35x_nandc_driver = { .probe = pl35x_nand_probe, - .remove_new = pl35x_nand_remove, + .remove = pl35x_nand_remove, .driver = { .name = PL35X_NANDC_DRIVER_NAME, .of_match_table = pl35x_nand_of_match, diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index b5c374b51ecd..0bcd455328ef 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -144,7 +144,7 @@ 
MODULE_DEVICE_TABLE(of, plat_nand_match); static struct platform_driver plat_nand_driver = { .probe = plat_nand_probe, - .remove_new = plat_nand_remove, + .remove = plat_nand_remove, .driver = { .name = "gen_nand", .of_match_table = plat_nand_match, diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index b8cff9240b28..636bba2528bf 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -3535,8 +3535,8 @@ static struct platform_driver qcom_nandc_driver = { .name = "qcom-nandc", .of_match_table = qcom_nandc_of_match, }, - .probe = qcom_nandc_probe, - .remove_new = qcom_nandc_remove, + .probe = qcom_nandc_probe, + .remove = qcom_nandc_remove, }; module_platform_driver(qcom_nandc_driver); diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index ed0cf732d20e..b07c2f8b4035 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -335,7 +335,7 @@ static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl) else dev->ctlreg &= ~R852_CTL_WRITE; - /* when write is stareted, enable write access */ + /* when write is started, enable write access */ if (dat == NAND_CMD_ERASE1) dev->ctlreg |= R852_CTL_WRITE; @@ -372,7 +372,7 @@ static int r852_wait(struct nand_chip *chip) nand_status_op(chip, &status); - /* Unfortunelly, no way to send detailed error status... */ + /* Unfortunately, no way to send detailed error status... */ if (dev->dma_error) { status |= NAND_STATUS_FAIL; dev->dma_error = 0; diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c index 0e92d50c5249..44f6603736d1 100644 --- a/drivers/mtd/nand/raw/renesas-nand-controller.c +++ b/drivers/mtd/nand/raw/renesas-nand-controller.c @@ -1402,7 +1402,7 @@ static struct platform_driver rnandc_driver = { .of_match_table = rnandc_id_table, }, .probe = rnandc_probe, - .remove_new = rnandc_remove, + .remove = rnandc_remove, }; module_platform_driver(rnandc_driver); diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 51c9cf9013dc..63e7b9e39a5a 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -1477,7 +1477,7 @@ static const struct dev_pm_ops rk_nfc_pm_ops = { static struct platform_driver rk_nfc_driver = { .probe = rk_nfc_probe, - .remove_new = rk_nfc_remove, + .remove = rk_nfc_remove, .driver = { .name = "rockchip-nfc", .of_match_table = rk_nfc_id_table, diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index 48c1d0eb66ca..229f2e87d56e 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -1213,7 +1213,7 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); static struct platform_driver s3c24xx_nand_driver = { .probe = s3c24xx_nand_probe, - .remove_new = s3c24xx_nand_remove, + .remove = s3c24xx_nand_remove, .suspend = s3c24xx_nand_suspend, .resume = s3c24xx_nand_resume, .id_table = s3c24xx_driver_ids, diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index 2a8164efb273..97f733e481ff 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -1216,7 +1216,7 @@ static void flctl_remove(struct platform_device *pdev) static struct platform_driver flctl_driver = { .probe = flctl_probe, - .remove_new = flctl_remove, + .remove = flctl_remove, .driver = { .name = "sh_flctl", .of_match_table = of_flctl_match, diff --git 
a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index 2402dc5465d5..142e93b200a3 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -234,7 +234,7 @@ static struct platform_driver sharpsl_nand_driver = { .name = "sharpsl-nand", }, .probe = sharpsl_nand_probe, - .remove_new = sharpsl_nand_remove, + .remove = sharpsl_nand_remove, }; module_platform_driver(sharpsl_nand_driver); diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c index 24f52a30fb13..e238784c8c3e 100644 --- a/drivers/mtd/nand/raw/sm_common.c +++ b/drivers/mtd/nand/raw/sm_common.c @@ -52,8 +52,8 @@ static const struct mtd_ooblayout_ops oob_sm_ops = { .free = oob_sm_ooblayout_free, }; -/* NOTE: This layout is not compatabable with SmartMedia, */ -/* because the 256 byte devices have page depenent oob layout */ +/* NOTE: This layout is not compatible with SmartMedia, */ +/* because the 256 byte devices have page dependent oob layout */ /* However it does preserve the bad block markers */ /* If you use smftl, it will bypass this and work correctly */ /* If you not, then you break SmartMedia compliance anyway */ diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 76d50eb9f1db..668584683ce5 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -231,7 +231,7 @@ static struct platform_driver socrates_nand_driver = { .of_match_table = socrates_nand_match, }, .probe = socrates_nand_probe, - .remove_new = socrates_nand_remove, + .remove = socrates_nand_remove, }; module_platform_driver(socrates_nand_driver); diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 0f67e96cc240..a960403081f1 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -2147,7 +2147,7 @@ MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match); static struct platform_driver stm32_fmc2_nfc_driver = { .probe = stm32_fmc2_nfc_probe, - .remove_new = stm32_fmc2_nfc_remove, + .remove = stm32_fmc2_nfc_remove, .driver = { .name = "stm32_fmc2_nfc", .of_match_table = stm32_fmc2_nfc_match, diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index c28634e20abf..fab371e3e9b7 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2196,7 +2196,7 @@ static struct platform_driver sunxi_nfc_driver = { .of_match_table = sunxi_nfc_ids, }, .probe = sunxi_nfc_probe, - .remove_new = sunxi_nfc_remove, + .remove = sunxi_nfc_remove, }; module_platform_driver(sunxi_nfc_driver); diff --git a/drivers/mtd/nand/raw/technologic-nand-controller.c b/drivers/mtd/nand/raw/technologic-nand-controller.c index 0e45a6fd91dd..a3294aaf43bd 100644 --- a/drivers/mtd/nand/raw/technologic-nand-controller.c +++ b/drivers/mtd/nand/raw/technologic-nand-controller.c @@ -213,7 +213,7 @@ static struct platform_driver ts72xx_nand_driver = { .of_match_table = ts72xx_id_table, }, .probe = ts72xx_nand_probe, - .remove_new = ts72xx_nand_remove, + .remove = ts72xx_nand_remove, }; module_platform_driver(ts72xx_nand_driver); diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index a553e3ac8ff4..7f9eb5f042a7 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -1279,7 +1279,7 @@ static struct platform_driver tegra_nand_driver = { .pm = &tegra_nand_pm, }, .probe = tegra_nand_probe, - .remove_new = tegra_nand_remove, + .remove = tegra_nand_remove, }; 
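The repeated one-line conversions throughout this series switch platform drivers from the transitional .remove_new callback back to .remove, now that struct platform_driver's .remove has itself been converted to return void. A sketch of the resulting driver shape, using hypothetical "foo" names (not taken from any driver above):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources, register devices, ... */
}

/*
 * The remove callback returns void: by the time it runs the device is
 * going away regardless, so an error code could not be acted upon.
 */
static void foo_remove(struct platform_device *pdev)
{
	/* release what probe acquired */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,	/* spelled .remove_new during the transition */
	.driver	= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);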
module_platform_driver(tegra_nand_driver); diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c index 37f79c019a72..907fb5de4269 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -405,7 +405,7 @@ static int txx9ndfmc_resume(struct platform_device *dev) static struct platform_driver txx9ndfmc_driver = { .probe = txx9ndfmc_probe, - .remove_new = txx9ndfmc_remove, + .remove = txx9ndfmc_remove, .resume = txx9ndfmc_resume, .driver = { .name = "txx9ndfmc", diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index f31d23219f91..4b5ba3187853 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -941,7 +941,7 @@ static struct platform_driver vf610_nfc_driver = { .pm = &vf610_nfc_pm_ops, }, .probe = vf610_nfc_probe, - .remove_new = vf610_nfc_remove, + .remove = vf610_nfc_remove, }; module_platform_driver(vf610_nfc_driver); diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 008549011fb9..af84395dc66e 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -256,7 +256,7 @@ static const struct of_device_id xway_nand_match[] = { static struct platform_driver xway_nand_driver = { .probe = xway_nand_probe, - .remove_new = xway_nand_remove, + .remove = xway_nand_remove, .driver = { .name = "lantiq,nand-xway", .of_match_table = xway_nand_match, diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 4d76f9f71a0e..b1df7f627161 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -337,7 +337,7 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, return ret; } -static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { +static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { .init_ctx = spinand_ondie_ecc_init_ctx, .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx, .prepare_io_req = spinand_ondie_ecc_prepare_io_req, diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index f3bb81d7e460..7180e615ac97 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -161,17 +161,18 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand, } static const struct spinand_info winbond_spinand_table[] = { - SPINAND_INFO("W25M02GV", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), - NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2), + /* 512M-bit densities */ + SPINAND_INFO("W25N512GW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20), + NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL), - SPINAND_SELECT_TARGET(w25m02gv_select_target)), - SPINAND_INFO("W25N01GV", + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), + /* 1G-bit densities */ + SPINAND_INFO("W25N01GV", /* 3.3V */ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), @@ -180,52 +181,63 @@ static const struct spinand_info winbond_spinand_table[] = { &update_cache_variants), 0, SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), - SPINAND_INFO("W25N01KV", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21), - NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1), - NAND_ECCREQ(4, 512), + SPINAND_INFO("W25N01GW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 
0xba, 0x21), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N02KV", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22), - NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), - NAND_ECCREQ(8, 512), + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), + SPINAND_INFO("W25N01JW", /* high-speed 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N01JW", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21), - NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), + SPINAND_INFO("W25N01KV", /* 3.3V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21), + NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(4, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N02JWZEIF", + SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)), + /* 2G-bit densities */ + SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL), + SPINAND_SELECT_TARGET(w25m02gv_select_target)), + SPINAND_INFO("W25N02JW", /* high-speed 1.8V */ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1), - NAND_ECCREQ(4, 512), + NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N512GW", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20), - NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1), - NAND_ECCREQ(4, 512), + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), + SPINAND_INFO("W25N02KV", /* 3.3V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22), + NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N02KWZEIR", + SPINAND_INFO("W25N02KW", /* 1.8V */ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x22), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), @@ -234,18 +246,19 @@ static const struct spinand_info winbond_spinand_table[] = { &update_cache_variants), 0, SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N01GWZEIG", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x21), - NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), - NAND_ECCREQ(4, 512), + /* 4G-bit densities */ + SPINAND_INFO("W25N04KV", /* 3.3V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23), + NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1), + NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 
&write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)), - SPINAND_INFO("W25N04KV", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23), - NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1), + SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)), + SPINAND_INFO("W25N04KW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x23), + NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c index 89a7f0bbc4b3..db948da2c4c5 100644 --- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -488,7 +488,7 @@ static struct platform_driver hisi_spi_nor_driver = { .of_match_table = hisi_spi_nor_dt_ids, }, .probe = hisi_spi_nor_probe, - .remove_new = hisi_spi_nor_remove, + .remove = hisi_spi_nor_remove, }; module_platform_driver(hisi_spi_nor_driver); diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c index 5aee62f51031..1a92d71755db 100644 --- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -446,7 +446,7 @@ MODULE_DEVICE_TABLE(of, nxp_spifi_match); static struct platform_driver nxp_spifi_driver = { .probe = nxp_spifi_probe, - .remove_new = nxp_spifi_remove, + .remove = nxp_spifi_remove, .driver = { .name = "nxp-spifi", .of_match_table = nxp_spifi_match, diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 9d6e85bf227b..66949d9f0cc5 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor, op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto); if (op->dummy.nbytes) - op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto); + op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto); if (op->data.nbytes) op->data.buswidth = spi_nor_get_protocol_data_nbits(proto); @@ -113,6 +113,9 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor, op->cmd.opcode = (op->cmd.opcode << 8) | ext; op->cmd.nbytes = 2; } + + if (proto == SNOR_PROTO_8_8_8_DTR && nor->flags & SNOR_F_SWAP16) + op->data.swap16 = true; } /** diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 1516b6d0dc37..5c33740ed7f5 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -140,6 +140,7 @@ enum spi_nor_option_flags { SNOR_F_RWW = BIT(14), SNOR_F_ECC = BIT(15), SNOR_F_NO_WP = BIT(16), + SNOR_F_SWAP16 = BIT(17), }; struct spi_nor_read_command { diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index ea6be95e75a5..830da21eea08 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -8,6 +8,23 @@ #include "core.h" +#define MXIC_NOR_OP_RD_CR2 0x71 /* Read configuration register 2 opcode */ +#define MXIC_NOR_OP_WR_CR2 0x72 /* Write configuration register 2 opcode */ +#define MXIC_NOR_ADDR_CR2_MODE 0x00000000 /* CR2 address for setting spi/sopi/dopi mode */ +#define MXIC_NOR_ADDR_CR2_DC 0x00000300 /* CR2 address for setting dummy cycles */ +#define MXIC_NOR_REG_DOPI_EN 0x2 /* Enable Octal DTR */ +#define MXIC_NOR_REG_SPI_EN 0x0 /* Enable SPI */ + +/* Convert dummy cycles to bit pattern */ +#define MXIC_NOR_REG_DC(p) \ + ((20 - (p)) >> 1) + +#define MXIC_NOR_WR_CR2(addr, ndata, buf) \ + 
	SPI_MEM_OP(SPI_MEM_OP_CMD(MXIC_NOR_OP_WR_CR2, 0),		\
		   SPI_MEM_OP_ADDR(4, addr, 0),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_DATA_OUT(ndata, buf, 0))
+
 static int
 mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
			    const struct sfdp_parameter_header *bfpt_header,
@@ -182,9 +199,88 @@ static const struct flash_info macronix_nor_parts[] = {
 		.name = "mx25l3255e",
 		.size = SZ_4M,
 		.no_sfdp_flags = SECT_4K,
-	}
+	},
+	/*
+	 * This spares us from adding new flash entries for flashes that can be
+	 * initialized solely based on the SFDP data, but still need the
+	 * manufacturer hooks to set parameters that can't be discovered at SFDP
+	 * parsing time.
+	 */
+	{ .id = SNOR_ID(0xc2) }
 };
 
+static int macronix_nor_octal_dtr_en(struct spi_nor *nor)
+{
+	struct spi_mem_op op;
+	u8 *buf = nor->bouncebuf, i;
+	int ret;
+
+	/* Use the dummy cycle count parsed from SFDP, converted to the register bit pattern. */
+	buf[0] = MXIC_NOR_REG_DC(nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].num_wait_states);
+	op = (struct spi_mem_op)MXIC_NOR_WR_CR2(MXIC_NOR_ADDR_CR2_DC, 1, buf);
+	ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+	if (ret)
+		return ret;
+
+	/* Set the octal and DTR enable bits. */
+	buf[0] = MXIC_NOR_REG_DOPI_EN;
+	op = (struct spi_mem_op)MXIC_NOR_WR_CR2(MXIC_NOR_ADDR_CR2_MODE, 1, buf);
+	ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+	if (ret)
+		return ret;
+
+	/* Read flash ID to make sure the switch was successful. */
+	ret = spi_nor_read_id(nor, 4, 4, buf, SNOR_PROTO_8_8_8_DTR);
+	if (ret) {
+		dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
+		return ret;
+	}
+
+	/* Macronix SPI-NOR flash 8D-8D-8D read ID would get 6 bytes data A-A-B-B-C-C */
+	for (i = 0; i < nor->info->id->len; i++)
+		if (buf[i * 2] != buf[(i * 2) + 1] || buf[i * 2] != nor->info->id->bytes[i])
+			return -EINVAL;
+
+	return 0;
+}
+
+static int macronix_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+	struct spi_mem_op op;
+	u8 *buf = nor->bouncebuf;
+	int ret;
+
+	/*
+	 * The register is 1-byte wide, but 1-byte transactions are not
+	 * allowed in 8D-8D-8D mode. Since there is no register at the
+	 * next location, just initialize the value to 0 and let the
+	 * transaction go on.
+	 */
+	buf[0] = MXIC_NOR_REG_SPI_EN;
+	buf[1] = 0x0;
+	op = (struct spi_mem_op)MXIC_NOR_WR_CR2(MXIC_NOR_ADDR_CR2_MODE, 2, buf);
+	ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+	if (ret)
+		return ret;
+
+	/* Read flash ID to make sure the switch was successful. */
+	ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
+	if (ret) {
+		dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret);
+		return ret;
+	}
+
+	if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int macronix_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
+{
+	return enable ?
macronix_nor_octal_dtr_en(nor) : macronix_nor_octal_dtr_dis(nor); +} + static void macronix_nor_default_init(struct spi_nor *nor) { nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; @@ -194,6 +290,7 @@ static int macronix_nor_late_init(struct spi_nor *nor) { if (!nor->params->set_4byte_addr_mode) nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b; + nor->params->set_octal_dtr = macronix_nor_set_octal_dtr; return 0; } diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 5b1117265bd2..21727f9a4ac6 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -671,6 +671,10 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, return -EOPNOTSUPP; } + /* Byte order in 8D-8D-8D mode */ + if (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_BYTE_ORDER_SWAPPED) + nor->flags |= SNOR_F_SWAP16; + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); } diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h index da0fe5aa9bb0..f74a0eb339ea 100644 --- a/drivers/mtd/spi-nor/sfdp.h +++ b/drivers/mtd/spi-nor/sfdp.h @@ -130,6 +130,7 @@ struct sfdp_bfpt { #define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */ #define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */ #define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */ +#define BFPT_DWORD18_BYTE_ORDER_SWAPPED BIT(31) /* Byte order swapped in 8D-8D-8D mode */ struct sfdp_parameter_header { u8 id_lsb; diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index d6c92595f6bc..5a88a6096ca8 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -106,6 +106,7 @@ static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr) int ret; if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.addr.nbytes = nor->addr_nbytes; op.dummy.nbytes = params->rdsr_dummy; op.data.nbytes = 2; } diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 9f7ce5763e71..8d0a00d69e12 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -129,6 +129,7 @@ static const struct flash_info winbond_nor_parts[] = { .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, }, { .id = SNOR_ID(0xef, 0x40, 0x18), + /* Flavors w/ and w/o SFDP. 
*/ .name = "w25q128", .size = SZ_16M, .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5f7bdafcf05d..4ec4934a4edd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4661,7 +4661,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) struct net_device *dev = bp->dev; if (page_mode) { - bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); bp->flags |= BNXT_FLAG_RX_PAGE_MODE; if (bp->xdp_prog->aux->xdp_has_frags) @@ -8256,6 +8256,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; + if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV) + bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; + switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: @@ -9296,7 +9299,6 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) struct hwrm_port_mac_ptp_qcfg_output *resp; struct hwrm_port_mac_ptp_qcfg_input *req; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - bool phc_cfg; u8 flags; int rc; @@ -9343,8 +9345,9 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) rc = -ENODEV; goto exit; } - phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; - rc = bnxt_ptp_init(bp, phc_cfg); + ptp->rtc_configured = + (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; + rc = bnxt_ptp_init(bp); if (rc) netdev_warn(bp->dev, "PTP initialization failed.\n"); exit: @@ -9422,6 +9425,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_UDP_GSO_CAP; if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; + if (BNXT_PF(bp) && + (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ -14740,6 +14746,14 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) bnxt_close_nic(bp, true, false); WRITE_ONCE(dev->mtu, new_mtu); + + /* MTU change may change the AGG ring settings if an XDP multi-buffer + * program is attached. We need to set the AGG rings settings and + * rx_skb_func accordingly. 
+ */ + if (READ_ONCE(bp->xdp_prog)) + bnxt_set_rx_skb_mode(bp, true); + bnxt_set_ring_params(bp); if (netif_running(dev)) @@ -15477,6 +15491,13 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { vnic = &bp->vnic_info[i]; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID); @@ -16230,6 +16251,7 @@ static void bnxt_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); + bnxt_ptp_clear(bp); bnxt_clear_int_mode(bp); pci_disable_device(pdev); @@ -16257,6 +16279,7 @@ static int bnxt_suspend(struct device *device) rc = bnxt_close(dev); } bnxt_hwrm_func_drv_unrgtr(bp); + bnxt_ptp_clear(bp); pci_disable_device(bp->pdev); bnxt_free_ctx_mem(bp, false); rtnl_unlock(); @@ -16300,6 +16323,10 @@ static int bnxt_resume(struct device *device) if (bp->fw_crash_mem) bnxt_hwrm_crash_dump_mem_cfg(bp); + if (bnxt_ptp_init(bp)) { + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } bnxt_get_wol_settings(bp); if (netif_running(dev)) { rc = bnxt_open(dev); @@ -16478,8 +16505,12 @@ static void bnxt_io_resume(struct pci_dev *pdev) rtnl_lock(); err = bnxt_hwrm_func_qcaps(bp); - if (!err && netif_running(netdev)) - err = bnxt_open(netdev); + if (!err) { + if (netif_running(netdev)) + err = bnxt_open(netdev); + else + err = bnxt_reserve_rings(bp, true); + } if (!err) netif_device_attach(netdev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 231e38933984..23f1aff214b4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2446,6 +2446,8 @@ struct bnxt { #define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2) #define BNXT_FW_CAP_NEW_RM BIT_ULL(3) #define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4) + #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5) + #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6) #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7) #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10) #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11) @@ -2492,6 +2494,10 @@ struct bnxt { #define BNXT_SUPPORTS_QUEUE_API(bp) \ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH)) +#define BNXT_RDMA_SRIOV_EN(bp) \ + ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV) +#define BNXT_ROCE_VF_RESC_CAP(bp) \ + ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED) u32 hwrm_spec_code; u16 hwrm_cmd_seq; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2f4987ec7464..d87681d71106 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1187,10 +1187,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) } } - if (fltr->base.flags & BNXT_ACT_DROP) + if (fltr->base.flags & BNXT_ACT_DROP) { fs->ring_cookie = RX_CLS_FLOW_DISC; - else + } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) { + fs->flow_type |= FLOW_RSS; + cmd->rss_context = fltr->base.fw_vnic_id; + } else { fs->ring_cookie = fltr->base.rxq; + } rc = 0; fltr_err: @@ -2837,19 +2841,24 @@ static int bnxt_get_link_ksettings(struct net_device *dev, } base->port = PORT_NONE; - if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + if (media == BNXT_MEDIA_TP) { base->port = PORT_TP; 
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, lk_ksettings->link_modes.supported); linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, lk_ksettings->link_modes.advertising); + } else if (media == BNXT_MEDIA_KR) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, + lk_ksettings->link_modes.advertising); } else { linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, lk_ksettings->link_modes.supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, lk_ksettings->link_modes.advertising); - if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) + if (media == BNXT_MEDIA_CR) base->port = PORT_DA; else base->port = PORT_FIBRE; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 075ccd589845..2d4e19b96ee7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -1038,7 +1038,7 @@ static void bnxt_ptp_free(struct bnxt *bp) } } -int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) +int bnxt_ptp_init(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; int rc; @@ -1061,7 +1061,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) if (BNXT_PTP_USE_RTC(bp)) { bnxt_ptp_timecounter_init(bp, false); - rc = bnxt_ptp_init_rtc(bp, phc_cfg); + rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured); if (rc) goto out; } else { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index c7851f8c971c..a95f05e9c579 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -135,6 +135,7 @@ struct bnxt_ptp_cfg { BNXT_PTP_MSG_PDELAY_REQ | \ BNXT_PTP_MSG_PDELAY_RESP) u8 tx_tstamp_en:1; + u8 rtc_configured:1; int rx_filter; u32 tstamp_filters; @@ -168,7 +169,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, struct tx_ts_cmp *tscmp); void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns); int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg); -int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg); +int bnxt_ptp_init(struct bnxt *bp); void bnxt_ptp_clear(struct bnxt *bp); static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7bb8a5d74430..12b6ed51fd88 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -520,6 +520,56 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) return hwrm_req_send(bp, req); } +static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_cfg_input *cfg_req; + struct hwrm_func_qcaps_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto err; + + rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG); + if (rc) + goto err; + + cfg_req->fid = cpu_to_le16(0xffff); + cfg_req->enables2 = + cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF); + cfg_req->roce_max_av_per_vf = + cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / 
num_vfs); + cfg_req->roce_max_cq_per_vf = + cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs); + cfg_req->roce_max_mrw_per_vf = + cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs); + cfg_req->roce_max_qp_per_vf = + cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs); + cfg_req->roce_max_srq_per_vf = + cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs); + cfg_req->roce_max_gid_per_vf = + cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs); + + rc = hwrm_req_send(bp, cfg_req); + +err: + hwrm_req_drop(bp, req); + if (rc) + netdev_err(bp->dev, "RoCE sriov configuration failed\n"); +} + /* Only called by PF to reserve resources for VFs, returns actual number of * VFs configured, or < 0 on error. */ @@ -759,6 +809,9 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) *num_vfs = rc; } + if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp)) + bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs); + return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index fdd6356f21ef..b771c84cdd89 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -414,6 +414,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; if (bp->flags & BNXT_FLAG_VF) edev->flags |= BNXT_EN_FLAG_VF; + if (BNXT_ROCE_VF_RESC_CAP(bp)) + edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT; edev->chip_num = bp->chip_num; edev->hw_ring_stats_size = bp->hw_ring_stats_size; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 4f4914f5c84c..5d6aac60f236 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -64,6 +64,7 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_ULP_STOPPED 0x8 #define BNXT_EN_FLAG_VF 0x10 #define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF) + #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20 struct bnxt_ulp *ulp_tbl; int l2_db_size; /* Doorbell BAR size in diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 9fd717b9cf69..984f0dd7b62e 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -239,12 +239,11 @@ static int cavium_ptp_probe(struct pci_dev *pdev, if (err) goto error_free; - err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev)); + clock->reg_base = pcim_iomap_region(pdev, PCI_PTP_BAR_NO, pci_name(pdev)); + err = PTR_ERR_OR_ZERO(clock->reg_base); if (err) goto error_free; - clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; - spin_lock_init(&clock->spin_lock); cc = &clock->cycle_counter; @@ -292,7 +291,7 @@ error_stop: clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG); clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN; writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG); - pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO); + pcim_iounmap_region(pdev, PCI_PTP_BAR_NO); error_free: devm_kfree(dev, clock); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 35634c516e26..535969fa0fdb 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -29,6 +29,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr); static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv, u8 preemptible_tcs) { + if (!(priv->si->hw_features & ENETC_SI_F_QBU)) + return; + 
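The guard added above makes the preemptible-TC configuration a no-op when the hardware does not advertise 802.1Qbu frame preemption; the hunks that follow move that capability detection from the station-interface SIPCAPR0 register to the port-level PCAPR0 register. A sketch of the gating pattern with stand-in "foo" names (not the enetc symbols):

#include <linux/bits.h>
#include <linux/types.h>

#define FOO_F_QBU	BIT(1)	/* hardware advertises frame preemption */

struct foo_priv {
	u32 hw_features;
	u8 preemptible_tcs;
};

static void foo_set_preemptible_tcs(struct foo_priv *priv, u8 tcs)
{
	/* Never touch preemption state on hardware without Qbu support. */
	if (!(priv->hw_features & FOO_F_QBU))
		return;

	priv->preemptible_tcs = tcs;
	/* ...commit the bitmap to the hardware registers here... */
}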
 	priv->preemptible_tcs = preemptible_tcs;
 	enetc_mm_commit_preemptible_tcs(priv);
 }
@@ -1756,15 +1759,6 @@ void enetc_get_si_caps(struct enetc_si *si)
 		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
 		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
 	}
-
-	if (val & ENETC_SIPCAPR0_QBV)
-		si->hw_features |= ENETC_SI_F_QBV;
-
-	if (val & ENETC_SIPCAPR0_QBU)
-		si->hw_features |= ENETC_SI_F_QBU;
-
-	if (val & ENETC_SIPCAPR0_PSFP)
-		si->hw_features |= ENETC_SI_F_PSFP;
 }
 EXPORT_SYMBOL_GPL(enetc_get_si_caps);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 7c3285584f8a..55ba949230ff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -23,10 +23,7 @@
 #define ENETC_SICTR0	0x18
 #define ENETC_SICTR1	0x1c
 #define ENETC_SIPCAPR0	0x20
-#define ENETC_SIPCAPR0_PSFP	BIT(9)
 #define ENETC_SIPCAPR0_RSS	BIT(8)
-#define ENETC_SIPCAPR0_QBV	BIT(4)
-#define ENETC_SIPCAPR0_QBU	BIT(3)
 #define ENETC_SIPCAPR0_RFS	BIT(2)
 #define ENETC_SIPCAPR1	0x24
 #define ENETC_SITGTGR	0x30
@@ -194,6 +191,9 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_PCAPR0	0x0900
 #define ENETC_PCAPR0_RXBDR(val)	((val) >> 24)
 #define ENETC_PCAPR0_TXBDR(val)	(((val) >> 16) & 0xff)
+#define ENETC_PCAPR0_PSFP	BIT(9)
+#define ENETC_PCAPR0_QBV	BIT(4)
+#define ENETC_PCAPR0_QBU	BIT(3)
 #define ENETC_PCAPR1	0x0904
 #define ENETC_PSICFGR0(n)	(0x0940 + (n) * 0xc)  /* n = SI index */
 #define ENETC_PSICFGR0_SET_TXBDR(val)	((val) & 0xff)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c47b4a743d93..203862ec1114 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -409,6 +409,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si)
 	enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
 }
 
+static void enetc_port_get_caps(struct enetc_si *si)
+{
+	struct enetc_hw *hw = &si->hw;
+	u32 val;
+
+	val = enetc_port_rd(hw, ENETC_PCAPR0);
+
+	if (val & ENETC_PCAPR0_QBV)
+		si->hw_features |= ENETC_SI_F_QBV;
+
+	if (val & ENETC_PCAPR0_QBU)
+		si->hw_features |= ENETC_SI_F_QBU;
+
+	if (val & ENETC_PCAPR0_PSFP)
+		si->hw_features |= ENETC_SI_F_PSFP;
+}
+
 static void enetc_port_si_configure(struct enetc_si *si)
 {
 	struct enetc_pf *pf = enetc_si_priv(si);
@@ -416,6 +433,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
 	int num_rings, i;
 	u32 val;
 
+	enetc_port_get_caps(si);
+
 	val = enetc_port_rd(hw, ENETC_PCAPR0);
 	num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 27935c54b91b..8216f843a7cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -112,6 +112,11 @@ struct mac_ops *get_mac_ops(void *cgxd)
 	return ((struct cgx *)cgxd)->mac_ops;
 }
 
+u32 cgx_get_fifo_len(void *cgxd)
+{
+	return ((struct cgx *)cgxd)->fifo_len;
+}
+
 void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
 {
 	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
@@ -209,6 +214,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
 	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
 }
 
+static u8 cgx_get_nix_resetbit(struct cgx *cgx)
+{
+	int first_lmac;
+	u8 p2x;
+
+	/* Non-98XX silicon supports only the NIX0 block */
+	if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
+		return CGX_NIX0_RESET;
+
+	first_lmac =
find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); + p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac); + + if (p2x == CMR_P2X_SEL_NIX1) + return CGX_NIX1_RESET; + else + return CGX_NIX0_RESET; +} + /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API. Else an asynchronous event(with * latest link status) can reach the destination before this function returns @@ -501,7 +524,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id) u8 num_lmacs; u32 fifo_len; - fifo_len = cgx->mac_ops->fifo_len; + fifo_len = cgx->fifo_len; num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx); switch (num_lmacs) { @@ -1719,6 +1742,8 @@ static int cgx_lmac_init(struct cgx *cgx) lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); } + /* Start X2P reset on given MAC block */ + cgx->mac_ops->mac_x2p_reset(cgx, true); return cgx_lmac_verify_fwi_version(cgx); err_bitmap_free: @@ -1764,7 +1789,7 @@ static void cgx_populate_features(struct cgx *cgx) u64 cfg; cfg = cgx_read(cgx, 0, CGX_CONST); - cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); + cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); if (is_dev_rpm(cgx)) @@ -1784,6 +1809,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx) return 0x60; } +static void cgx_x2p_reset(void *cgxd, bool enable) +{ + struct cgx *cgx = cgxd; + int lmac_id; + u64 cfg; + + if (enable) { + for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac) + cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false); + + usleep_range(1000, 2000); + + cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG); + cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP; + cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg); + } else { + cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG); + cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP); + cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg); + } +} + +static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!is_lmac_valid(cgx, lmac_id)) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + if (enable) + cfg |= DATA_PKT_RX_EN; + else + cfg &= ~DATA_PKT_RX_EN; + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return 0; +} + static struct mac_ops cgx_mac_ops = { .name = "cgx", .csr_offset = 0, @@ -1815,6 +1879,8 @@ static struct mac_ops cgx_mac_ops = { .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg, .mac_reset = cgx_lmac_reset, .mac_stats_reset = cgx_stats_reset, + .mac_x2p_reset = cgx_x2p_reset, + .mac_enadis_rx = cgx_enadis_rx, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index dc9ace30554a..1cf12e5c7da8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -32,6 +32,10 @@ #define CGX_LMAC_TYPE_MASK 0xF #define CGXX_CMRX_INT 0x040 #define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMR_GLOBAL_CONFIG 0x08 +#define CGX_NIX0_RESET BIT_ULL(2) +#define CGX_NIX1_RESET BIT_ULL(3) +#define CGX_NSCI_DROP BIT_ULL(9) #define CGXX_CMRX_INT_ENA_W1S 0x058 #define CGXX_CMRX_RX_ID_MAP 0x060 #define CGXX_CMRX_RX_STAT0 0x070 @@ -185,4 +189,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause, int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause, int pfvf_idx); int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr); +u32 
cgx_get_fifo_len(void *cgxd); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 5d84386ed22d..406c59100a35 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -159,6 +159,7 @@ enum nix_scheduler { #define SDP_HW_MIN_FRS 16 #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ +#define SDP_LINK_CREDIT 0x320202 /* NIX RX action operation*/ #define NIX_RX_ACTIONOP_DROP (0x0ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 9ffc6790c513..6180e68e1765 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -72,7 +72,6 @@ struct mac_ops { u8 irq_offset; u8 int_ena_bit; u8 lmac_fwi; - u32 fifo_len; bool non_contiguous_serdes_lane; /* RPM & CGX differs in number of Receive/transmit stats */ u8 rx_stats_cnt; @@ -133,6 +132,8 @@ struct mac_ops { int (*get_fec_stats)(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int (*mac_stats_reset)(void *cgxd, int lmac_id); + void (*mac_x2p_reset)(void *cgxd, bool enable); + int (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable); }; struct cgx { @@ -142,6 +143,10 @@ struct cgx { u8 lmac_count; /* number of LMACs per MAC could be 4 or 8 */ u8 max_lmac_per_mac; + /* length of fifo varies depending on the number + * of LMACS + */ + u32 fifo_len; #define MAX_LMAC_COUNT 8 struct lmac *lmac_idmap[MAX_LMAC_COUNT]; struct work_struct cgx_cmd_work; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 1b34cf9c9703..2e9945446199 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -39,6 +39,8 @@ static struct mac_ops rpm_mac_ops = { .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, .mac_stats_reset = rpm_stats_reset, + .mac_x2p_reset = rpm_x2p_reset, + .mac_enadis_rx = rpm_enadis_rx, }; static struct mac_ops rpm2_mac_ops = { @@ -72,6 +74,8 @@ static struct mac_ops rpm2_mac_ops = { .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, .mac_stats_reset = rpm_stats_reset, + .mac_x2p_reset = rpm_x2p_reset, + .mac_enadis_rx = rpm_enadis_rx, }; bool is_dev_rpm2(void *rpmd) @@ -467,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id) int err; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); - err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); + err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id); if (!err) return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); return err; @@ -480,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id) u8 num_lmacs; u32 fifo_len; - fifo_len = rpm->mac_ops->fifo_len; + fifo_len = rpm->fifo_len; num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm); switch (num_lmacs) { @@ -533,9 +537,9 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id) */ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF; if (max_lmac > 4) - fifo_len = rpm->mac_ops->fifo_len / 2; + fifo_len = rpm->fifo_len / 2; else - fifo_len = rpm->mac_ops->fifo_len; + fifo_len = rpm->fifo_len; if (lmac_id < 4) { num_lmacs = hweight8(lmac_info & 0xF); @@ -699,46 +703,51 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp) if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) return 0; + /* 
latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are common + * for all counters. Acquire lock to ensure serialized reads + */ + mutex_lock(&rpm->lock); if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { - val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO); - val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_corr_blks = (val_hi << 16 | val_lo); - val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO); - val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_uncorr_blks = (val_hi << 16 | val_lo); /* 50G uses 2 Physical serdes lines */ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id == LMAC_MODE_50G_R) { - val_lo = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_VL1_CCW_LO); - val_hi = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_corr_blks += (val_hi << 16 | val_lo); - val_lo = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_VL1_NCCW_LO); - val_hi = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_uncorr_blks += (val_hi << 16 | val_lo); } } else { /* enable RS-FEC capture */ - cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL); + cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL); cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id); - rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg); + rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg); val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2); - val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC); rsp->fec_corr_blks = (val_hi << 32 | val_lo); val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3); - val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC); rsp->fec_uncorr_blks = (val_hi << 32 | val_lo); } + mutex_unlock(&rpm->lock); return 0; } @@ -763,3 +772,41 @@ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr) return 0; } + +void rpm_x2p_reset(void *rpmd, bool enable) +{ + rpm_t *rpm = rpmd; + int lmac_id; + u64 cfg; + + if (enable) { + for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac) + rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false); + + usleep_range(1000, 2000); + + cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG); + rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET); + } else { + cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG); + cfg &= ~RPM_NIX0_RESET; + rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg); + } +} + +int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + if (enable) + cfg |= RPM_RX_EN; + else + cfg &= ~RPM_RX_EN; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index 34b11deb0f3c..b8d3972e096a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -17,6 +17,8 @@ /* Registers */ #define RPMX_CMRX_CFG 0x00 +#define RPMX_CMR_GLOBAL_CFG 0x08 +#define RPM_NIX0_RESET BIT_ULL(3) #define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17) #define RPMX_CMRX_RX_ID_MAP 0x80 @@ -84,16 +86,18 @@ /* FEC stats */ #define RPMX_MTI_STAT_STATN_CONTROL 0x10018 #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 -#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27) +#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(28) #define RPMX_CMD_CLEAR_RX BIT_ULL(30) #define RPMX_CMD_CLEAR_TX BIT_ULL(31) +#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL 0x40018 +#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC 0x40000 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058 -#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618 -#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620 -#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628 -#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630 -#define RPMX_MTI_FCFECX_CW_HI 0x38638 +#define RPMX_MTI_FCFECX_VL0_CCW_LO(a) (0x38618 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a) (0x38620 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL1_CCW_LO(a) (0x38628 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a) (0x38630 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_CW_HI(a) (0x38638 + ((a) * 0x40)) /* CN10KB CSR Declaration */ #define RPM2_CMRX_SW_INT 0x1b0 @@ -137,4 +141,6 @@ bool is_dev_rpm2(void *rpmd); int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr); int rpm_stats_reset(void *rpmd, int lmac_id); +void rpm_x2p_reset(void *rpmd, bool enable); +int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 1a97fb9032fa..cd0d7b7774f1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1162,6 +1162,7 @@ cpt: } rvu_program_channels(rvu); + cgx_start_linkup(rvu); err = rvu_mcs_init(rvu); if (err) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index b897845e25fd..a383b5ef5b2d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -1025,6 +1025,7 @@ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_ int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); void rvu_mac_reset(struct rvu *rvu, u16 pcifunc); u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac); +void cgx_start_linkup(struct rvu *rvu); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 266ecbc1b97a..992fa0b82e8d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -349,6 +349,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu) int rvu_cgx_init(struct rvu *rvu) { + struct mac_ops *mac_ops; int cgx, err; void *cgxd; @@ -375,6 +376,15 @@ int rvu_cgx_init(struct rvu *rvu) if (err) return err; + /* Clear X2P reset on all MAC blocks */ + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + 
mac_ops = get_mac_ops(cgxd); + mac_ops->mac_x2p_reset(cgxd, false); + } + /* Register for CGX events */ err = cgx_lmac_event_handler_init(rvu); if (err) @@ -382,10 +392,26 @@ int rvu_cgx_init(struct rvu *rvu) mutex_init(&rvu->cgx_cfg_lock); - /* Ensure event handler registration is completed, before - * we turn on the links - */ - mb(); + return 0; +} + +void cgx_start_linkup(struct rvu *rvu) +{ + unsigned long lmac_bmap; + struct mac_ops *mac_ops; + int cgx, lmac, err; + void *cgxd; + + /* Enable receive on all LMACS */ + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + mac_ops = get_mac_ops(cgxd); + lmac_bmap = cgx_get_lmac_bmap(cgxd); + for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) + mac_ops->mac_enadis_rx(cgxd, lmac, true); + } /* Do link up for all CGX ports */ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { @@ -398,8 +424,6 @@ int rvu_cgx_init(struct rvu *rvu) "Link up process failed to start on cgx %d\n", cgx); } - - return 0; } int rvu_cgx_exit(struct rvu *rvu) @@ -923,13 +947,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, u32 rvu_cgx_get_fifolen(struct rvu *rvu) { - struct mac_ops *mac_ops; - u32 fifo_len; + void *cgxd = rvu_first_cgx_pdata(rvu); - mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); - fifo_len = mac_ops ? mac_ops->fifo_len : 0; + if (!cgxd) + return 0; - return fifo_len; + return cgx_get_fifo_len(cgxd); } u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 5d5a01dbbca1..a5d1e2bddd58 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4672,6 +4672,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); + /* Set SDP link credit */ + rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT); + /* Set default min/max packet lengths allowed on NIX Rx links. 
* * With HW reset minlen value of 60byte, HW will treat ARP pkts diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index fe38426ec42d..2bf426cea6dd 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1394,18 +1394,15 @@ static int pxa168_eth_probe(struct platform_device *pdev) printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); + dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n"); return -ENODEV; } - clk_prepare_enable(clk); dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); - if (!dev) { - err = -ENOMEM; - goto err_clk; - } + if (!dev) + return -ENOMEM; platform_set_drvdata(pdev, dev); pep = netdev_priv(dev); @@ -1523,8 +1520,6 @@ err_free_mdio: mdiobus_free(pep->smi_bus); err_netdev: free_netdev(dev); -err_clk: - clk_disable_unprepare(clk); return err; } @@ -1542,7 +1537,6 @@ static void pxa168_eth_remove(struct platform_device *pdev) if (dev->phydev) phy_disconnect(dev->phydev); - clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 8577db3308cc..7f68468c2e75 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -516,6 +516,7 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, blocking_notifier_call_chain(&dev0->priv.lag_nh, MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE, ndev); + dev_put(ndev); } } @@ -918,6 +919,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct lag_tracker tracker = { }; + struct net_device *ndev; bool do_bond, roce_lag; int err; int i; @@ -981,6 +983,16 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) return; } } + if (tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + ndev = mlx5_lag_active_backup_get_netdev(dev0); + /* Only SR-IOV and RoCE LAG should have tracker->tx_type + * set, so there is no need to check the mode + */ + blocking_notifier_call_chain(&dev0->priv.lag_nh, + MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE, + ndev); + dev_put(ndev); + } } else if (mlx5_lag_should_modify_lag(ldev, do_bond)) { mlx5_modify_lag(ldev, &tracker); } else if (mlx5_lag_should_disable_lag(ldev, do_bond)) { diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index 942f1e531a85..dbc3f92eebc4 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -9,7 +9,10 @@ #ifndef RTASE_H #define RTASE_H -#define RTASE_HW_VER_MASK 0x7C800000 +#define RTASE_HW_VER_MASK 0x7C800000 +#define RTASE_HW_VER_906X_7XA 0x00800000 +#define RTASE_HW_VER_906X_7XC 0x04000000 +#define RTASE_HW_VER_907XD_V1 0x04800000 #define RTASE_RX_DMA_BURST_256 4 #define RTASE_TX_DMA_BURST_UNLIMITED 7 @@ -327,6 +330,8 @@ struct rtase_private { u16 int_nums; u16 tx_int_mit; u16 rx_int_mit; + + u32 hw_ver; }; #define RTASE_LSO_64K 64000 diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 874994d9ceb9..de7f11232593 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1714,10 
+1714,21 @@ static int rtase_get_settings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + const struct rtase_private *tp = netdev_priv(dev); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); - cmd->base.speed = SPEED_5000; + + switch (tp->hw_ver) { + case RTASE_HW_VER_906X_7XA: + case RTASE_HW_VER_906X_7XC: + cmd->base.speed = SPEED_5000; + break; + case RTASE_HW_VER_907XD_V1: + cmd->base.speed = SPEED_10000; + break; + } + cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_MII; cmd->base.autoneg = AUTONEG_DISABLE; @@ -1972,20 +1983,21 @@ static void rtase_init_software_variable(struct pci_dev *pdev, tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE; } -static bool rtase_check_mac_version_valid(struct rtase_private *tp) +static int rtase_check_mac_version_valid(struct rtase_private *tp) { - u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK; - bool known_ver = false; + int ret = -ENODEV; + + tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK; - switch (hw_ver) { - case 0x00800000: - case 0x04000000: - case 0x04800000: - known_ver = true; + switch (tp->hw_ver) { + case RTASE_HW_VER_906X_7XA: + case RTASE_HW_VER_906X_7XC: + case RTASE_HW_VER_907XD_V1: + ret = 0; break; } - return known_ver; + return ret; } static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, @@ -2105,9 +2117,13 @@ static int rtase_init_one(struct pci_dev *pdev, tp->pdev = pdev; /* identify chip attached to board */ - if (!rtase_check_mac_version_valid(tp)) - return dev_err_probe(&pdev->dev, -ENODEV, - "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n"); + ret = rtase_check_mac_version_valid(tp); + if (ret != 0) { + dev_err(&pdev->dev, + "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n", + tp->hw_ver); + goto err_out_release_board; + } rtase_init_software_variable(pdev, tp); rtase_init_hardware(tp); @@ -2181,6 +2197,7 @@ err_out_del_napi: netif_napi_del(&ivec->napi); } +err_out_release_board: rtase_release_board(pdev, dev, ioaddr); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 248b30d7b864..16020b72dec8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -487,6 +487,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->select_pcs = socfpga_dwmac_select_pcs; plat_dat->has_gmac = true; + plat_dat->riwt_off = 1; + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3cdc3910f3a0..9b262cdad60b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1177,6 +1177,9 @@ static int stmmac_init_phy(struct net_device *dev) return -ENODEV; } + if (priv->dma_cap.eee) + phy_support_eee(phydev); + ret = phylink_connect_phy(priv->phylink, phydev); } else { fwnode_handle_put(phy_fwnode); diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index dd3ed2d6430b..d9a94df482d9 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -352,8 +352,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) /* The platform resource is provided on the chipset IPQ5018 */ /* This 
resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) + if (res) { priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->eth_ldo_rdy)) + return PTR_ERR(priv->eth_ldo_rdy); + } bus->name = "ipq4019_mdio"; bus->read = ipq4019_mdio_read_c22; diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 96d0b3a5a9d3..944ae98ad110 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -1530,7 +1530,7 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev, return ret; data->eee_enabled = is_enabled; - data->eee_active = ret; + data->eee_active = phydev->eee_active; linkmode_copy(data->supported, phydev->supported_eee); return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a660a80f34b7..0d20b534122b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -990,14 +990,14 @@ static int phy_check_link_status(struct phy_device *phydev) phydev->state = PHY_RUNNING; err = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL); - if (err <= 0) - phydev->enable_tx_lpi = false; - else - phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled; + phydev->eee_active = err > 0; + phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && + phydev->eee_active; phy_link_up(phydev); } else if (!phydev->link && phydev->state != PHY_NOLINK) { phydev->state = PHY_NOLINK; + phydev->eee_active = false; phydev->enable_tx_lpi = false; phy_link_down(phydev); } @@ -1685,15 +1685,21 @@ EXPORT_SYMBOL(phy_ethtool_get_eee); static void phy_ethtool_set_eee_noneg(struct phy_device *phydev, const struct eee_config *old_cfg) { - if (phydev->eee_cfg.tx_lpi_enabled != old_cfg->tx_lpi_enabled || + bool enable_tx_lpi; + + if (!phydev->link) + return; + + enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && phydev->eee_active; + + if (phydev->enable_tx_lpi != enable_tx_lpi || phydev->eee_cfg.tx_lpi_timer != old_cfg->tx_lpi_timer) { - phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg); - if (phydev->link) { - phydev->link = false; - phy_link_down(phydev); - phydev->link = true; - phy_link_up(phydev); - } + phydev->enable_tx_lpi = false; + phydev->link = false; + phy_link_down(phydev); + phydev->enable_tx_lpi = enable_tx_lpi; + phydev->link = true; + phy_link_up(phydev); } } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index d19b3bd0866b..86f1d87a909c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3535,7 +3535,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie, **priv; struct iwl_trans *trans; int ret, addr_size; - void __iomem * const *table; u32 bar0; /* reassign our BAR 0 if invalid due to possible runtime PM races */ @@ -3661,22 +3660,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) { - dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); + dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n"); goto out_no_pci; } - table = pcim_iomap_table(pdev); - if (!table) { - dev_err(&pdev->dev, "pcim_iomap_table failed\n"); - ret = -ENOMEM; - goto out_no_pci; - } - - trans_pcie->hw_base = table[0]; + trans_pcie->hw_base = pcim_iomap(pdev, 0, 0); if (!trans_pcie->hw_base) { - dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); + dev_err(&pdev->dev, "Could not ioremap 
PCI BAR 0.\n"); ret = -ENODEV; goto out_no_pci; } diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index 6fc9dfe82474..544d8a4d2af5 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -2671,15 +2671,20 @@ static int idt_init_pci(struct idt_ntb_dev *ndev) */ pci_set_master(pdev); - /* Request all BARs resources and map BAR0 only */ - ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME); + /* Request all BARs resources */ + ret = pcim_request_all_regions(pdev, NTB_NAME); if (ret != 0) { dev_err(&pdev->dev, "Failed to request resources\n"); goto err_clear_master; } - /* Retrieve virtual address of BAR0 - PCI configuration space */ - ndev->cfgspc = pcim_iomap_table(pdev)[0]; + /* ioremap BAR0 - PCI configuration space */ + ndev->cfgspc = pcim_iomap(pdev, 0, 0); + if (!ndev->cfgspc) { + dev_err(&pdev->dev, "Failed to ioremap BAR 0\n"); + ret = -ENOMEM; + goto err_clear_master; + } /* Put the IDT driver data pointer to the PCI-device private pointer */ pci_set_drvdata(pdev, ndev); diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index 6b4922de3047..37b743acbb7b 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -106,12 +106,12 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) nvdimm_bus_lock(&ndns->dev); nd_dax = nd_dax_alloc(nd_region); - nd_pfn = &nd_dax->nd_pfn; - dax_dev = nd_pfn_devinit(nd_pfn, ndns); + dax_dev = nd_dax_devinit(nd_dax, ndns); nvdimm_bus_unlock(&ndns->dev); if (!dax_dev) return -ENOMEM; pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); + nd_pfn = &nd_dax->nd_pfn; nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn, DAX_SIG); dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>"); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 2dbb1dca17b5..5ca06e9a2d29 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -600,6 +600,13 @@ struct nd_dax *to_nd_dax(struct device *dev); int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns); bool is_nd_dax(const struct device *dev); struct device *nd_dax_create(struct nd_region *nd_region); +static inline struct device *nd_dax_devinit(struct nd_dax *nd_dax, + struct nd_namespace_common *ndns) +{ + if (!nd_dax) + return NULL; + return nd_pfn_devinit(&nd_dax->nd_pfn, ndns); +} #else static inline int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c index f55d60922b87..c3f07be4aa22 100644 --- a/drivers/nvdimm/nd_virtio.c +++ b/drivers/nvdimm/nd_virtio.c @@ -97,7 +97,7 @@ static int virtio_pmem_flush(struct nd_region *nd_region) dev_info(&vdev->dev, "failed to send command to virtio pmem device\n"); err = -EIO; } else { - /* A host repsonse results in "host_ack" getting called */ + /* A host response results in "host_ack" getting called */ wait_event(req_data->host_acked, req_data->done); err = le32_to_cpu(req_data->resp.ret); } diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 586348125b61..cfdfe0eaa512 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -539,7 +539,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) if (!nd_pfn->uuid) { /* - * When probing a namepace via nd_pfn_probe() the uuid + * When probing a namespace via nd_pfn_probe() the uuid * is NULL (see: nd_pfn_devinit()) we init settings from * pfn_sb */ diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 210fb77f51ba..d81faa9d89c9 100644 --- 
a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -316,7 +316,7 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, * range, filesystem turns the normal pwrite to a dax_recovery_write. * * The recovery write consists of clearing media poison, clearing page - * HWPoison bit, reenable page-wide read-write permission, flush the + * HWPoison bit, re-enable page-wide read-write permission, flush the * caches and finally write. A competing pread thread will be held * off during the recovery process since data read back might not be * valid, and this is achieved by clearing the badblock records after diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index c9b97aeabf85..2396d19ce549 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -143,6 +143,28 @@ static void virtio_pmem_remove(struct virtio_device *vdev) virtio_reset_device(vdev); } +static int virtio_pmem_freeze(struct virtio_device *vdev) +{ + vdev->config->del_vqs(vdev); + virtio_reset_device(vdev); + + return 0; +} + +static int virtio_pmem_restore(struct virtio_device *vdev) +{ + int ret; + + ret = init_vq(vdev->priv); + if (ret) { + dev_err(&vdev->dev, "failed to initialize virtio pmem's vq\n"); + return ret; + } + virtio_device_ready(vdev); + + return 0; +} + static unsigned int features[] = { VIRTIO_PMEM_F_SHMEM_REGION, }; @@ -155,6 +177,8 @@ static struct virtio_driver virtio_pmem_driver = { .validate = virtio_pmem_validate, .probe = virtio_pmem_probe, .remove = virtio_pmem_remove, + .freeze = virtio_pmem_freeze, + .restore = virtio_pmem_restore, }; module_virtio_driver(virtio_pmem_driver); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 4bdb79ffa101..430651e7424a 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -263,9 +263,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *o int _opp_add_v1(struct opp_table *opp_table, struct device *dev, struct dev_pm_opp_data *data, bool dynamic); void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu); struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk); -void _put_opp_list_kref(struct opp_table *opp_table); void _required_opps_available(struct dev_pm_opp *opp, int count); -void _update_set_required_opps(struct opp_table *opp_table); static inline bool lazy_linking_pending(struct opp_table *opp_table) { diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 0d94e4a967d8..2fbd379923fd 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -173,6 +173,15 @@ config PCI_PASID If unsure, say N. +config PCIE_TPH + bool "TLP Processing Hints" + help + This option adds support for PCIe TLP Processing Hints (TPH). + TPH allows endpoint devices to provide optimization hints, such as + desired caching behavior, for requests that target memory space. + These hints, called Steering Tags, can empower the system hardware + to optimize the utilization of platform resources. 
+ config PCI_P2PDMA bool "PCI peer-to-peer transfer support" depends on ZONE_DEVICE @@ -305,6 +314,6 @@ source "drivers/pci/hotplug/Kconfig" source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" source "drivers/pci/switch/Kconfig" -source "drivers/pci/pwrctl/Kconfig" +source "drivers/pci/pwrctrl/Kconfig" endif diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 374c5c06d92f..67647f1880fb 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ obj-$(CONFIG_PCI) += msi/ obj-$(CONFIG_PCI) += pcie/ -obj-$(CONFIG_PCI) += pwrctl/ +obj-$(CONFIG_PCI) += pwrctrl/ ifdef CONFIG_PCI obj-$(CONFIG_PROC_FS) += proc.o @@ -36,6 +36,7 @@ obj-$(CONFIG_VGA_ARB) += vgaarb.o obj-$(CONFIG_PCI_DOE) += doe.o obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o obj-$(CONFIG_PCI_NPEM) += npem.o +obj-$(CONFIG_PCIE_TPH) += tph.o # Endpoint library must be initialized before its users obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 55c853686051..98910bc0fcc4 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -13,11 +13,24 @@ #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include "pci.h" +/* + * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond + * to P2P or CardBus bridge windows) go in a table. Additional ones (for + * buses below host bridges or subtractive decode bridges) go in the list. + * Use pci_bus_for_each_resource() to iterate through all the resources. + */ + +struct pci_bus_resource { + struct list_head list; + struct resource *res; +}; + void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset) { @@ -46,8 +59,7 @@ void pci_free_resource_list(struct list_head *resources) } EXPORT_SYMBOL(pci_free_resource_list); -void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, - unsigned int flags) +void pci_bus_add_resource(struct pci_bus *bus, struct resource *res) { struct pci_bus_resource *bus_res; @@ -58,7 +70,6 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, } bus_res->res = res; - bus_res->flags = flags; list_add_tail(&bus_res->list, &bus->resources); } @@ -320,6 +331,47 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } void __weak pcibios_bus_add_device(struct pci_dev *pdev) { } +/* + * Create pwrctrl devices (if required) for the PCI devices to handle the power + * state. + */ +static void pci_pwrctrl_create_devices(struct pci_dev *dev) +{ + struct device_node *np = dev_of_node(&dev->dev); + struct device *parent = &dev->dev; + struct platform_device *pdev; + + /* + * First ensure that we are starting from a PCI bridge and it has a + * corresponding devicetree node. + */ + if (np && pci_is_bridge(dev)) { + /* + * Now look for the child PCI device nodes and create pwrctrl + * devices for them. The pwrctrl device drivers will manage the + * power state of the devices. + */ + for_each_available_child_of_node_scoped(np, child) { + /* + * First check whether the pwrctrl device really + * needs to be created or not. This is decided + * based on at least one of the power supplies + * being defined in the devicetree node of the + * device. 
+ */ + if (!of_pci_supply_present(child)) { + pci_dbg(dev, "skipping OF node: %s\n", child->name); + return; + } + + /* Now create the pwrctrl device */ + pdev = of_platform_device_create(child, NULL, parent); + if (!pdev) + pci_err(dev, "failed to create OF node: %s\n", child->name); + } + } +} + /** * pci_bus_add_device - start driver for a single device * @dev: device to add @@ -329,6 +381,7 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { } void pci_bus_add_device(struct pci_dev *dev) { struct device_node *dn = dev->dev.of_node; + struct platform_device *pdev; int retval; /* @@ -343,20 +396,28 @@ void pci_bus_add_device(struct pci_dev *dev) pci_proc_attach_device(dev); pci_bridge_d3_update(dev); + pci_pwrctrl_create_devices(dev); + + /* + * If the PCI device is associated with a pwrctrl device with a + * power supply, create a device link between the PCI device and + * pwrctrl device. This ensures that pwrctrl drivers are probed + * before PCI client drivers. + */ + pdev = of_find_device_by_node(dn); + if (pdev && of_pci_supply_present(dn)) { + if (!device_link_add(&dev->dev, &pdev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER)) + pci_err(dev, "failed to add device link to power control device %s\n", + pdev->name); + } + dev->match_driver = !dn || of_device_is_available(dn); retval = device_attach(&dev->dev); if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_dev_assign_added(dev, true); - - if (dev_of_node(&dev->dev) && pci_is_bridge(dev)) { - retval = of_platform_populate(dev_of_node(&dev->dev), NULL, NULL, - &dev->dev); - if (retval) - pci_err(dev, "failed to populate child OF nodes (%d)\n", - retval); - } + pci_dev_assign_added(dev); } EXPORT_SYMBOL_GPL(pci_bus_add_device); @@ -389,41 +450,23 @@ void pci_bus_add_devices(const struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_add_devices); -static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), - void *userdata, bool locked) +static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata) { struct pci_dev *dev; - struct pci_bus *bus; - struct list_head *next; - int retval; + int ret = 0; - bus = top; - if (!locked) - down_read(&pci_bus_sem); - next = top->devices.next; - for (;;) { - if (next == &bus->devices) { - /* end of this bus, go up or finish */ - if (bus == top) + list_for_each_entry(dev, &top->devices, bus_list) { + ret = cb(dev, userdata); + if (ret) + break; + if (dev->subordinate) { + ret = __pci_walk_bus(dev->subordinate, cb, userdata); + if (ret) break; - next = bus->self->bus_list.next; - bus = bus->self->bus; - continue; } - dev = list_entry(next, struct pci_dev, bus_list); - if (dev->subordinate) { - /* this is a pci-pci bridge, do its devices next */ - next = dev->subordinate->devices.next; - bus = dev->subordinate; - } else - next = dev->bus_list.next; - - retval = cb(dev, userdata); - if (retval) - break; } - if (!locked) - up_read(&pci_bus_sem); + return ret; } /** @@ -441,7 +484,9 @@ static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void */ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) { - __pci_walk_bus(top, cb, userdata, false); + down_read(&pci_bus_sem); + __pci_walk_bus(top, cb, userdata); + up_read(&pci_bus_sem); } EXPORT_SYMBOL_GPL(pci_walk_bus); @@ -449,9 +494,8 @@ void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void * { lockdep_assert_held(&pci_bus_sem); - __pci_walk_bus(top, cb, userdata, 
true); + __pci_walk_bus(top, cb, userdata); } -EXPORT_SYMBOL_GPL(pci_walk_bus_locked); struct pci_bus *pci_bus_get(struct pci_bus *bus) { diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 284f2e0e4d26..0341d51d6aed 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -386,6 +386,13 @@ static const struct j721e_pcie_data j784s4_pcie_ep_data = { .max_lanes = 4, }; +static const struct j721e_pcie_data j722s_pcie_rc_data = { + .mode = PCI_MODE_RC, + .linkdown_irq_regfield = J7200_LINK_DOWN, + .byte_access_allowed = true, + .max_lanes = 1, +}; + static const struct of_device_id of_j721e_pcie_match[] = { { .compatible = "ti,j721e-pcie-host", @@ -419,6 +426,10 @@ static const struct of_device_id of_j721e_pcie_match[] = { .compatible = "ti,j784s4-pcie-ep", .data = &j784s4_pcie_ep_data, }, + { + .compatible = "ti,j722s-pcie-host", + .data = &j722s_pcie_rc_data, + }, {}, }; @@ -572,15 +583,14 @@ static int j721e_pcie_probe(struct platform_device *pdev) pcie->refclk = clk; /* - * The "Power Sequencing and Reset Signal Timings" table of the - * PCI Express Card Electromechanical Specification, Revision - * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST# - * should be deasserted after minimum of 100us once REFCLK is - * stable. The REFCLK to the connector in RC mode is selected - * while enabling the PHY. So deassert PERST# after 100 us. + * Section 2.2 of the PCI Express Card Electromechanical + * Specification (Revision 5.1) mandates that the deassertion + * of the PERST# signal should be delayed by 100 ms (TPVPERL). + * This shall ensure that the power and the reference clock + * are stable. */ if (gpiod) { - fsleep(PCIE_T_PERST_CLK_US); + msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(gpiod, 1); } @@ -671,15 +681,14 @@ static int j721e_pcie_resume_noirq(struct device *dev) return ret; /* - * The "Power Sequencing and Reset Signal Timings" table of the - * PCI Express Card Electromechanical Specification, Revision - * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST# - * should be deasserted after minimum of 100us once REFCLK is - * stable. The REFCLK to the connector in RC mode is selected - * while enabling the PHY. So deassert PERST# after 100 us. + * Section 2.2 of the PCI Express Card Electromechanical + * Specification (Revision 5.1) mandates that the deassertion + * of the PERST# signal should be delayed by 100 ms (TPVPERL). + * This shall ensure that the power and the reference clock + * are stable. */ if (pcie->reset_gpio) { - fsleep(PCIE_T_PERST_CLK_US); + msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(pcie->reset_gpio, 1); } @@ -712,7 +721,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops, static struct platform_driver j721e_pcie_driver = { .probe = j721e_pcie_probe, - .remove_new = j721e_pcie_remove, + .remove = j721e_pcie_remove, .driver = { .name = "j721e-pcie", .of_match_table = of_j721e_pcie_match, diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index 4251fac5e310..204e045aed8c 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -197,7 +197,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) phy_count = of_property_count_strings(np, "phy-names"); if (phy_count < 1) { - dev_err(dev, "no phy-names. 
PHY will not be initialized\n"); + dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n"); pcie->phy_count = 0; return 0; } @@ -260,7 +260,7 @@ static int cdns_pcie_resume_noirq(struct device *dev) ret = cdns_pcie_enable_phy(pcie); if (ret) { - dev_err(dev, "failed to enable phy\n"); + dev_err(dev, "failed to enable PHY\n"); return ret; } diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index fa45da28a218..6a830166d37f 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -383,7 +383,7 @@ static const struct of_device_id exynos_pcie_of_match[] = { static struct platform_driver exynos_pcie_driver = { .probe = exynos_pcie_probe, - .remove_new = exynos_pcie_remove, + .remove = exynos_pcie_remove, .driver = { .name = "exynos-pcie", .of_match_table = exynos_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 808d1f105417..c8d5c90aa4d4 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -82,6 +82,11 @@ enum imx_pcie_variants { #define IMX_PCIE_FLAG_HAS_SERDES BIT(6) #define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7) #define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8) +/* + * Because of ERR005723 (PCIe does not support L2 power down) we need to + * work around suspend/resume on some devices which are affected by this erratum. + */ +#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) #define imx_check_flag(pci, val) (pci->drvdata->flags & val) @@ -1237,9 +1242,19 @@ static int imx_pcie_suspend_noirq(struct device *dev) return 0; imx_pcie_msi_save_restore(imx_pcie, true); - imx_pcie_pm_turnoff(imx_pcie); - imx_pcie_stop_link(imx_pcie->pci); - imx_pcie_host_exit(pp); + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { + /* + * The minimum for a workaround would be to set PERST# and to + * set the PCIE_TEST_PD flag. However, we can also disable the + * clock which saves some power. + */ + imx_pcie_assert_core_reset(imx_pcie); + imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); + } else { + imx_pcie_pm_turnoff(imx_pcie); + imx_pcie_stop_link(imx_pcie->pci); + imx_pcie_host_exit(pp); + } return 0; } @@ -1253,14 +1268,32 @@ static int imx_pcie_resume_noirq(struct device *dev) if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; - ret = imx_pcie_host_init(pp); - if (ret) - return ret; - imx_pcie_msi_save_restore(imx_pcie, false); - dw_pcie_setup_rc(pp); + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { + ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); + if (ret) + return ret; + ret = imx_pcie_deassert_core_reset(imx_pcie); + if (ret) + return ret; + /* + * Using PCIE_TEST_PD seems to disable MSI and power down the + * root complex. This is why we have to set up the RC again and + * why we have to restore the MSI register. 
+ */ + ret = dw_pcie_setup_rc(&imx_pcie->pci->pp); + if (ret) + return ret; + imx_pcie_msi_save_restore(imx_pcie, false); + } else { + ret = imx_pcie_host_init(pp); + if (ret) + return ret; + imx_pcie_msi_save_restore(imx_pcie, false); + dw_pcie_setup_rc(pp); - if (imx_pcie->link_is_up) - imx_pcie_start_link(imx_pcie->pci); + if (imx_pcie->link_is_up) + imx_pcie_start_link(imx_pcie->pci); + } return 0; } @@ -1485,7 +1518,9 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, .flags = IMX_PCIE_FLAG_IMX_PHY | - IMX_PCIE_FLAG_IMX_SPEED_CHANGE, + IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_BROKEN_SUSPEND | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", .clk_names = imx6q_clks, diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 2219b1a866fa..63bd5003da45 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -455,6 +455,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus, struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); u32 reg; + /* + * Checking whether the link is up here is a last line of defense + * against platforms that forward errors on the system bus as + * SError upon PCI configuration transactions issued when the link + * is down. This check is racy by definition and does not stop + * the system from triggering an SError if the link goes down + * after this check is performed. + */ + if (!dw_pcie_link_up(pci)) + return NULL; + reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); if (!pci_is_root_bus(bus->parent)) @@ -1093,6 +1104,7 @@ static int ks_pcie_am654_set_mode(struct device *dev, static const struct ks_pcie_of_data ks_pcie_rc_of_data = { .host_ops = &ks_pcie_host_ops, + .mode = DW_PCIE_RC_TYPE, .version = DW_PCIE_VER_365A, }; @@ -1363,7 +1375,7 @@ static void ks_pcie_remove(struct platform_device *pdev) static struct platform_driver ks_pcie_driver = { .probe = ks_pcie_probe, - .remove_new = ks_pcie_remove, + .remove = ks_pcie_remove, .driver = { .name = "keystone-pcie", .of_match_table = ks_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c index 76d0ddea8007..1340edc18d12 100644 --- a/drivers/pci/controller/dwc/pcie-bt1.c +++ b/drivers/pci/controller/dwc/pcie-bt1.c @@ -632,7 +632,7 @@ MODULE_DEVICE_TABLE(of, bt1_pcie_of_match); static struct platform_driver bt1_pcie_driver = { .probe = bt1_pcie_probe, - .remove_new = bt1_pcie_remove, + .remove = bt1_pcie_remove, .driver = { .name = "bt1-pcie", .of_match_table = bt1_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 43ba5c6738df..f3ac7d46a855 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -268,6 +268,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, return -EINVAL; } +static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr, + size_t *pci_size, size_t *offset) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u64 mask = pci->region_align - 1; + size_t ofst = pci_addr & mask; + + *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size); + *offset = ofst; + + return pci_addr & ~mask; +} + static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr) { @@ -280,6 
+294,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, if (ret < 0) return; + ep->outbound_addr[atu_index] = 0; dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index); clear_bit(atu_index, ep->ob_window_map); } @@ -444,6 +459,7 @@ static const struct pci_epc_ops epc_ops = { .write_header = dw_pcie_ep_write_header, .set_bar = dw_pcie_ep_set_bar, .clear_bar = dw_pcie_ep_clear_bar, + .align_addr = dw_pcie_ep_align_addr, .map_addr = dw_pcie_ep_map_addr, .unmap_addr = dw_pcie_ep_unmap_addr, .set_msi = dw_pcie_ep_set_msi, @@ -488,7 +504,8 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u32 msg_addr_lower, msg_addr_upper, reg; struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; - unsigned int aligned_offset; + size_t map_size = sizeof(u32); + size_t offset; u16 msg_ctrl, msg_data; bool has_upper; u64 msg_addr; @@ -516,14 +533,13 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, } msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; - aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); + msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, - epc->mem->window.page_size); + map_size); if (ret) return ret; - writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); + writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset); dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); @@ -574,8 +590,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, struct pci_epf_msix_tbl *msix_tbl; struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; + size_t map_size = sizeof(u32); + size_t offset; u32 reg, msg_data, vec_ctrl; - unsigned int aligned_offset; u32 tbl_offset; u64 msg_addr; int ret; @@ -600,14 +617,13 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, return -EPERM; } - aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); + msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, - epc->mem->window.page_size); + map_size); if (ret) return ret; - writel(msg_data, ep->msi_mem + aligned_offset); + writel(msg_data, ep->msi_mem + offset); dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); @@ -689,7 +705,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) * for 1 MB BAR size only. 
*/ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); } dw_pcie_setup(pci); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 3e41865c7290..d2291c3ceb8b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -474,8 +474,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (pci_msi_enabled()) { pp->has_msi_ctrl = !(pp->ops->msi_init || - of_property_read_bool(np, "msi-parent") || - of_property_read_bool(np, "msi-map")); + of_property_present(np, "msi-parent") || + of_property_present(np, "msi-map")); /* * For the has_msi_ctrl case the default assignment is handled diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 7a11c618b9d9..615a0e3e6d7e 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -439,7 +439,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match); static struct platform_driver histb_pcie_platform_driver = { .probe = histb_pcie_probe, - .remove_new = histb_pcie_remove, + .remove = histb_pcie_remove, .driver = { .name = "histb-pcie", .of_match_table = histb_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 676d2aba4fbd..9b53b8f6f268 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -443,7 +443,7 @@ static const struct of_device_id of_intel_pcie_match[] = { static struct platform_driver intel_pcie_driver = { .probe = intel_pcie_probe, - .remove_new = intel_pcie_remove, + .remove = intel_pcie_remove, .driver = { .name = "intel-gw-pcie", .of_match_table = of_intel_pcie_match, diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 85a2c77b1835..1b2088acb538 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -769,7 +769,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) static struct platform_driver kirin_pcie_driver = { .probe = kirin_pcie_probe, - .remove_new = kirin_pcie_remove, + .remove = kirin_pcie_remove, .driver = { .name = "kirin-pcie", .of_match_table = kirin_pcie_match, diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index e588fcc54589..c08f64d7a825 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -396,6 +396,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) return ret; } + /* Perform cleanup that requires refclk */ + pci_epc_deinit_notify(pci->ep.epc); + dw_pcie_ep_cleanup(&pci->ep); + /* Assert WAKE# to RC to indicate device is ready */ gpiod_set_value_cansleep(pcie_ep->wake, 1); usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500); @@ -540,8 +544,6 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); - pci_epc_deinit_notify(pci->ep.epc); - dw_pcie_ep_cleanup(&pci->ep); qcom_pcie_disable_resources(pcie_ep); pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED; } @@ -937,7 +939,7 @@ MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match); static struct platform_driver qcom_pcie_ep_driver = { .probe = qcom_pcie_ep_probe, - .remove_new = qcom_pcie_ep_remove, + .remove = qcom_pcie_ep_remove, .driver = { .name = "qcom-pcie-ep", .of_match_table = 
qcom_pcie_ep_match, diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index ef44a82be058..dc102d8bd58c 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -133,6 +133,7 @@ /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ #define PARF_INT_ALL_LINK_UP BIT(13) +#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23) /* PARF_NO_SNOOP_OVERIDE register fields */ #define WR_NO_SNOOP_OVERIDE_EN BIT(1) @@ -1364,6 +1365,16 @@ static const struct qcom_pcie_ops ops_1_9_0 = { .config_sid = qcom_pcie_config_sid_1_9_0, }; +/* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */ +static const struct qcom_pcie_ops ops_1_21_0 = { + .get_resources = qcom_pcie_get_resources_2_7_0, + .init = qcom_pcie_init_2_7_0, + .post_init = qcom_pcie_post_init_2_7_0, + .host_post_init = qcom_pcie_host_post_init_2_7_0, + .deinit = qcom_pcie_deinit_2_7_0, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ static const struct qcom_pcie_ops ops_2_9_0 = { .get_resources = qcom_pcie_get_resources_2_9_0, @@ -1411,7 +1422,7 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = { }; static const struct qcom_pcie_cfg cfg_sc8280xp = { - .ops = &ops_1_9_0, + .ops = &ops_1_21_0, .no_l0s = true, }; @@ -1716,7 +1727,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_host_deinit; } - writel_relaxed(PARF_INT_ALL_LINK_UP, pcie->parf + PARF_INT_ALL_MASK); + writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7, + pcie->parf + PARF_INT_ALL_MASK); } qcom_pcie_icc_opp_update(pcie); @@ -1828,6 +1840,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 }, + { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, @@ -1843,7 +1856,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 }, - { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp }, { } }; diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 3a5511c3f7d9..fc872dd35029 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -775,7 +775,7 @@ static struct platform_driver rcar_gen4_pcie_driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = rcar_gen4_pcie_probe, - .remove_new = rcar_gen4_pcie_remove, + .remove = rcar_gen4_pcie_remove, }; module_platform_driver(rcar_gen4_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index c1394f2ab63f..5103995cd6c7 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1704,9 +1704,6 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) if (ret) dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); - pci_epc_deinit_notify(pcie->pci.ep.epc); - dw_pcie_ep_cleanup(&pcie->pci.ep); - reset_control_assert(pcie->core_rst); 
tegra_pcie_disable_phy(pcie); @@ -1785,6 +1782,10 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) goto fail_phy; } + /* Perform cleanup that requires refclk */ + pci_epc_deinit_notify(pcie->pci.ep.epc); + dw_pcie_ep_cleanup(&pcie->pci.ep); + /* Clear any stale interrupt statuses */ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); @@ -2493,7 +2494,7 @@ static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { static struct platform_driver tegra_pcie_dw_driver = { .probe = tegra_pcie_dw_probe, - .remove_new = tegra_pcie_dw_remove, + .remove = tegra_pcie_dw_remove, .shutdown = tegra_pcie_dw_shutdown, .driver = { .name = "tegra194-pcie", diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index a598a98247ce..a29796cce420 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -1996,7 +1996,7 @@ static struct platform_driver advk_pcie_driver = { .of_match_table = advk_pcie_of_match_table, }, .probe = advk_pcie_probe, - .remove_new = advk_pcie_remove, + .remove = advk_pcie_remove, }; module_platform_driver(advk_pcie_driver); diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index 5f06f94db7b1..4051b9b61dac 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c @@ -82,7 +82,7 @@ static struct platform_driver gen_pci_driver = { .of_match_table = gen_pci_of_match, }, .probe = pci_host_common_probe, - .remove_new = pci_host_common_remove, + .remove = pci_host_common_remove, }; module_platform_driver(gen_pci_driver); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 29fe09c99e7d..46d3afe1d308 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1727,7 +1727,7 @@ static struct platform_driver mvebu_pcie_driver = { .pm = &mvebu_pcie_pm_ops, }, .probe = mvebu_pcie_probe, - .remove_new = mvebu_pcie_remove, + .remove = mvebu_pcie_remove, }; module_platform_driver(mvebu_pcie_driver); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index d7517c3976e7..b3cdbc5927de 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -1460,7 +1460,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) pcie->cs = *res; /* constrain configuration space to 4 KiB */ - pcie->cs.end = pcie->cs.start + SZ_4K - 1; + resource_set_size(&pcie->cs, SZ_4K); pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); if (IS_ERR(pcie->cfg)) { @@ -2800,6 +2800,6 @@ static struct platform_driver tegra_pcie_driver = { .pm = &tegra_pcie_pm_ops, }, .probe = tegra_pcie_probe, - .remove_new = tegra_pcie_remove, + .remove = tegra_pcie_remove, }; module_platform_driver(tegra_pcie_driver); diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c index 06a9855cb431..f1bd5de67997 100644 --- a/drivers/pci/controller/pci-thunder-pem.c +++ b/drivers/pci/controller/pci-thunder-pem.c @@ -400,9 +400,9 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg) * Reserve 64K size PEM specific resources. The full 16M range * size is required for thunder_pem_init() call. 
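The pci-tegra hunk above and the thunder-pem one below replace open-coded end arithmetic with resource_set_size(). The helper only changes the length while keeping the kernel's inclusive-end convention; a self-contained model of the equivalent arithmetic (struct example_resource is a simplification of struct resource, sketch only):

#include <stdio.h>

struct example_resource {
	unsigned long long start, end;	/* end is inclusive */
};

static void example_resource_set_size(struct example_resource *res,
				      unsigned long long size)
{
	res->end = res->start + size - 1;
}

int main(void)
{
	struct example_resource cs = { .start = 0x38000000, .end = 0x3fffffff };

	example_resource_set_size(&cs, 0x1000);		/* constrain to 4 KiB */
	printf("%#llx-%#llx\n", cs.start, cs.end);	/* 0x38000000-0x38000fff */
	return 0;
}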
*/ - res_pem->end = res_pem->start + SZ_64K - 1; + resource_set_size(res_pem, SZ_64K); thunder_pem_reserve_range(dev, root->segment, res_pem); - res_pem->end = res_pem->start + SZ_16M - 1; + resource_set_size(res_pem, SZ_16M); /* Reserve PCI configuration space as well. */ thunder_pem_reserve_range(dev, root->segment, &cfg->res); diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index 3ce38dfd0d29..88c0977bc41a 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -518,7 +518,7 @@ static struct platform_driver xgene_msi_driver = { .of_match_table = xgene_msi_match_table, }, .probe = xgene_msi_probe, - .remove_new = xgene_msi_remove, + .remove = xgene_msi_remove, }; static int __init xgene_pcie_msi_init(void) diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index e36a6e158d23..e1cee3c0575f 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -267,7 +267,7 @@ static struct platform_driver altera_msi_driver = { .of_match_table = altera_msi_of_match, }, .probe = altera_msi_probe, - .remove_new = altera_msi_remove, + .remove = altera_msi_remove, }; static int __init altera_msi_init(void) diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 650b2dd81c48..eb55a7f8573a 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -815,10 +815,10 @@ static void altera_pcie_remove(struct platform_device *pdev) } static struct platform_driver altera_pcie_driver = { - .probe = altera_pcie_probe, - .remove_new = altera_pcie_remove, + .probe = altera_pcie_probe, + .remove = altera_pcie_remove, .driver = { - .name = "altera-pcie", + .name = "altera-pcie", .of_match_table = altera_pcie_of_match, }, }; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 9321280f6edb..e733a27dc8df 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -1928,7 +1928,7 @@ static const struct dev_pm_ops brcm_pcie_pm_ops = { static struct platform_driver brcm_pcie_driver = { .probe = brcm_pcie_probe, - .remove_new = brcm_pcie_remove, + .remove = brcm_pcie_remove, .driver = { .name = "brcm-pcie", .of_match_table = brcm_pcie_match, diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c index ad9d5ffcd9e3..aaf1ed2b6e59 100644 --- a/drivers/pci/controller/pcie-hisi-error.c +++ b/drivers/pci/controller/pcie-hisi-error.c @@ -317,7 +317,7 @@ static struct platform_driver hisi_pcie_error_handler_driver = { .acpi_match_table = hisi_pcie_acpi_match, }, .probe = hisi_pcie_error_handler_probe, - .remove_new = hisi_pcie_error_handler_remove, + .remove = hisi_pcie_error_handler_remove, }; module_platform_driver(hisi_pcie_error_handler_driver); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index 4e6aa882a567..0cb78c583c7e 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -134,7 +134,7 @@ static struct platform_driver iproc_pltfm_pcie_driver = { .of_match_table = of_match_ptr(iproc_pcie_of_match_table), }, .probe = iproc_pltfm_pcie_probe, - .remove_new = iproc_pltfm_pcie_remove, + .remove = iproc_pltfm_pcie_remove, .shutdown = iproc_pltfm_pcie_shutdown, }; module_platform_driver(iproc_pltfm_pcie_driver); diff --git 
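The .remove_new to .remove renames running through these hunks (xgene-msi, altera, brcmstb, hisi, iproc, and the rest) are the tail end of the platform-bus conversion: .remove_new was a transitional alias for the void-returning remove callback, and now that struct platform_driver::remove itself has that signature, drivers move back to the plain name. Sketch of the converted shape, with illustrative example_* names:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* The modern callback returns void: the core has no useful way to act
 * on an error at remove time anyway. */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "example",
	},
};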
a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 66ce4b5d309b..be52e3a123ab 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -28,7 +28,12 @@ #include "../pci.h" +#define PCIE_BASE_CFG_REG 0x14 +#define PCIE_BASE_CFG_SPEED GENMASK(15, 8) + #define PCIE_SETTING_REG 0x80 +#define PCIE_SETTING_LINK_WIDTH GENMASK(11, 8) +#define PCIE_SETTING_GEN_SUPPORT GENMASK(14, 12) #define PCIE_PCI_IDS_1 0x9c #define PCI_CLASS(class) (class << 8) #define PCIE_RC_MODE BIT(0) @@ -125,6 +130,9 @@ struct mtk_gen3_pcie; +#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0) +#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0) + /** * struct mtk_gen3_pcie_pdata - differentiate between host generations * @power_up: pcie power_up callback @@ -160,6 +168,8 @@ struct mtk_msi_set { * @phy: PHY controller block * @clks: PCIe clocks * @num_clks: PCIe clocks count for this port + * @max_link_speed: Maximum link speed (PCIe Gen) for this port + * @num_lanes: Number of PCIe lanes for this port * @irq: PCIe controller interrupt number * @saved_irq_state: IRQ enable state saved at suspend time * @irq_lock: lock protecting IRQ register access @@ -180,6 +190,8 @@ struct mtk_gen3_pcie { struct phy *phy; struct clk_bulk_data *clks; int num_clks; + u8 max_link_speed; + u8 num_lanes; int irq; u32 saved_irq_state; @@ -381,11 +393,35 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) int err; u32 val; - /* Set as RC mode */ + /* Set as RC mode and set controller PCIe Gen speed restriction, if any */ val = readl_relaxed(pcie->base + PCIE_SETTING_REG); val |= PCIE_RC_MODE; + if (pcie->max_link_speed) { + val &= ~PCIE_SETTING_GEN_SUPPORT; + + /* Can enable link speed support only from Gen2 onwards */ + if (pcie->max_link_speed >= 2) + val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT, + GENMASK(pcie->max_link_speed - 2, 0)); + } + if (pcie->num_lanes) { + val &= ~PCIE_SETTING_LINK_WIDTH; + + /* Zero means one lane, each bit activates x2/x4/x8/x16 */ + if (pcie->num_lanes > 1) + val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH, + GENMASK(fls(pcie->num_lanes >> 2), 0)); + } writel_relaxed(val, pcie->base + PCIE_SETTING_REG); + /* Set Link Control 2 (LNKCTL2) speed restriction, if any */ + if (pcie->max_link_speed) { + val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS); + val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED; + val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed); + writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS); + } + /* Set class code */ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); val &= ~GENMASK(31, 8); @@ -813,6 +849,7 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *regs; + u32 num_lanes; regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); if (!regs) @@ -858,6 +895,14 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) return pcie->num_clks; } + ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes); + if (ret == 0) { + if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2)) + dev_warn(dev, "invalid num-lanes, using controller defaults\n"); + else + pcie->num_lanes = num_lanes; + } + return 0; } @@ -1004,9 +1049,21 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); } +static int 
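The PCIE_SETTING_REG programming above packs one bit per supported generation above Gen1 into GEN_SUPPORT and one bit per width doubling above x1 into LINK_WIDTH. A worked, self-contained example of the two GENMASK() computations (plain C; the GENMASK macro here mirrors include/linux/bits.h):

#include <stdio.h>

#define GENMASK(h, l)	(((1UL << ((h) - (l) + 1)) - 1) << (l))

/* One bit per generation from Gen2 up: Gen3 -> 0b011, Gen4 -> 0b111 */
static unsigned long gen_support_bits(unsigned int max_link_speed)
{
	return max_link_speed >= 2 ? GENMASK(max_link_speed - 2, 0) : 0;
}

/* One bit per width step from x2 up: x4 -> 0b0011, x16 -> 0b1111 */
static unsigned long link_width_bits(unsigned int num_lanes)
{
	unsigned int v = num_lanes >> 2, n = 0;

	while (v) {			/* fls() equivalent */
		v >>= 1;
		n++;
	}
	return num_lanes > 1 ? GENMASK(n, 0) : 0;
}

int main(void)
{
	printf("Gen3: %#lx, Gen4: %#lx\n",
	       gen_support_bits(3), gen_support_bits(4));	/* 0x3, 0x7 */
	printf("x2: %#lx, x4: %#lx, x16: %#lx\n",
	       link_width_bits(2), link_width_bits(4),
	       link_width_bits(16));				/* 0x1, 0x3, 0xf */
	return 0;
}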
mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie) +{ + u32 val; + int ret; + + val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG); + val = FIELD_GET(PCIE_BASE_CFG_SPEED, val); + ret = fls(val); + + return ret > 0 ? ret : -EINVAL; +} + static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) { - int err; + int err, max_speed; err = mtk_pcie_parse_port(pcie); if (err) @@ -1031,6 +1088,20 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) if (err) return err; + err = of_pci_get_max_link_speed(pcie->dev->of_node); + if (err) { + /* Get the maximum speed supported by the controller */ + max_speed = mtk_pcie_get_controller_max_link_speed(pcie); + + /* Set max_link_speed only if the controller supports it */ + if (max_speed >= 0 && max_speed <= err) { + pcie->max_link_speed = err; + dev_info(pcie->dev, + "maximum controller link speed Gen%d, overriding to Gen%u", + max_speed, pcie->max_link_speed); + } + } + /* Try link up */ err = mtk_pcie_startup_port(pcie); if (err) @@ -1225,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_of_match); static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, - .remove_new = mtk_pcie_remove, + .remove = mtk_pcie_remove, .driver = { .name = "mtk-pcie-gen3", .of_match_table = mtk_pcie_of_match, diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 7f7d04c2ea57..3bcfc4e58ba2 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1235,7 +1235,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_ids); static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, - .remove_new = mtk_pcie_remove, + .remove = mtk_pcie_remove, .driver = { .name = "mtk-pcie", .of_match_table = mtk_pcie_ids, diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 9b4754a45515..776caa0b1011 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -541,7 +541,7 @@ MODULE_DEVICE_TABLE(of, mt7621_pcie_ids); static struct platform_driver mt7621_pcie_driver = { .probe = mt7621_pcie_probe, - .remove_new = mt7621_pcie_remove, + .remove = mt7621_pcie_remove, .driver = { .name = "mt7621-pci", .of_match_table = mt7621_pcie_ids, diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 3dd653f3d784..7c92eada04af 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -796,8 +796,8 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) rcar_pci_write_reg(pcie, 0, PCIEMSIIER); /* - * Setup MSI data target using RC base address address, which - * is guaranteed to be in the low 32bit range on any R-Car HW. + * Setup MSI data target using RC base address, which is guaranteed + * to be in the low 32bit range on any R-Car HW. 
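mtk_pcie_get_controller_max_link_speed() reads a per-generation capability bitmask and uses fls() so that the position of the highest set bit becomes the speed ceiling. A worked decode, assuming the encoding the driver implies (bit 0 set means Gen1 supported, bit 1 Gen2, and so on); self-contained sketch:

static int example_max_gen(unsigned int speed_field)
{
	int n = 0;

	while (speed_field) {		/* fls() equivalent */
		speed_field >>= 1;
		n++;
	}
	return n > 0 ? n : -1;		/* mirrors the -EINVAL fallback */
}
/* example_max_gen(0x7) == 3: Gen1..Gen3 supported, ceiling is Gen3 */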
*/ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR); rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR); diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 136274533656..1064b7b06cef 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -10,12 +10,16 @@ #include <linux/configfs.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/iopoll.h> #include <linux/kernel.h> +#include <linux/irq.h> #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> #include <linux/pci-epf.h> #include <linux/sizes.h> +#include <linux/workqueue.h> #include "pcie-rockchip.h" @@ -48,6 +52,10 @@ struct rockchip_pcie_ep { u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; + int perst_irq; + bool perst_asserted; + bool link_up; + struct delayed_work link_training; }; static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, @@ -63,15 +71,25 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); } +static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip, + u64 pci_addr, size_t size) +{ + int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1)); + + return clamp(num_pass_bits, + ROCKCHIP_PCIE_AT_MIN_NUM_BITS, + ROCKCHIP_PCIE_AT_MAX_NUM_BITS); +} + static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, u32 r, u64 cpu_addr, u64 pci_addr, size_t size) { - int num_pass_bits = fls64(size - 1); + int num_pass_bits; u32 addr0, addr1, desc0; - if (num_pass_bits < 8) - num_pass_bits = 8; + num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip, + pci_addr, size); addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); @@ -228,6 +246,28 @@ static inline u32 rockchip_ob_region(phys_addr_t addr) return (addr >> ilog2(SZ_1M)) & 0x1f; } +static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr, + size_t *pci_size, size_t *addr_offset) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + size_t size = *pci_size; + u64 offset, mask; + int num_bits; + + num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip, + pci_addr, size); + mask = (1ULL << num_bits) - 1; + + offset = pci_addr & mask; + if (size + offset > SZ_1M) + size = SZ_1M - offset; + + *pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN); + *addr_offset = offset; + + return pci_addr & ~mask; +} + static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr, u64 pci_addr, size_t size) @@ -236,6 +276,9 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, struct rockchip_pcie *pcie = &ep->rockchip; u32 r = rockchip_ob_region(addr); + if (test_bit(r, &ep->ob_region_map)) + return -EBUSY; + rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size); set_bit(r, &ep->ob_region_map); @@ -249,13 +292,9 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; - u32 r; - - for (r = 0; r < ep->max_regions; r++) - if (ep->ob_addr[r] == addr) - break; + u32 r = rockchip_ob_region(addr); - if (r == ep->max_regions) + if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map)) return; rockchip_pcie_clear_ep_ob_atu(rockchip, r); @@ -351,9 +390,10 @@ static int 
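rockchip_pcie_ep_ob_atu_num_bits() above sizes an outbound ATU entry from the addresses actually spanned rather than from the raw size: XORing the first and last PCI address of the window leaves exactly the low-order bits that vary inside it, and fls64() of that difference is the number of bits the ATU must pass through untranslated. A worked, self-contained example:

#include <stdint.h>
#include <stdio.h>

static int example_num_pass_bits(uint64_t pci_addr, uint64_t size)
{
	uint64_t diff = pci_addr ^ (pci_addr + size - 1);
	int bits = 0;

	while (diff) {			/* fls64() equivalent */
		diff >>= 1;
		bits++;
	}
	/* clamp(bits, 8, 20): hardware minimum and 1 MiB window maximum */
	return bits < 8 ? 8 : (bits > 20 ? 20 : bits);
}

int main(void)
{
	/* 0x12345000 ^ 0x12346fff == 0x3fff -> 14 passthrough bits */
	printf("%d\n", example_num_pass_bits(0x12345000ULL, 0x2000ULL));
	return 0;
}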
rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, { struct rockchip_pcie *rockchip = &ep->rockchip; u32 flags, mme, data, data_mask; + size_t irq_pci_size, offset; + u64 irq_pci_addr; u8 msi_count; u64 pci_addr; - u32 r; /* Check MSI enable bit */ flags = rockchip_pcie_read(&ep->rockchip, @@ -389,18 +429,21 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, PCI_MSI_ADDRESS_LO); /* Set the outbound region if needed. */ - if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) || + irq_pci_size = ~PCIE_ADDR_MASK + 1; + irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc, + pci_addr & PCIE_ADDR_MASK, + &irq_pci_size, &offset); + if (unlikely(ep->irq_pci_addr != irq_pci_addr || ep->irq_pci_fn != fn)) { - r = rockchip_ob_region(ep->irq_phys_addr); - rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, - ep->irq_phys_addr, - pci_addr & PCIE_ADDR_MASK, - ~PCIE_ADDR_MASK + 1); - ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK); + rockchip_pcie_prog_ep_ob_atu(rockchip, fn, + rockchip_ob_region(ep->irq_phys_addr), + ep->irq_phys_addr, + irq_pci_addr, irq_pci_size); + ep->irq_pci_addr = irq_pci_addr; ep->irq_pci_fn = fn; } - writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK)); + writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK)); return 0; } @@ -432,14 +475,222 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc) rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG); + if (rockchip->perst_gpio) + enable_irq(ep->perst_irq); + + /* Enable configuration and start link training */ + rockchip_pcie_write(rockchip, + PCIE_CLIENT_LINK_TRAIN_ENABLE | + PCIE_CLIENT_CONF_ENABLE, + PCIE_CLIENT_CONFIG); + + if (!rockchip->perst_gpio) + schedule_delayed_work(&ep->link_training, 0); + + return 0; +} + +static void rockchip_pcie_ep_stop(struct pci_epc *epc) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + + if (rockchip->perst_gpio) { + ep->perst_asserted = true; + disable_irq(ep->perst_irq); + } + + cancel_delayed_work_sync(&ep->link_training); + + /* Stop link training and disable configuration */ + rockchip_pcie_write(rockchip, + PCIE_CLIENT_CONF_DISABLE | + PCIE_CLIENT_LINK_TRAIN_DISABLE, + PCIE_CLIENT_CONFIG); +} + +static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RL; + rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS); +} + +static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip) +{ + u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1); + + return PCIE_LINK_UP(val); +} + +static void rockchip_pcie_ep_link_training(struct work_struct *work) +{ + struct rockchip_pcie_ep *ep = + container_of(work, struct rockchip_pcie_ep, link_training.work); + struct rockchip_pcie *rockchip = &ep->rockchip; + struct device *dev = rockchip->dev; + u32 val; + int ret; + + /* Enable Gen1 training and wait for its completion */ + ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, + val, PCIE_LINK_TRAINING_DONE(val), 50, + LINK_TRAIN_TIMEOUT); + if (ret) + goto again; + + /* Make sure that the link is up */ + ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, + val, PCIE_LINK_UP(val), 50, + LINK_TRAIN_TIMEOUT); + if (ret) + goto again; + + /* + * Check the current speed: if gen2 speed was requested and we are not + * at gen2 speed yet, retrain again for gen2. 
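rockchip_pcie_ep_align_addr() above (wired up below as the new .align_addr EPC operation) turns an arbitrary PCI address/size pair into something the ATU can map: an aligned base, the offset of the requested address inside the mapping, and a mapping size rounded up to the 256-byte ATU granularity. A worked example under those rules, with hypothetical input numbers:

/* pci_addr = 0x10001234, size = 0x100:
 *   span     = 0x10001234 ^ 0x10001333 = 0x107 -> fls64 = 9, clamp -> 9
 *   mask     = (1ULL << 9) - 1         = 0x1ff
 *   offset   = 0x10001234 & 0x1ff      = 0x34
 *   base     = 0x10001234 & ~0x1ff     = 0x10001200
 *   map size = ALIGN(0x34 + 0x100, 0x100) = 0x200
 * The caller maps 0x200 bytes at 0x10001200 and adds the 0x34 offset to
 * reach the requested address, which is exactly what the MSI doorbell
 * path above does with writew(data, ep->irq_cpu_addr + offset + ...). */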
+ */ + val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); + if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) { + /* Enable retrain for gen2 */ + rockchip_pcie_ep_retrain_link(rockchip); + readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, + val, PCIE_LINK_IS_GEN2(val), 50, + LINK_TRAIN_TIMEOUT); + } + + /* Check again that the link is up */ + if (!rockchip_pcie_ep_link_up(rockchip)) + goto again; + + /* + * If PERST# was asserted while polling the link, do not notify + * the function. + */ + if (ep->perst_asserted) + return; + + val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0); + dev_info(dev, + "link up (negotiated speed: %sGT/s, width: x%lu)\n", + (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5", + ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >> + PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1); + + /* Notify the function */ + pci_epc_linkup(ep->epc); + ep->link_up = true; + + return; + +again: + schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5)); +} + +static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep) +{ + struct rockchip_pcie *rockchip = &ep->rockchip; + + dev_dbg(rockchip->dev, "PERST# asserted, link down\n"); + + if (ep->perst_asserted) + return; + + ep->perst_asserted = true; + + cancel_delayed_work_sync(&ep->link_training); + + if (ep->link_up) { + pci_epc_linkdown(ep->epc); + ep->link_up = false; + } +} + +static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep) +{ + struct rockchip_pcie *rockchip = &ep->rockchip; + + dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n"); + + if (!ep->perst_asserted) + return; + + ep->perst_asserted = false; + + /* Enable link re-training */ + rockchip_pcie_ep_retrain_link(rockchip); + + /* Start link training */ + schedule_delayed_work(&ep->link_training, 0); +} + +static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data) +{ + struct pci_epc *epc = data; + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + u32 perst = gpiod_get_value(rockchip->perst_gpio); + + if (perst) + rockchip_pcie_ep_perst_assert(ep); + else + rockchip_pcie_ep_perst_deassert(ep); + + irq_set_irq_type(ep->perst_irq, + (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW)); + + return IRQ_HANDLED; +} + +static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + struct device *dev = rockchip->dev; + int ret; + + if (!rockchip->perst_gpio) + return 0; + + /* PCIe reset interrupt */ + ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio); + if (ep->perst_irq < 0) { + dev_err(dev, + "failed to get IRQ for PERST# GPIO: %d\n", + ep->perst_irq); + + return ep->perst_irq; + } + + /* + * The perst_gpio is active low, so when it is inactive on start, it + * is high and will trigger the perst_irq handler. So treat this initial + * IRQ as a dummy one by faking the host asserting PERST#. 
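The PERST# GPIO handler above emulates an edge-triggered interrupt with a level-triggered one: each time it fires, it services the current level and then re-arms for the opposite level with irq_set_irq_type(). A condensed sketch of that pattern (generic example, not the driver's code):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t example_perst_thread(int irq, void *data)
{
	struct gpio_desc *perst = data;			/* active-low PERST# */
	int asserted = gpiod_get_value(perst);		/* logical: 1 = asserted */

	/* ... handle link down (asserted) or restart training ... */

	/*
	 * IRQ triggers are physical levels while gpiod values are logical:
	 * asserted (logical 1) means the line is physically low, so arm
	 * TRIGGER_HIGH to catch the coming deassertion, and vice versa.
	 */
	irq_set_irq_type(irq, asserted ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW);

	return IRQ_HANDLED;
}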
+ */ + ep->perst_asserted = true; + irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL, + rockchip_pcie_ep_perst_irq_thread, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "pcie-ep-perst", epc); + if (ret) { + dev_err(dev, + "failed to request IRQ for PERST# GPIO: %d\n", + ret); + + return ret; + } + return 0; } static const struct pci_epc_features rockchip_pcie_epc_features = { - .linkup_notifier = false, + .linkup_notifier = true, .msi_capable = true, .msix_capable = false, - .align = 256, + .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN, }; static const struct pci_epc_features* @@ -452,17 +703,19 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = { .write_header = rockchip_pcie_ep_write_header, .set_bar = rockchip_pcie_ep_set_bar, .clear_bar = rockchip_pcie_ep_clear_bar, + .align_addr = rockchip_pcie_ep_align_addr, .map_addr = rockchip_pcie_ep_map_addr, .unmap_addr = rockchip_pcie_ep_unmap_addr, .set_msi = rockchip_pcie_ep_set_msi, .get_msi = rockchip_pcie_ep_get_msi, .raise_irq = rockchip_pcie_ep_raise_irq, .start = rockchip_pcie_ep_start, + .stop = rockchip_pcie_ep_stop, .get_features = rockchip_pcie_ep_get_features, }; -static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, - struct rockchip_pcie_ep *ep) +static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip, + struct rockchip_pcie_ep *ep) { struct device *dev = rockchip->dev; int err; @@ -496,91 +749,63 @@ static const struct of_device_id rockchip_pcie_ep_of_match[] = { {}, }; -static int rockchip_pcie_ep_probe(struct platform_device *pdev) +static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep) { - struct device *dev = &pdev->dev; - struct rockchip_pcie_ep *ep; - struct rockchip_pcie *rockchip; - struct pci_epc *epc; - size_t max_regions; + struct rockchip_pcie *rockchip = &ep->rockchip; + struct device *dev = rockchip->dev; struct pci_epc_mem_window *windows = NULL; int err, i; - u32 cfg_msi, cfg_msix_cp; - - ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); - if (!ep) - return -ENOMEM; - rockchip = &ep->rockchip; - rockchip->is_rc = false; - rockchip->dev = dev; - - epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops); - if (IS_ERR(epc)) { - dev_err(dev, "failed to create epc device\n"); - return PTR_ERR(epc); - } - - ep->epc = epc; - epc_set_drvdata(epc, ep); - - err = rockchip_pcie_parse_ep_dt(rockchip, ep); - if (err) - return err; - - err = rockchip_pcie_enable_clocks(rockchip); - if (err) - return err; - - err = rockchip_pcie_init_port(rockchip); - if (err) - goto err_disable_clocks; - - /* Establish the link automatically */ - rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, - PCIE_CLIENT_CONFIG); - - max_regions = ep->max_regions; - ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr), + ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr), GFP_KERNEL); - if (!ep->ob_addr) { - err = -ENOMEM; - goto err_uninit_port; - } - - /* Only enable function 0 by default */ - rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); + if (!ep->ob_addr) + return -ENOMEM; windows = devm_kcalloc(dev, ep->max_regions, sizeof(struct pci_epc_mem_window), GFP_KERNEL); - if (!windows) { - err = -ENOMEM; - goto err_uninit_port; - } + if (!windows) + return -ENOMEM; + for (i = 0; i < ep->max_regions; i++) { windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i); windows[i].size = SZ_1M; windows[i].page_size = SZ_1M; } - err = pci_epc_multi_mem_init(epc, windows, ep->max_regions); + err = 
pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions); devm_kfree(dev, windows); if (err < 0) { dev_err(dev, "failed to initialize the memory space\n"); - goto err_uninit_port; + return err; } - ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, + ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr, SZ_1M); if (!ep->irq_cpu_addr) { dev_err(dev, "failed to reserve memory space for MSI\n"); - err = -ENOMEM; goto err_epc_mem_exit; } ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; + return 0; + +err_epc_mem_exit: + pci_epc_mem_exit(ep->epc); + + return err; +} + +static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep) +{ + pci_epc_mem_exit(ep->epc); +} + +static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip) +{ + u32 cfg_msi, cfg_msix_cp; + /* * MSI-X is not supported but the controller still advertises the MSI-X * capability by default, which can lead to the Root Complex side @@ -603,19 +828,68 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) rockchip_pcie_write(rockchip, cfg_msi, PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); +} - rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE, - PCIE_CLIENT_CONFIG); +static int rockchip_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_pcie_ep *ep; + struct rockchip_pcie *rockchip; + struct pci_epc *epc; + int err; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + rockchip = &ep->rockchip; + rockchip->is_rc = false; + rockchip->dev = dev; + INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training); + + epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "failed to create EPC device\n"); + return PTR_ERR(epc); + } + + ep->epc = epc; + epc_set_drvdata(epc, ep); + + err = rockchip_pcie_ep_get_resources(rockchip, ep); + if (err) + return err; + + err = rockchip_pcie_ep_init_ob_mem(ep); + if (err) + return err; + + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + goto err_exit_ob_mem; + + err = rockchip_pcie_init_port(rockchip); + if (err) + goto err_disable_clocks; + + rockchip_pcie_ep_hide_broken_msix_cap(rockchip); + + /* Only enable function 0 by default */ + rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); pci_epc_init_notify(epc); + err = rockchip_pcie_ep_setup_irq(epc); + if (err < 0) + goto err_uninit_port; + return 0; -err_epc_mem_exit: - pci_epc_mem_exit(epc); err_uninit_port: rockchip_pcie_deinit_phys(rockchip); err_disable_clocks: rockchip_pcie_disable_clocks(rockchip); +err_exit_ob_mem: + rockchip_pcie_ep_exit_ob_mem(ep); return err; } diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index cbec71114825..5adac6adc046 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -294,7 +294,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) int err, i = MAX_LANE_NUM; u32 status; - gpiod_set_value_cansleep(rockchip->ep_gpio, 0); + gpiod_set_value_cansleep(rockchip->perst_gpio, 0); err = rockchip_pcie_init_port(rockchip); if (err) @@ -323,7 +323,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) PCIE_CLIENT_CONFIG); msleep(PCIE_T_PVPERL_MS); - gpiod_set_value_cansleep(rockchip->ep_gpio, 1); + gpiod_set_value_cansleep(rockchip->perst_gpio, 1); msleep(PCIE_T_RRS_READY_MS); @@ -1050,7 +1050,7 @@ static struct platform_driver 
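The reworked probe above tears down in strict reverse order of setup: an IRQ-setup failure unwinds the port, which unwinds the clocks, which unwind the outbound memory. The goto-ladder shape, reduced to its control flow with hypothetical example_* steps:

static int example_init_ob_mem(void) { return 0; }
static int example_enable_clocks(void) { return 0; }
static int example_init_port(void) { return 0; }
static int example_setup_irq(void) { return 0; }
static void example_deinit_port(void) { }
static void example_disable_clocks(void) { }
static void example_exit_ob_mem(void) { }

static int example_probe_shape(void)
{
	int err;

	err = example_init_ob_mem();
	if (err)
		return err;
	err = example_enable_clocks();
	if (err)
		goto err_exit_ob_mem;
	err = example_init_port();
	if (err)
		goto err_disable_clocks;
	err = example_setup_irq();
	if (err)
		goto err_uninit_port;
	return 0;

err_uninit_port:			/* unwind in reverse order */
	example_deinit_port();
err_disable_clocks:
	example_disable_clocks();
err_exit_ob_mem:
	example_exit_ob_mem();
	return err;
}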
rockchip_pcie_driver = { .pm = &rockchip_pcie_pm_ops, }, .probe = rockchip_pcie_probe, - .remove_new = rockchip_pcie_remove, + .remove = rockchip_pcie_remove, }; module_platform_driver(rockchip_pcie_driver); diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c index c07d7129f1c7..b9ade7632e11 100644 --- a/drivers/pci/controller/pcie-rockchip.c +++ b/drivers/pci/controller/pcie-rockchip.c @@ -119,13 +119,15 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return PTR_ERR(rockchip->aclk_rst); } - if (rockchip->is_rc) { - rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep", - GPIOD_OUT_LOW); - if (IS_ERR(rockchip->ep_gpio)) - return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio), - "failed to get ep GPIO\n"); - } + if (rockchip->is_rc) + rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep", + GPIOD_OUT_LOW); + else + rockchip->perst_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_IN); + if (IS_ERR(rockchip->perst_gpio)) + return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio), + "failed to get PERST# GPIO\n"); rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); if (IS_ERR(rockchip->aclk_pcie)) { @@ -244,11 +246,12 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, PCIE_CLIENT_CONFIG); - regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE | + regs = PCIE_CLIENT_ARI_ENABLE | PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes); if (rockchip->is_rc) - regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; + regs |= PCIE_CLIENT_LINK_TRAIN_ENABLE | + PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; else regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP; diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 6111de35f84c..a51b087ce878 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -26,12 +26,14 @@ #define MAX_LANE_NUM 4 #define MAX_REGION_LIMIT 32 #define MIN_EP_APERTURE 28 +#define LINK_TRAIN_TIMEOUT (500 * USEC_PER_MSEC) #define PCIE_CLIENT_BASE 0x0 #define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) #define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) #define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0) #define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) +#define PCIE_CLIENT_LINK_TRAIN_DISABLE HIWORD_UPDATE(0x0002, 0) #define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) #define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) #define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) @@ -49,6 +51,10 @@ #define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) #define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 #define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19 +#define PCIE_CLIENT_BASIC_STATUS0 (PCIE_CLIENT_BASE + 0x44) +#define PCIE_CLIENT_NEG_LINK_WIDTH_MASK GENMASK(7, 6) +#define PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT 6 +#define PCIE_CLIENT_NEG_LINK_SPEED BIT(5) #define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) #define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 #define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 @@ -86,6 +92,8 @@ #define PCIE_CORE_CTRL_MGMT_BASE 0x900000 #define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) +#define PCIE_CORE_PL_CONF_LS_MASK 0x00000001 +#define PCIE_CORE_PL_CONF_LS_READY 0x00000001 #define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 #define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 #define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 @@ -143,6 +151,7 @@ #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_EP_CONFIG_BASE 0xa00000 #define 
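The PCIE_CLIENT_* values being OR-ed together in rockchip_pcie_init_port() above work because these Rockchip registers follow the "hiword mask" convention: bits 31:16 are a write-enable mask for bits 15:0, so independent fields can be updated in one write with no read-modify-write. A self-contained sketch of the encoding, mirroring the driver's HIWORD_UPDATE macros:

#include <stdio.h>
#include <stdint.h>

/* value goes in the low half, its write-enable mask in the high half */
#define EXAMPLE_HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define EXAMPLE_HIWORD_UPDATE_BIT(val)		EXAMPLE_HIWORD_UPDATE(val, val)

int main(void)
{
	/* Set bit 1 (link training) and clear bit 0 (conf enable) in a
	 * single write; bits whose mask bit is 0 are left untouched. */
	uint32_t reg = EXAMPLE_HIWORD_UPDATE_BIT(0x0002) |
		       EXAMPLE_HIWORD_UPDATE(0x0001, 0);

	printf("%#010x\n", reg);	/* 0x00030002 */
	return 0;
}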
PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) +#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0) #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) #define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 @@ -154,6 +163,7 @@ #define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) #define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) +#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0) #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) #define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) @@ -191,6 +201,8 @@ #define ROCKCHIP_VENDOR_ID 0x1d87 #define PCIE_LINK_IS_L2(x) \ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) +#define PCIE_LINK_TRAINING_DONE(x) \ + (((x) & PCIE_CORE_PL_CONF_LS_MASK) == PCIE_CORE_PL_CONF_LS_READY) #define PCIE_LINK_UP(x) \ (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) #define PCIE_LINK_IS_GEN2(x) \ @@ -241,10 +253,20 @@ #define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8) #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 #define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 + +#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8 +#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20 +#define ROCKCHIP_PCIE_AT_SIZE_ALIGN (1UL << ROCKCHIP_PCIE_AT_MIN_NUM_BITS) + #define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \ (PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12))) #define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \ (PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12))) + +#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8 +#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20 +#define ROCKCHIP_PCIE_AT_SIZE_ALIGN (1UL << ROCKCHIP_PCIE_AT_MIN_NUM_BITS) + #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ (PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008) #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ @@ -307,7 +329,7 @@ struct rockchip_pcie { struct regulator *vpcie3v3; /* 3.3V power supply */ struct regulator *vpcie1v8; /* 1.8V power supply */ struct regulator *vpcie0v9; /* 0.9V power supply */ - struct gpio_desc *ep_gpio; + struct gpio_desc *perst_gpio; u32 lanes; u8 lanes_map; int link_gen; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index a8ae14474dd0..8d6e2a89b067 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -916,6 +916,6 @@ static struct platform_driver nwl_pcie_driver = { .of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, - .remove_new = nwl_pcie_remove, + .remove = nwl_pcie_remove, }; builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c index 48f60a04b740..6630cacef301 100644 --- a/drivers/pci/controller/plda/pcie-microchip-host.c +++ b/drivers/pci/controller/plda/pcie-microchip-host.c @@ -25,9 +25,6 @@ #define MC_PCIE1_BRIDGE_ADDR 0x00008000u #define MC_PCIE1_CTRL_ADDR 0x0000a000u -#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR) -#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR) - /* PCIe Controller Phy Regs */ #define SEC_ERROR_EVENT_CNT 0x20 #define DED_ERROR_EVENT_CNT 0x24 @@ -128,7 +125,6 @@ [EVENT_LOCAL_ ## x] = { __stringify(x), s } #define PCIE_EVENT(x) \ - .base = MC_PCIE_CTRL_ADDR, \ .offset = PCIE_EVENT_INT, \ .mask_offset = PCIE_EVENT_INT, \ .mask_high = 1, \ @@ -136,7 +132,6 @@ .enb_mask = 
PCIE_EVENT_INT_ENB_MASK #define SEC_EVENT(x) \ - .base = MC_PCIE_CTRL_ADDR, \ .offset = SEC_ERROR_INT, \ .mask_offset = SEC_ERROR_INT_MASK, \ .mask = SEC_ERROR_INT_ ## x ## _INT, \ @@ -144,7 +139,6 @@ .enb_mask = 0 #define DED_EVENT(x) \ - .base = MC_PCIE_CTRL_ADDR, \ .offset = DED_ERROR_INT, \ .mask_offset = DED_ERROR_INT_MASK, \ .mask_high = 1, \ @@ -152,7 +146,6 @@ .enb_mask = 0 #define LOCAL_EVENT(x) \ - .base = MC_PCIE_BRIDGE_ADDR, \ .offset = ISTATUS_LOCAL, \ .mask_offset = IMASK_LOCAL, \ .mask_high = 0, \ @@ -179,7 +172,8 @@ struct event_map { struct mc_pcie { struct plda_pcie_rp plda; - void __iomem *axi_base_addr; + void __iomem *bridge_base_addr; + void __iomem *ctrl_base_addr; }; struct cause { @@ -253,7 +247,6 @@ static struct event_map local_status_to_event[] = { }; static struct { - u32 base; u32 offset; u32 mask; u32 shift; @@ -325,8 +318,7 @@ static inline u32 reg_to_event(u32 reg, struct event_map field) static u32 pcie_events(struct mc_pcie *port) { - void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; - u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT); + u32 reg = readl_relaxed(port->ctrl_base_addr + PCIE_EVENT_INT); u32 val = 0; int i; @@ -338,8 +330,7 @@ static u32 pcie_events(struct mc_pcie *port) static u32 sec_errors(struct mc_pcie *port) { - void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; - u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT); + u32 reg = readl_relaxed(port->ctrl_base_addr + SEC_ERROR_INT); u32 val = 0; int i; @@ -351,8 +342,7 @@ static u32 sec_errors(struct mc_pcie *port) static u32 ded_errors(struct mc_pcie *port) { - void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; - u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT); + u32 reg = readl_relaxed(port->ctrl_base_addr + DED_ERROR_INT); u32 val = 0; int i; @@ -364,8 +354,7 @@ static u32 ded_errors(struct mc_pcie *port) static u32 local_events(struct mc_pcie *port) { - void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; - u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); + u32 reg = readl_relaxed(port->bridge_base_addr + ISTATUS_LOCAL); u32 val = 0; int i; @@ -412,8 +401,12 @@ static void mc_ack_event_irq(struct irq_data *data) void __iomem *addr; u32 mask; - addr = mc_port->axi_base_addr + event_descs[event].base + - event_descs[event].offset; + if (event_descs[event].offset == ISTATUS_LOCAL) + addr = mc_port->bridge_base_addr; + else + addr = mc_port->ctrl_base_addr; + + addr += event_descs[event].offset; mask = event_descs[event].mask; mask |= event_descs[event].enb_mask; @@ -429,8 +422,12 @@ static void mc_mask_event_irq(struct irq_data *data) u32 mask; u32 val; - addr = mc_port->axi_base_addr + event_descs[event].base + - event_descs[event].mask_offset; + if (event_descs[event].offset == ISTATUS_LOCAL) + addr = mc_port->bridge_base_addr; + else + addr = mc_port->ctrl_base_addr; + + addr += event_descs[event].mask_offset; mask = event_descs[event].mask; if (event_descs[event].enb_mask) { mask <<= PCIE_EVENT_INT_ENB_SHIFT; @@ -460,8 +457,12 @@ static void mc_unmask_event_irq(struct irq_data *data) u32 mask; u32 val; - addr = mc_port->axi_base_addr + event_descs[event].base + - event_descs[event].mask_offset; + if (event_descs[event].offset == ISTATUS_LOCAL) + addr = mc_port->bridge_base_addr; + else + addr = mc_port->ctrl_base_addr; + + addr += event_descs[event].mask_offset; mask = event_descs[event].mask; if (event_descs[event].enb_mask) @@ -554,26 +555,20 @@ static const struct 
plda_event mc_event = { static inline void mc_clear_secs(struct mc_pcie *port) { - void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; - - writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr + - SEC_ERROR_INT); - writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT); + writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, + port->ctrl_base_addr + SEC_ERROR_INT); + writel_relaxed(0, port->ctrl_base_addr + SEC_ERROR_EVENT_CNT); } static inline void mc_clear_deds(struct mc_pcie *port) { - void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; - - writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr + - DED_ERROR_INT); - writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT); + writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, + port->ctrl_base_addr + DED_ERROR_INT); + writel_relaxed(0, port->ctrl_base_addr + DED_ERROR_EVENT_CNT); } static void mc_disable_interrupts(struct mc_pcie *port) { - void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; - void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; u32 val; /* Ensure ECC bypass is enabled */ @@ -581,22 +576,22 @@ static void mc_disable_interrupts(struct mc_pcie *port) ECC_CONTROL_RX_RAM_ECC_BYPASS | ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS | ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS; - writel_relaxed(val, ctrl_base_addr + ECC_CONTROL); + writel_relaxed(val, port->ctrl_base_addr + ECC_CONTROL); /* Disable SEC errors and clear any outstanding */ - writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr + - SEC_ERROR_INT_MASK); + writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, + port->ctrl_base_addr + SEC_ERROR_INT_MASK); mc_clear_secs(port); /* Disable DED errors and clear any outstanding */ - writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr + - DED_ERROR_INT_MASK); + writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, + port->ctrl_base_addr + DED_ERROR_INT_MASK); mc_clear_deds(port); /* Disable local interrupts and clear any outstanding */ - writel_relaxed(0, bridge_base_addr + IMASK_LOCAL); - writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL); - writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI); + writel_relaxed(0, port->bridge_base_addr + IMASK_LOCAL); + writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_LOCAL); + writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_MSI); /* Disable PCIe events and clear any outstanding */ val = PCIE_EVENT_INT_L2_EXIT_INT | @@ -605,11 +600,11 @@ static void mc_disable_interrupts(struct mc_pcie *port) PCIE_EVENT_INT_L2_EXIT_INT_MASK | PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK | PCIE_EVENT_INT_DLUP_EXIT_INT_MASK; - writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT); + writel_relaxed(val, port->ctrl_base_addr + PCIE_EVENT_INT); /* Disable host interrupts and clear any outstanding */ - writel_relaxed(0, bridge_base_addr + IMASK_HOST); - writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST); + writel_relaxed(0, port->bridge_base_addr + IMASK_HOST); + writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST); } static int mc_platform_init(struct pci_config_window *cfg) @@ -617,12 +612,10 @@ static int mc_platform_init(struct pci_config_window *cfg) struct device *dev = cfg->parent; struct platform_device *pdev = to_platform_device(dev); struct pci_host_bridge *bridge = platform_get_drvdata(pdev); - void __iomem *bridge_base_addr = - port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; int ret; /* Configure address translation table 0 for PCIe 
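The microchip-host refactor above replaces the single axi_base_addr plus per-event .base offsets with two cached __iomem pointers, so every accessor picks a register block instead of doing address arithmetic. A hypothetical helper (not in the patch) that factors out the repeated choice, using the driver's own types:

/* Events whose status register is ISTATUS_LOCAL live in the bridge
 * block; everything else sits in the ctrl block. */
static void __iomem *example_event_base(struct mc_pcie *port, u32 offset)
{
	return offset == ISTATUS_LOCAL ? port->bridge_base_addr :
					 port->ctrl_base_addr;
}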
config space */ - plda_pcie_setup_window(bridge_base_addr, 0, cfg->res.start, + plda_pcie_setup_window(port->bridge_base_addr, 0, cfg->res.start, cfg->res.start, resource_size(&cfg->res)); @@ -649,7 +642,7 @@ static int mc_platform_init(struct pci_config_window *cfg) static int mc_host_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - void __iomem *bridge_base_addr; + void __iomem *apb_base_addr; struct plda_pcie_rp *plda; int ret; u32 val; @@ -661,30 +654,45 @@ static int mc_host_probe(struct platform_device *pdev) plda = &port->plda; plda->dev = dev; - port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(port->axi_base_addr)) - return PTR_ERR(port->axi_base_addr); + port->bridge_base_addr = devm_platform_ioremap_resource_byname(pdev, + "bridge"); + port->ctrl_base_addr = devm_platform_ioremap_resource_byname(pdev, + "ctrl"); + if (!IS_ERR(port->bridge_base_addr) && !IS_ERR(port->ctrl_base_addr)) + goto addrs_set; + + /* + * The original, incorrect, binding that lumped the control and + * bridge addresses together still needs to be handled by the driver. + */ + apb_base_addr = devm_platform_ioremap_resource_byname(pdev, "apb"); + if (IS_ERR(apb_base_addr)) + return dev_err_probe(dev, PTR_ERR(apb_base_addr), + "both legacy apb register and ctrl/bridge regions missing"); + + port->bridge_base_addr = apb_base_addr + MC_PCIE1_BRIDGE_ADDR; + port->ctrl_base_addr = apb_base_addr + MC_PCIE1_CTRL_ADDR; +addrs_set: mc_disable_interrupts(port); - bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; - plda->bridge_addr = bridge_base_addr; + plda->bridge_addr = port->bridge_base_addr; plda->num_events = NUM_EVENTS; /* Allow enabling MSI by disabling MSI-X */ - val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0); + val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0); val &= ~MSIX_CAP_MASK; - writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0); + writel(val, port->bridge_base_addr + PCIE_PCI_IRQ_DW0); /* Pick num vectors from bitfile programmed onto FPGA fabric */ - val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0); + val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0); val &= NUM_MSI_MSGS_MASK; val >>= NUM_MSI_MSGS_SHIFT; plda->msi.num_vectors = 1 << val; /* Pick vector address from design */ - plda->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR); + plda->msi.vector_phy = readl_relaxed(port->bridge_base_addr + IMSI_ADDR); ret = mc_pcie_init_clks(dev); if (ret) { diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c index c9933ecf6833..e73c1b7bc8ef 100644 --- a/drivers/pci/controller/plda/pcie-starfive.c +++ b/drivers/pci/controller/plda/pcie-starfive.c @@ -404,6 +404,9 @@ static int starfive_pcie_probe(struct platform_device *pdev) if (ret) return ret; + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + plda->host_ops = &sf_host_ops; plda->num_events = PLDA_MAX_EVENT_NUM; /* mask doorbell event */ @@ -413,11 +416,12 @@ static int starfive_pcie_probe(struct platform_device *pdev) plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS; ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops, &stf_pcie_event); - if (ret) + if (ret) { + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; + } - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); platform_set_drvdata(pdev, pcie); return 0; @@ -480,7 +484,7 @@ static struct platform_driver starfive_pcie_driver = { .pm = pm_sleep_ptr(&starfive_pcie_pm_ops), }, .probe = starfive_pcie_probe, - 
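The pcie-starfive hunk below moves pm_runtime_enable()/pm_runtime_get_sync() ahead of plda_pcie_host_init() so the controller is powered while it is programmed, and balances both calls on the error path. The general shape of that fix (example_hw_init() is a hypothetical stand-in for the host-init step):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_hw_init(struct platform_device *pdev) { return 0; }

static int example_probe_pm(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* power up before touching HW */

	ret = example_hw_init(pdev);
	if (ret) {
		/* Balance both calls, in reverse order, on failure */
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	return 0;
}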
.remove_new = starfive_pcie_remove, + .remove = starfive_pcie_remove, }; module_platform_driver(starfive_pcie_driver); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 264a180403a0..9d9596947350 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -740,11 +740,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata) if (!(features & VMD_FEAT_BIOS_PM_QUIRK)) return 0; - pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR); if (!pos) - return 0; + goto out_state_change; /* * Skip if the max snoop LTR is non-zero, indicating BIOS has set it */ pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg); if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK))) - return 0; + goto out_state_change; /* * Set the default values to the maximum required by the platform to */ pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg); pci_info(pdev, "VMD: Default LTR value set by driver\n"); +out_state_change: + /* + * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per + * PCIe r6.0, sec 5.5.4. + */ + pci_set_power_state_locked(pdev, PCI_D0); + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); return 0; } @@ -1100,6 +1105,10 @@ static const struct pci_device_id vmd_ids[] = { .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), .driver_data = VMD_FEATS_CLIENT,}, + {PCI_VDEVICE(INTEL, 0xb60b), + .driver_data = VMD_FEATS_CLIENT,}, + {PCI_VDEVICE(INTEL, 0xb06f), + .driver_data = VMD_FEATS_CLIENT,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c index b133967faef8..3b59a86a764b 100644 --- a/drivers/pci/devres.c +++ b/drivers/pci/devres.c @@ -773,7 +773,7 @@ EXPORT_SYMBOL(pcim_iomap_region); * Unmap a BAR and release its region manually. Only pass BARs that were * previously mapped by pcim_iomap_region(). */ -static void pcim_iounmap_region(struct pci_dev *pdev, int bar) +void pcim_iounmap_region(struct pci_dev *pdev, int bar) { struct pcim_addr_devres res_searched; @@ -784,6 +784,7 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar) devres_release(&pdev->dev, pcim_addr_resource_release, pcim_addr_resources_match, &res_searched); } +EXPORT_SYMBOL(pcim_iounmap_region); /** * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED) @@ -939,7 +940,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev) * desired, release individual regions with pcim_release_region() or all of * them at once with pcim_release_all_regions(). */ -static int pcim_request_all_regions(struct pci_dev *pdev, const char *name) +int pcim_request_all_regions(struct pci_dev *pdev, const char *name) { int ret; int bar; @@ -957,69 +958,17 @@ err: return ret; } +EXPORT_SYMBOL(pcim_request_all_regions); /** - * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones - * (DEPRECATED) - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to iomap - * @name: Name associated with the requests - * - * Returns: 0 on success, negative error code on failure. - * - * Request all PCI BARs and iomap regions specified by @mask. - * - * To release these resources manually, call pcim_release_region() for the - * regions and pcim_iounmap() for the mappings.
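The vmd quirk above is restructured so that pci_enable_link_state_locked() runs last, after pci_set_power_state_locked(pdev, PCI_D0): PCIe r6.0 sec 5.5.4 only permits enabling L1 PM Substates while the device is in D0, and the out_state_change label makes every early exit funnel through that ordering. A sketch of the invariant as a hypothetical wrapper:

#include <linux/pci.h>

/* Enabling L1 PM Substates is only valid in D0 (PCIe r6.0 sec 5.5.4),
 * so force D0 immediately before enabling the link states. */
static void example_enable_aspm_states(struct pci_dev *pdev)
{
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
}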
- * - * This function is DEPRECATED. Don't use it in new code. Instead, use one - * of the pcim_* region request functions in combination with a pcim_* - * mapping function. - */ -int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, - const char *name) -{ - int bar; - int ret; - void __iomem **legacy_iomap_table; - - ret = pcim_request_all_regions(pdev, name); - if (ret != 0) - return ret; - - for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { - if (!mask_contains_bar(mask, bar)) - continue; - if (!pcim_iomap(pdev, bar, 0)) - goto err; - } - - return 0; - -err: - /* - * If bar is larger than 0, then pcim_iomap() above has most likely - * failed because of -EINVAL. If it is equal 0, most likely the table - * couldn't be created, indicating -ENOMEM. - */ - ret = bar > 0 ? -EINVAL : -ENOMEM; - legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev); - - while (--bar >= 0) - pcim_iounmap(pdev, legacy_iomap_table[bar]); - - pcim_release_all_regions(pdev); - - return ret; -} -EXPORT_SYMBOL(pcim_iomap_regions_request_all); - -/** - * pcim_iounmap_regions - Unmap and release PCI BARs + * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED) * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to unmap and release * * Unmap and release regions specified by @mask. + * + * This function is DEPRECATED. Do not use it in new code. + * Use pcim_iounmap_region() instead. */ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) { diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c index 652d63df9d22..7bd7892c5222 100644 --- a/drivers/pci/doe.c +++ b/drivers/pci/doe.c @@ -146,6 +146,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; + unsigned long timeout_jiffies; size_t length, remainder; u32 val; int i; @@ -155,8 +156,19 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, * someone other than Linux (e.g. firmware) is using the mailbox. Note * it is expected that firmware and OS will negotiate access rights via * an, as yet to be defined, method. + * + * Wait up to one PCI_DOE_TIMEOUT period to allow the prior command to + * finish. Otherwise, simply error out as unable to field the request. + * + * PCIe r6.2 sec 6.30.3 states no interrupt is raised when the DOE Busy + * bit is cleared, so polling here is our best option for the moment. 
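With pcim_request_all_regions() and pcim_iounmap_region() now exported, the removed pcim_iomap_regions_request_all() has a direct replacement. A sketch of the migration for a driver that needs all BARs requested but only BAR 0 mapped (example_probe_bars() is illustrative, not from the patch):

#include <linux/err.h>
#include <linux/pci.h>

static void __iomem *example_probe_bars(struct pci_dev *pdev)
{
	void __iomem *regs;
	int ret;

	/* Was: pcim_iomap_regions_request_all(pdev, BIT(0), "example") */
	ret = pcim_request_all_regions(pdev, "example");
	if (ret)
		return ERR_PTR(ret);

	regs = pcim_iomap(pdev, 0, 0);	/* map BAR 0, full length */
	if (!regs)
		return ERR_PTR(-ENOMEM);

	return regs;	/* all devres-managed: no manual unwind needed */
}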
*/ - pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; + do { + pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + } while (FIELD_GET(PCI_DOE_STATUS_BUSY, val) && + !time_after(jiffies, timeout_jiffies)); + if (FIELD_GET(PCI_DOE_STATUS_BUSY, val)) return -EBUSY; diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 1c40d2506aef..260b7de2dbd5 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -55,7 +55,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev, bus_range_max = resource_size(cfgres) >> bus_shift; if (bus_range > bus_range_max) { bus_range = bus_range_max; - cfg->busr.end = busr->start + bus_range - 1; + resource_set_size(&cfg->busr, bus_range); dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n", cfgres, &cfg->busr, busr); } diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c index 7d070b1def11..54286a40bdfb 100644 --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -867,12 +867,18 @@ static int pci_epf_mhi_bind(struct pci_epf *epf) { struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf); struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; struct platform_device *pdev = to_platform_device(epc->dev.parent); struct resource *res; int ret; /* Get MMIO base address from Endpoint controller */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio"); + if (!res) { + dev_err(dev, "Failed to get \"mmio\" resource\n"); + return -ENODEV; + } + epf_mhi->mmio_phys = res->start; epf_mhi->mmio_size = resource_size(res); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 7c2ed6eae53a..ef6677f34116 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -291,8 +291,6 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) dma_release_channel(epf_test->dma_chan_rx); epf_test->dma_chan_rx = NULL; - - return; } static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, @@ -317,91 +315,92 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, static void pci_epf_test_copy(struct pci_epf_test *epf_test, struct pci_epf_test_reg *reg) { - int ret; - void __iomem *src_addr; - void __iomem *dst_addr; - phys_addr_t src_phys_addr; - phys_addr_t dst_phys_addr; + int ret = 0; struct timespec64 start, end; struct pci_epf *epf = epf_test->epf; - struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct pci_epc_map src_map, dst_map; + u64 src_addr = reg->src_addr; + u64 dst_addr = reg->dst_addr; + size_t copy_size = reg->size; + ssize_t map_size = 0; + void *copy_buf = NULL, *buf; - src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); - if (!src_addr) { - dev_err(dev, "Failed to allocate source address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; - ret = -ENOMEM; - goto err; - } - - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, - reg->src_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map source address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; - goto err_src_addr; - } - - dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); - if (!dst_addr) { - dev_err(dev, "Failed to allocate destination address\n"); - reg->status = STATUS_DST_ADDR_INVALID; - ret = -ENOMEM; - goto err_src_map_addr; - } - 
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, - reg->dst_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map destination address\n"); - reg->status = STATUS_DST_ADDR_INVALID; - goto err_dst_addr; - } - - ktime_get_ts64(&start); if (reg->flags & FLAG_USE_DMA) { if (epf_test->dma_private) { dev_err(dev, "Cannot transfer data using DMA\n"); ret = -EINVAL; - goto err_map_addr; + goto set_status; } - - ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, - src_phys_addr, reg->size, 0, - DMA_MEM_TO_MEM); - if (ret) - dev_err(dev, "Data transfer failed\n"); } else { - void *buf; - - buf = kzalloc(reg->size, GFP_KERNEL); - if (!buf) { + copy_buf = kzalloc(copy_size, GFP_KERNEL); + if (!copy_buf) { ret = -ENOMEM; - goto err_map_addr; + goto set_status; } - - memcpy_fromio(buf, src_addr, reg->size); - memcpy_toio(dst_addr, buf, reg->size); - kfree(buf); + buf = copy_buf; } - ktime_get_ts64(&end); - pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end, - reg->flags & FLAG_USE_DMA); -err_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); + while (copy_size) { + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, + src_addr, copy_size, &src_map); + if (ret) { + dev_err(dev, "Failed to map source address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto free_buf; + } + + ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, + dst_addr, copy_size, &dst_map); + if (ret) { + dev_err(dev, "Failed to map destination address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, + &src_map); + goto free_buf; + } -err_dst_addr: - pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); + map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size); -err_src_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); + ktime_get_ts64(&start); + if (reg->flags & FLAG_USE_DMA) { + ret = pci_epf_test_data_transfer(epf_test, + dst_map.phys_addr, src_map.phys_addr, + map_size, 0, DMA_MEM_TO_MEM); + if (ret) { + dev_err(dev, "Data transfer failed\n"); + goto unmap; + } + } else { + memcpy_fromio(buf, src_map.virt_addr, map_size); + memcpy_toio(dst_map.virt_addr, buf, map_size); + buf += map_size; + } + ktime_get_ts64(&end); -err_src_addr: - pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); + copy_size -= map_size; + src_addr += map_size; + dst_addr += map_size; -err: + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); + map_size = 0; + } + + pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, + &end, reg->flags & FLAG_USE_DMA); + +unmap: + if (map_size) { + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); + } + +free_buf: + kfree(copy_buf); + +set_status: if (!ret) reg->status |= STATUS_COPY_SUCCESS; else @@ -411,82 +410,89 @@ err: static void pci_epf_test_read(struct pci_epf_test *epf_test, struct pci_epf_test_reg *reg) { - int ret; - void __iomem *src_addr; - void *buf; + int ret = 0; + void *src_buf, *buf; u32 crc32; - phys_addr_t phys_addr; + struct pci_epc_map map; phys_addr_t dst_phys_addr; struct timespec64 start, end; struct pci_epf *epf = epf_test->epf; - struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; struct device *dma_dev = epf->epc->dev.parent; + u64 src_addr = reg->src_addr; + size_t src_size = reg->size; + 
ssize_t map_size = 0; - src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); - if (!src_addr) { - dev_err(dev, "Failed to allocate address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; + src_buf = kzalloc(src_size, GFP_KERNEL); + if (!src_buf) { ret = -ENOMEM; - goto err; + goto set_status; } + buf = src_buf; - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, - reg->src_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; - goto err_addr; - } - - buf = kzalloc(reg->size, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto err_map_addr; - } + while (src_size) { + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, + src_addr, src_size, &map); + if (ret) { + dev_err(dev, "Failed to map address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto free_buf; + } - if (reg->flags & FLAG_USE_DMA) { - dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, - DMA_FROM_DEVICE); - if (dma_mapping_error(dma_dev, dst_phys_addr)) { - dev_err(dev, "Failed to map destination buffer addr\n"); - ret = -ENOMEM; - goto err_dma_map; + map_size = map.pci_size; + if (reg->flags & FLAG_USE_DMA) { + dst_phys_addr = dma_map_single(dma_dev, buf, map_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dst_phys_addr)) { + dev_err(dev, + "Failed to map destination buffer addr\n"); + ret = -ENOMEM; + goto unmap; + } + + ktime_get_ts64(&start); + ret = pci_epf_test_data_transfer(epf_test, + dst_phys_addr, map.phys_addr, + map_size, src_addr, DMA_DEV_TO_MEM); + if (ret) + dev_err(dev, "Data transfer failed\n"); + ktime_get_ts64(&end); + + dma_unmap_single(dma_dev, dst_phys_addr, map_size, + DMA_FROM_DEVICE); + + if (ret) + goto unmap; + } else { + ktime_get_ts64(&start); + memcpy_fromio(buf, map.virt_addr, map_size); + ktime_get_ts64(&end); } - ktime_get_ts64(&start); - ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, - phys_addr, reg->size, - reg->src_addr, DMA_DEV_TO_MEM); - if (ret) - dev_err(dev, "Data transfer failed\n"); - ktime_get_ts64(&end); + src_size -= map_size; + src_addr += map_size; + buf += map_size; - dma_unmap_single(dma_dev, dst_phys_addr, reg->size, - DMA_FROM_DEVICE); - } else { - ktime_get_ts64(&start); - memcpy_fromio(buf, src_addr, reg->size); - ktime_get_ts64(&end); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); + map_size = 0; } - pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end, - reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, + &end, reg->flags & FLAG_USE_DMA); - crc32 = crc32_le(~0, buf, reg->size); + crc32 = crc32_le(~0, src_buf, reg->size); if (crc32 != reg->checksum) ret = -EIO; -err_dma_map: - kfree(buf); - -err_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); +unmap: + if (map_size) + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); -err_addr: - pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); +free_buf: + kfree(src_buf); -err: +set_status: if (!ret) reg->status |= STATUS_READ_SUCCESS; else @@ -496,71 +502,79 @@ err: static void pci_epf_test_write(struct pci_epf_test *epf_test, struct pci_epf_test_reg *reg) { - int ret; - void __iomem *dst_addr; - void *buf; - phys_addr_t phys_addr; + int ret = 0; + void *dst_buf, *buf; + struct pci_epc_map map; phys_addr_t src_phys_addr; struct timespec64 start, end; struct pci_epf *epf = epf_test->epf; - struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; struct device *dma_dev 
= epf->epc->dev.parent; + u64 dst_addr = reg->dst_addr; + size_t dst_size = reg->size; + ssize_t map_size = 0; - dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); - if (!dst_addr) { - dev_err(dev, "Failed to allocate address\n"); - reg->status = STATUS_DST_ADDR_INVALID; + dst_buf = kzalloc(dst_size, GFP_KERNEL); + if (!dst_buf) { ret = -ENOMEM; - goto err; + goto set_status; } + get_random_bytes(dst_buf, dst_size); + reg->checksum = crc32_le(~0, dst_buf, dst_size); + buf = dst_buf; - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, - reg->dst_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map address\n"); - reg->status = STATUS_DST_ADDR_INVALID; - goto err_addr; - } - - buf = kzalloc(reg->size, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto err_map_addr; - } - - get_random_bytes(buf, reg->size); - reg->checksum = crc32_le(~0, buf, reg->size); - - if (reg->flags & FLAG_USE_DMA) { - src_phys_addr = dma_map_single(dma_dev, buf, reg->size, - DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, src_phys_addr)) { - dev_err(dev, "Failed to map source buffer addr\n"); - ret = -ENOMEM; - goto err_dma_map; + while (dst_size) { + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, + dst_addr, dst_size, &map); + if (ret) { + dev_err(dev, "Failed to map address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + goto free_buf; } - ktime_get_ts64(&start); + map_size = map.pci_size; + if (reg->flags & FLAG_USE_DMA) { + src_phys_addr = dma_map_single(dma_dev, buf, map_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, src_phys_addr)) { + dev_err(dev, + "Failed to map source buffer addr\n"); + ret = -ENOMEM; + goto unmap; + } + + ktime_get_ts64(&start); + + ret = pci_epf_test_data_transfer(epf_test, + map.phys_addr, src_phys_addr, + map_size, dst_addr, + DMA_MEM_TO_DEV); + if (ret) + dev_err(dev, "Data transfer failed\n"); + ktime_get_ts64(&end); + + dma_unmap_single(dma_dev, src_phys_addr, map_size, + DMA_TO_DEVICE); + + if (ret) + goto unmap; + } else { + ktime_get_ts64(&start); + memcpy_toio(map.virt_addr, buf, map_size); + ktime_get_ts64(&end); + } - ret = pci_epf_test_data_transfer(epf_test, phys_addr, - src_phys_addr, reg->size, - reg->dst_addr, - DMA_MEM_TO_DEV); - if (ret) - dev_err(dev, "Data transfer failed\n"); - ktime_get_ts64(&end); + dst_size -= map_size; + dst_addr += map_size; + buf += map_size; - dma_unmap_single(dma_dev, src_phys_addr, reg->size, - DMA_TO_DEVICE); - } else { - ktime_get_ts64(&start); - memcpy_toio(dst_addr, buf, reg->size); - ktime_get_ts64(&end); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); + map_size = 0; } - pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end, - reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, + &end, reg->flags & FLAG_USE_DMA); /* * wait 1ms inorder for the write to complete. 
Without this delay L3 @@ -568,16 +582,14 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test, */ usleep_range(1000, 2000); -err_dma_map: - kfree(buf); - -err_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); +unmap: + if (map_size) + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); -err_addr: - pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); +free_buf: + kfree(dst_buf); -err: +set_status: if (!ret) reg->status |= STATUS_WRITE_SUCCESS; else @@ -786,7 +798,7 @@ static void pci_epf_test_epc_deinit(struct pci_epf *epf) { struct pci_epf_test *epf_test = epf_get_drvdata(epf); - cancel_delayed_work(&epf_test->cmd_handler); + cancel_delayed_work_sync(&epf_test->cmd_handler); pci_epf_test_clean_dma_chan(epf_test); pci_epf_test_clear_bar(epf); } @@ -917,7 +929,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf) struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct pci_epc *epc = epf->epc; - cancel_delayed_work(&epf_test->cmd_handler); + cancel_delayed_work_sync(&epf_test->cmd_handler); if (epc->init_complete) { pci_epf_test_clean_dma_chan(epf_test); pci_epf_test_clear_bar(epf); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 17f007109255..bed7c7d1fe3c 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -128,6 +128,18 @@ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features } EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar); +static bool pci_epc_function_is_valid(struct pci_epc *epc, + u8 func_no, u8 vfunc_no) +{ + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) + return false; + + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + return false; + + return true; +} + /** * pci_epc_get_features() - get the features supported by EPC * @epc: the features supported by *this* EPC device will be returned @@ -145,10 +157,7 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, { const struct pci_epc_features *epc_features; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return NULL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return NULL; if (!epc->ops->get_features) @@ -218,10 +227,7 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; if (!epc->ops->raise_irq) @@ -262,10 +268,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc)) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; if (!epc->ops->map_msi_irq) @@ -293,10 +296,7 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { int interrupt; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return 0; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return 0; if (!epc->ops->get_msi) @@ -329,11 +329,10 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) int ret; u8 encode_int; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - interrupts < 1 || 
interrupts > 32) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (interrupts < 1 || interrupts > 32) return -EINVAL; if (!epc->ops->set_msi) @@ -361,10 +360,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { int interrupt; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return 0; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return 0; if (!epc->ops->get_msix) @@ -397,11 +393,10 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - interrupts < 1 || interrupts > 2048) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (interrupts < 1 || interrupts > 2048) return -EINVAL; if (!epc->ops->set_msix) @@ -428,10 +423,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix); void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr) { - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return; if (!epc->ops->unmap_addr) @@ -459,10 +451,7 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; if (!epc->ops->map_addr) @@ -478,6 +467,109 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, EXPORT_SYMBOL_GPL(pci_epc_map_addr); /** + * pci_epc_mem_map() - allocate and map a PCI address to a CPU address + * @epc: the EPC device on which the CPU address is to be allocated and mapped + * @func_no: the physical endpoint function number in the EPC device + * @vfunc_no: the virtual endpoint function number in the physical function + * @pci_addr: PCI address to which the CPU address should be mapped + * @pci_size: the number of bytes to map starting from @pci_addr + * @map: where to return the mapping information + * + * Allocate a controller memory address region and map it to a RC PCI address + * region, taking into account the controller physical address mapping + * constraints using the controller operation align_addr(). If this operation is + * not defined, we assume that there are no alignment constraints for the + * mapping. + * + * The effective size of the PCI address range mapped from @pci_addr is + * indicated by @map->pci_size. This size may be less than the requested + * @pci_size. The local virtual CPU address for the mapping is indicated by + * @map->virt_addr (@map->phys_addr indicates the physical address). + * The size and CPU address of the controller memory allocated and mapped are + * respectively indicated by @map->map_size and @map->virt_base (and + * @map->phys_base for the physical address of @map->virt_base). + * + * Returns 0 on success and a negative error code in case of error. 
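To make the alignment behaviour described here concrete, a worked example under an assumed controller whose .align_addr() aligns the PCI address down to a 64K boundary and maps at most one 64K window per call (purely hypothetical numbers):

	/* caller asks for pci_addr = 0x12345678, pci_size = 0x10000 */
	map.map_pci_addr = 0x12340000;	/* aligned down to 64K */
	map.map_size     = 0x10000;	/* map_offset = 0x5678 */
	/* window ends at 0x12350000, short of 0x12355678, so: */
	map.pci_size     = 0x12350000 - 0x12345678;	/* 0xa988 bytes */

The caller therefore gets only 0xa988 of the requested 0x10000 bytes on this pass and must loop, exactly as the pci-epf-test rework above does.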
+ */ +int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u64 pci_addr, size_t pci_size, struct pci_epc_map *map) +{ + size_t map_size = pci_size; + size_t map_offset = 0; + int ret; + + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) + return -EINVAL; + + if (!pci_size || !map) + return -EINVAL; + + /* + * Align the PCI address to map. If the controller defines the + * .align_addr() operation, use it to determine the PCI address to map + * and the size of the mapping. Otherwise, assume that the controller + * has no alignment constraint. + */ + memset(map, 0, sizeof(*map)); + map->pci_addr = pci_addr; + if (epc->ops->align_addr) + map->map_pci_addr = + epc->ops->align_addr(epc, pci_addr, + &map_size, &map_offset); + else + map->map_pci_addr = pci_addr; + map->map_size = map_size; + if (map->map_pci_addr + map->map_size < pci_addr + pci_size) + map->pci_size = map->map_pci_addr + map->map_size - pci_addr; + else + map->pci_size = pci_size; + + map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base, + map->map_size); + if (!map->virt_base) + return -ENOMEM; + + map->phys_addr = map->phys_base + map_offset; + map->virt_addr = map->virt_base + map_offset; + + ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base, + map->map_pci_addr, map->map_size); + if (ret) { + pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base, + map->map_size); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pci_epc_mem_map); + +/** + * pci_epc_mem_unmap() - unmap and free a CPU address region + * @epc: the EPC device on which the CPU address is allocated and mapped + * @func_no: the physical endpoint function number in the EPC device + * @vfunc_no: the virtual endpoint function number in the physical function + * @map: the mapping information + * + * Unmap and free a CPU address region that was allocated and mapped with + * pci_epc_mem_map(). 
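Taken together, the two helpers support a simple consumer loop. A hedged sketch of a function driver reading a host buffer chunk by chunk, mirroring the pci-epf-test rework above (error unwinding trimmed; epc, fn, vfn, buf, host_addr and total are illustrative locals):

	struct pci_epc_map map;
	u64 addr = host_addr;
	size_t left = total;
	int ret;

	while (left) {
		ret = pci_epc_mem_map(epc, fn, vfn, addr, left, &map);
		if (ret)
			return ret;
		/* map.pci_size bytes are CPU-visible at map.virt_addr */
		memcpy_fromio(buf, map.virt_addr, map.pci_size);
		buf  += map.pci_size;
		addr += map.pci_size;
		left -= map.pci_size;
		pci_epc_mem_unmap(epc, fn, vfn, &map);
	}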
+ */ +void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct pci_epc_map *map) +{ + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) + return; + + if (!map || !map->virt_base) + return; + + pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base); + pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base, + map->map_size); +} +EXPORT_SYMBOL_GPL(pci_epc_mem_unmap); + +/** * pci_epc_clear_bar() - reset the BAR * @epc: the EPC device for which the BAR has to be cleared * @func_no: the physical endpoint function number in the EPC device @@ -489,12 +581,11 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr); void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - (epf_bar->barno == BAR_5 && - epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (epf_bar->barno == BAR_5 && + epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) return; if (!epc->ops->clear_bar) @@ -521,18 +612,16 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, int ret; int flags = epf_bar->flags; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - (epf_bar->barno == BAR_5 && - flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) + return -EINVAL; + + if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || (flags & PCI_BASE_ADDRESS_SPACE_IO && flags & PCI_BASE_ADDRESS_IO_MASK) || (upper_32_bits(epf_bar->size) && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))) return -EINVAL; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) - return -EINVAL; - if (!epc->ops->set_bar) return 0; @@ -561,10 +650,7 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; /* Only Virtual Function #1 has deviceID */ @@ -660,18 +746,18 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, if (IS_ERR_OR_NULL(epc) || !epf) return; + mutex_lock(&epc->list_lock); if (type == PRIMARY_INTERFACE) { func_no = epf->func_no; list = &epf->list; + epf->epc = NULL; } else { func_no = epf->sec_epc_func_no; list = &epf->sec_epc_list; + epf->sec_epc = NULL; } - - mutex_lock(&epc->list_lock); clear_bit(func_no, &epc->function_num_map); list_del(list); - epf->epc = NULL; mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); @@ -837,11 +923,10 @@ EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify); void pci_epc_destroy(struct pci_epc *epc) { pci_ep_cfs_remove_epc_group(epc->group); - device_unregister(&epc->dev); - #ifdef CONFIG_PCI_DOMAINS_GENERIC - pci_bus_release_domain_nr(&epc->dev, epc->domain_nr); + pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr); #endif + device_unregister(&epc->dev); } EXPORT_SYMBOL_GPL(pci_epc_destroy); diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index a9c028f58da1..218a60e945db 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_exit); void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size) { - void __iomem 
*virt_addr = NULL; + void __iomem *virt_addr; struct pci_epc_mem *mem; unsigned int page_shift; size_t align_size; @@ -188,10 +188,13 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, for (i = 0; i < epc->num_windows; i++) { mem = epc->windows[i]; - mutex_lock(&mem->lock); + if (size > mem->window.size) + continue; + align_size = ALIGN(size, mem->window.page_size); order = pci_epc_mem_get_order(mem, align_size); + mutex_lock(&mem->lock); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno >= 0) { @@ -211,7 +214,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, mutex_unlock(&mem->lock); } - return virt_addr; + return NULL; } EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 1472aef0fb81..123c4c7c2ab5 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -118,6 +118,16 @@ config HOTPLUG_PCI_CPCI_GENERIC When in doubt, say N. +config HOTPLUG_PCI_OCTEONEP + bool "Marvell OCTEON PCI Hotplug driver" + depends on HOTPLUG_PCI + help + Say Y here if you have an OCTEON PCIe device with a hotplug + controller. This driver enables the non-controller functions of the + device to be registered as hotplug slots. + + When in doubt, say N. + config HOTPLUG_PCI_SHPC bool "SHPC PCI Hotplug driver" help diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 240c99517d5e..40aaf31fe338 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o +obj-$(CONFIG_HOTPLUG_PCI_OCTEONEP) += octep_hp.o # acpiphp_ibm extends acpiphp, so should be linked afterwards. 
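The pci-epc-mem.c hunk above also shows the bitmap region allocator idiom: compute the region order before taking the lock, claim a naturally aligned 2^order block, and release it with the same order later. A generic sketch (get_order() works in PAGE_SIZE units; the EPC code derives the order from each window's own page size):

	int order = get_order(align_size);
	int pageno;

	mutex_lock(&mem->lock);
	pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
	mutex_unlock(&mem->lock);
	if (pageno < 0)
		/* this window is full or too small; try the next one */
	...
	/* on free, under the same lock: */
	bitmap_release_region(mem->bitmap, pageno, order);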
diff --git a/drivers/pci/hotplug/acpiphp_ampere_altra.c b/drivers/pci/hotplug/acpiphp_ampere_altra.c index f5c9e741c1d4..70dbc0431fc6 100644 --- a/drivers/pci/hotplug/acpiphp_ampere_altra.c +++ b/drivers/pci/hotplug/acpiphp_ampere_altra.c @@ -119,7 +119,7 @@ static struct platform_driver altra_led_driver = { .acpi_match_table = altra_led_ids, }, .probe = altra_led_probe, - .remove_new = altra_led_remove, + .remove = altra_led_remove, }; module_platform_driver(altra_led_driver); diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h index 6d8970d8c3f2..03fa39ab0c88 100644 --- a/drivers/pci/hotplug/cpci_hotplug.h +++ b/drivers/pci/hotplug/cpci_hotplug.h @@ -44,7 +44,6 @@ struct cpci_hp_controller_ops { int (*enable_irq)(void); int (*disable_irq)(void); int (*check_irq)(void *dev_id); - int (*hardware_test)(struct slot *slot, u32 value); u8 (*get_power)(struct slot *slot); int (*set_power)(struct slot *slot, int value); }; diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 718bc6cf12cb..ef7534a3ca40 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -12,8 +12,11 @@ * */ +#define pr_fmt(fmt) "cpqphp: " fmt + #include <linux/module.h> #include <linux/kernel.h> +#include <linux/printk.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> @@ -132,18 +135,6 @@ int cpqhp_unconfigure_device(struct pci_func *func) return 0; } -static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value) -{ - u32 vendID = 0; - - if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1) - return -1; - if (PCI_POSSIBLE_ERROR(vendID)) - return -1; - return pci_bus_read_config_dword(bus, devfn, offset, value); -} - - /* * cpqhp_set_irq * @@ -202,13 +193,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_ { u16 tdevice; u32 work; - u8 tbus; + int ret = -1; ctrl->pci_bus->number = bus_num; for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ - if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) + if (!pci_bus_read_dev_vendor_id(ctrl->pci_bus, tdevice, &work, 0)) + continue; + ret = pci_bus_read_config_dword(ctrl->pci_bus, tdevice, PCI_CLASS_REVISION, &work); + if (ret) continue; dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. Not a bridge ? */ @@ -216,23 +210,20 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_ *dev_num = tdevice; dbg("found it !\n"); return 0; - } - } - for (tdevice = 0; tdevice < 0xFF; tdevice++) { - /* Scan for access first */ - if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) - continue; - dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); - /* Yep we got one. bridge ? */ - if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { - pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); - /* XXX: no recursion, wtf? */ - dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); - return 0; + } else { + /* + * XXX: Code whose debug printout indicated + * recursion to buses underneath bridges might be + * necessary was removed because it never did + * any recursion. 
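The cpqphp rework above also illustrates the standard probe-before-read pattern: pci_bus_read_dev_vendor_id() confirms something answers at the devfn (and, when given a nonzero timeout, waits out Configuration Request Retry Status) before any further config read is trusted. In sketch form:

	u32 vendor, class_rev;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &vendor, 0))
		continue;	/* nothing responding at this devfn */
	if (pci_bus_read_config_dword(bus, devfn, PCI_CLASS_REVISION,
				      &class_rev))
		continue;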
+ */ + ret = 0; + pr_warn("missing feature: bridge scan recursion not implemented\n"); } } - return -1; + + return ret; } diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c index fed1360ee9b1..6143ebf71f21 100644 --- a/drivers/pci/hotplug/cpqphp_sysfs.c +++ b/drivers/pci/hotplug/cpqphp_sysfs.c @@ -123,7 +123,6 @@ static int spew_debug_info(struct controller *ctrl, char *data, int size) struct ctrl_dbg { int size; char *data; - struct controller *ctrl; }; #define MAX_OUTPUT (4*PAGE_SIZE) diff --git a/drivers/pci/hotplug/octep_hp.c b/drivers/pci/hotplug/octep_hp.c new file mode 100644 index 000000000000..2bce7296c050 --- /dev/null +++ b/drivers/pci/hotplug/octep_hp.c @@ -0,0 +1,427 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Marvell. */ + +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/delay.h> +#include <linux/dev_printk.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pci_hotplug.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#define OCTEP_HP_INTR_OFFSET(x) (0x20400 + ((x) << 4)) +#define OCTEP_HP_INTR_VECTOR(x) (16 + (x)) +#define OCTEP_HP_DRV_NAME "octep_hp" + +/* + * Type of MSI-X interrupts. OCTEP_HP_INTR_VECTOR() and + * OCTEP_HP_INTR_OFFSET() generate the vector and offset for an interrupt + * type. + */ +enum octep_hp_intr_type { + OCTEP_HP_INTR_INVALID = -1, + OCTEP_HP_INTR_ENA = 0, + OCTEP_HP_INTR_DIS = 1, + OCTEP_HP_INTR_MAX = 2, +}; + +struct octep_hp_cmd { + struct list_head list; + enum octep_hp_intr_type intr_type; + u64 intr_val; +}; + +struct octep_hp_slot { + struct list_head list; + struct hotplug_slot slot; + u16 slot_number; + struct pci_dev *hp_pdev; + unsigned int hp_devfn; + struct octep_hp_controller *ctrl; +}; + +struct octep_hp_intr_info { + enum octep_hp_intr_type type; + int number; + char name[16]; +}; + +struct octep_hp_controller { + void __iomem *base; + struct pci_dev *pdev; + struct octep_hp_intr_info intr[OCTEP_HP_INTR_MAX]; + struct work_struct work; + struct list_head slot_list; + struct mutex slot_lock; /* Protects slot_list */ + struct list_head hp_cmd_list; + spinlock_t hp_cmd_lock; /* Protects hp_cmd_list */ +}; + +static void octep_hp_enable_pdev(struct octep_hp_controller *hp_ctrl, + struct octep_hp_slot *hp_slot) +{ + guard(mutex)(&hp_ctrl->slot_lock); + if (hp_slot->hp_pdev) { + pci_dbg(hp_slot->hp_pdev, "Slot %s is already enabled\n", + hotplug_slot_name(&hp_slot->slot)); + return; + } + + /* Scan the device and add it to the bus */ + hp_slot->hp_pdev = pci_scan_single_device(hp_ctrl->pdev->bus, + hp_slot->hp_devfn); + pci_bus_assign_resources(hp_ctrl->pdev->bus); + pci_bus_add_device(hp_slot->hp_pdev); + + dev_dbg(&hp_slot->hp_pdev->dev, "Enabled slot %s\n", + hotplug_slot_name(&hp_slot->slot)); +} + +static void octep_hp_disable_pdev(struct octep_hp_controller *hp_ctrl, + struct octep_hp_slot *hp_slot) +{ + guard(mutex)(&hp_ctrl->slot_lock); + if (!hp_slot->hp_pdev) { + pci_dbg(hp_ctrl->pdev, "Slot %s is already disabled\n", + hotplug_slot_name(&hp_slot->slot)); + return; + } + + pci_dbg(hp_slot->hp_pdev, "Disabling slot %s\n", + hotplug_slot_name(&hp_slot->slot)); + + /* Remove the device from the bus */ + pci_stop_and_remove_bus_device_locked(hp_slot->hp_pdev); + hp_slot->hp_pdev = NULL; +} + +static int 
octep_hp_enable_slot(struct hotplug_slot *slot) +{ + struct octep_hp_slot *hp_slot = + container_of(slot, struct octep_hp_slot, slot); + + octep_hp_enable_pdev(hp_slot->ctrl, hp_slot); + return 0; +} + +static int octep_hp_disable_slot(struct hotplug_slot *slot) +{ + struct octep_hp_slot *hp_slot = + container_of(slot, struct octep_hp_slot, slot); + + octep_hp_disable_pdev(hp_slot->ctrl, hp_slot); + return 0; +} + +static struct hotplug_slot_ops octep_hp_slot_ops = { + .enable_slot = octep_hp_enable_slot, + .disable_slot = octep_hp_disable_slot, +}; + +#define SLOT_NAME_SIZE 16 +static struct octep_hp_slot * +octep_hp_register_slot(struct octep_hp_controller *hp_ctrl, + struct pci_dev *pdev, u16 slot_number) +{ + char slot_name[SLOT_NAME_SIZE]; + struct octep_hp_slot *hp_slot; + int ret; + + hp_slot = kzalloc(sizeof(*hp_slot), GFP_KERNEL); + if (!hp_slot) + return ERR_PTR(-ENOMEM); + + hp_slot->ctrl = hp_ctrl; + hp_slot->hp_pdev = pdev; + hp_slot->hp_devfn = pdev->devfn; + hp_slot->slot_number = slot_number; + hp_slot->slot.ops = &octep_hp_slot_ops; + + snprintf(slot_name, sizeof(slot_name), "octep_hp_%u", slot_number); + ret = pci_hp_register(&hp_slot->slot, hp_ctrl->pdev->bus, + PCI_SLOT(pdev->devfn), slot_name); + if (ret) { + kfree(hp_slot); + return ERR_PTR(ret); + } + + pci_info(pdev, "Registered slot %s for device %s\n", + slot_name, pci_name(pdev)); + + list_add_tail(&hp_slot->list, &hp_ctrl->slot_list); + octep_hp_disable_pdev(hp_ctrl, hp_slot); + + return hp_slot; +} + +static void octep_hp_deregister_slot(void *data) +{ + struct octep_hp_slot *hp_slot = data; + struct octep_hp_controller *hp_ctrl = hp_slot->ctrl; + + pci_hp_deregister(&hp_slot->slot); + octep_hp_enable_pdev(hp_ctrl, hp_slot); + list_del(&hp_slot->list); + kfree(hp_slot); +} + +static const char *octep_hp_cmd_name(enum octep_hp_intr_type type) +{ + switch (type) { + case OCTEP_HP_INTR_ENA: + return "hotplug enable"; + case OCTEP_HP_INTR_DIS: + return "hotplug disable"; + default: + return "invalid"; + } +} + +static void octep_hp_cmd_handler(struct octep_hp_controller *hp_ctrl, + struct octep_hp_cmd *hp_cmd) +{ + struct octep_hp_slot *hp_slot; + + /* + * Enable or disable the slots based on the slot mask. + * intr_val is a bit mask where each bit represents a slot. 
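For instance, an intr_val of 0x5 addresses slot_number 0 and slot_number 2. An equivalent dispatch could use the generic bit iterator; this is illustrative only (handle_slot() is a hypothetical helper), and the driver's list walk below additionally copes with sparse slot numbers:

	unsigned long mask = hp_cmd->intr_val;
	unsigned int nr;

	for_each_set_bit(nr, &mask, BITS_PER_LONG)
		handle_slot(hp_ctrl, nr);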
+ */ + list_for_each_entry(hp_slot, &hp_ctrl->slot_list, list) { + if (!(hp_cmd->intr_val & BIT(hp_slot->slot_number))) + continue; + + pci_info(hp_ctrl->pdev, "Received %s command for slot %s\n", + octep_hp_cmd_name(hp_cmd->intr_type), + hotplug_slot_name(&hp_slot->slot)); + + switch (hp_cmd->intr_type) { + case OCTEP_HP_INTR_ENA: + octep_hp_enable_pdev(hp_ctrl, hp_slot); + break; + case OCTEP_HP_INTR_DIS: + octep_hp_disable_pdev(hp_ctrl, hp_slot); + break; + default: + break; + } + } +} + +static void octep_hp_work_handler(struct work_struct *work) +{ + struct octep_hp_controller *hp_ctrl; + struct octep_hp_cmd *hp_cmd; + unsigned long flags; + + hp_ctrl = container_of(work, struct octep_hp_controller, work); + + /* Process all the hotplug commands */ + spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags); + while (!list_empty(&hp_ctrl->hp_cmd_list)) { + hp_cmd = list_first_entry(&hp_ctrl->hp_cmd_list, + struct octep_hp_cmd, list); + list_del(&hp_cmd->list); + spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags); + + octep_hp_cmd_handler(hp_ctrl, hp_cmd); + kfree(hp_cmd); + + spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags); + } + spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags); +} + +static enum octep_hp_intr_type octep_hp_intr_type(struct octep_hp_intr_info *intr, + int irq) +{ + enum octep_hp_intr_type type; + + for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) { + if (intr[type].number == irq) + return type; + } + + return OCTEP_HP_INTR_INVALID; +} + +static irqreturn_t octep_hp_intr_handler(int irq, void *data) +{ + struct octep_hp_controller *hp_ctrl = data; + struct pci_dev *pdev = hp_ctrl->pdev; + enum octep_hp_intr_type type; + struct octep_hp_cmd *hp_cmd; + u64 intr_val; + + type = octep_hp_intr_type(hp_ctrl->intr, irq); + if (type == OCTEP_HP_INTR_INVALID) { + pci_err(pdev, "Invalid interrupt %d\n", irq); + return IRQ_HANDLED; + } + + /* Read and clear the interrupt */ + intr_val = readq(hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type)); + writeq(intr_val, hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type)); + + hp_cmd = kzalloc(sizeof(*hp_cmd), GFP_ATOMIC); + if (!hp_cmd) + return IRQ_HANDLED; + + hp_cmd->intr_val = intr_val; + hp_cmd->intr_type = type; + + /* Add the command to the list and schedule the work */ + spin_lock(&hp_ctrl->hp_cmd_lock); + list_add_tail(&hp_cmd->list, &hp_ctrl->hp_cmd_list); + spin_unlock(&hp_ctrl->hp_cmd_lock); + schedule_work(&hp_ctrl->work); + + return IRQ_HANDLED; +} + +static void octep_hp_irq_cleanup(void *data) +{ + struct octep_hp_controller *hp_ctrl = data; + + pci_free_irq_vectors(hp_ctrl->pdev); + flush_work(&hp_ctrl->work); +} + +static int octep_hp_request_irq(struct octep_hp_controller *hp_ctrl, + enum octep_hp_intr_type type) +{ + struct pci_dev *pdev = hp_ctrl->pdev; + struct octep_hp_intr_info *intr; + int irq; + + irq = pci_irq_vector(pdev, OCTEP_HP_INTR_VECTOR(type)); + if (irq < 0) + return irq; + + intr = &hp_ctrl->intr[type]; + intr->number = irq; + intr->type = type; + snprintf(intr->name, sizeof(intr->name), "octep_hp_%d", type); + + return devm_request_irq(&pdev->dev, irq, octep_hp_intr_handler, + IRQF_SHARED, intr->name, hp_ctrl); +} + +static int octep_hp_controller_setup(struct pci_dev *pdev, + struct octep_hp_controller *hp_ctrl) +{ + struct device *dev = &pdev->dev; + enum octep_hp_intr_type type; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable PCI device\n"); + + hp_ctrl->base = pcim_iomap_region(pdev, 0, OCTEP_HP_DRV_NAME); + if (IS_ERR(hp_ctrl->base)) + 
return dev_err_probe(dev, PTR_ERR(hp_ctrl->base), + "Failed to map PCI device region\n"); + + pci_set_master(pdev); + pci_set_drvdata(pdev, hp_ctrl); + + INIT_LIST_HEAD(&hp_ctrl->slot_list); + INIT_LIST_HEAD(&hp_ctrl->hp_cmd_list); + mutex_init(&hp_ctrl->slot_lock); + spin_lock_init(&hp_ctrl->hp_cmd_lock); + INIT_WORK(&hp_ctrl->work, octep_hp_work_handler); + hp_ctrl->pdev = pdev; + + ret = pci_alloc_irq_vectors(pdev, 1, + OCTEP_HP_INTR_VECTOR(OCTEP_HP_INTR_MAX), + PCI_IRQ_MSIX); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to alloc MSI-X vectors\n"); + + ret = devm_add_action(&pdev->dev, octep_hp_irq_cleanup, hp_ctrl); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to add IRQ cleanup action\n"); + + for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) { + ret = octep_hp_request_irq(hp_ctrl, type); + if (ret) + return dev_err_probe(dev, ret, + "Failed to request IRQ for vector %d\n", + OCTEP_HP_INTR_VECTOR(type)); + } + + return 0; +} + +static int octep_hp_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct octep_hp_controller *hp_ctrl; + struct pci_dev *tmp_pdev, *next; + struct octep_hp_slot *hp_slot; + u16 slot_number = 0; + int ret; + + hp_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hp_ctrl), GFP_KERNEL); + if (!hp_ctrl) + return -ENOMEM; + + ret = octep_hp_controller_setup(pdev, hp_ctrl); + if (ret) + return ret; + + /* + * Register all hotplug slots. Hotplug controller is the first function + * of the PCI device. The hotplug slots are the remaining functions of + * the PCI device. The hotplug slot functions are logically removed from + * the bus during probing and are re-enabled by the driver when a + * hotplug event is received. + */ + list_for_each_entry_safe(tmp_pdev, next, &pdev->bus->devices, bus_list) { + if (tmp_pdev == pdev) + continue; + + hp_slot = octep_hp_register_slot(hp_ctrl, tmp_pdev, slot_number); + if (IS_ERR(hp_slot)) + return dev_err_probe(&pdev->dev, PTR_ERR(hp_slot), + "Failed to register hotplug slot %u\n", + slot_number); + + ret = devm_add_action(&pdev->dev, octep_hp_deregister_slot, + hp_slot); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to add action for deregistering slot %u\n", + slot_number); + slot_number++; + } + + return 0; +} + +#define PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR 0xa0e3 +static struct pci_device_id octep_hp_pci_map[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR) }, + { }, +}; + +static struct pci_driver octep_hp = { + .name = OCTEP_HP_DRV_NAME, + .id_table = octep_hp_pci_map, + .probe = octep_hp_pci_probe, +}; + +module_pci_driver(octep_hp); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Marvell"); +MODULE_DESCRIPTION("Marvell OCTEON PCI Hotplug driver"); diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 058d5937d8a9..36236ac88fd5 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -388,8 +388,8 @@ static struct hotplug_slot *get_slot_from_name(const char *name) /** * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem - * @bus: bus this slot is on * @slot: pointer to the &struct hotplug_slot to register + * @bus: bus this slot is on * @devnr: device number * @name: name registered with kobject core * @owner: caller module owner @@ -498,8 +498,6 @@ EXPORT_SYMBOL_GPL(pci_hp_add); * * The @slot must have been registered with the pci hotplug subsystem * previously with a call to pci_hp_register(). 
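One design note on the octep_hp probe above: it leans on devres actions running in reverse order of registration on detach, so the per-slot deregister actions added during probe run before the IRQ cleanup action added earlier in controller setup. Reduced to the two registrations involved:

	/* in octep_hp_controller_setup(): registered first, runs last */
	ret = devm_add_action(&pdev->dev, octep_hp_irq_cleanup, hp_ctrl);
	...
	/* in octep_hp_pci_probe(): registered later, runs first */
	ret = devm_add_action(&pdev->dev, octep_hp_deregister_slot, hp_slot);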
- * - * Returns 0 if successful, anything else for an error. */ void pci_hp_deregister(struct hotplug_slot *slot) { @@ -513,8 +511,6 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister); * @slot: pointer to the &struct hotplug_slot to unpublish * * Remove a hotplug slot's sysfs interface. - * - * Returns 0 on success or a negative int on error. */ void pci_hp_del(struct hotplug_slot *slot) { @@ -545,8 +541,6 @@ EXPORT_SYMBOL_GPL(pci_hp_del); * the driver may no longer invoke hotplug_slot_name() to get the slot's * unique name. The driver no longer needs to handle a ->reset_slot callback * from this point on. - * - * Returns 0 on success or a negative int on error. */ void pci_hp_destroy(struct hotplug_slot *slot) { diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index dcdbfcf404dd..d603a7aa7483 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -19,6 +19,8 @@ #include <linux/types.h> #include <linux/pm_runtime.h> #include <linux/pci.h> + +#include "../pci.h" #include "pciehp.h" /* The following routines constitute the bulk of the @@ -127,6 +129,9 @@ static void remove_board(struct controller *ctrl, bool safe_removal) pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, INDICATOR_NOOP); + + /* Don't carry LBMS indications across */ + pcie_reset_lbms_count(ctrl->pcie->port); } static int pciehp_enable_slot(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 736ad8baa2a5..bb5a8d9f03ad 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -319,7 +319,7 @@ int pciehp_check_link_status(struct controller *ctrl) return -1; } - pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); + __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); if (!found) { ctrl_info(ctrl, "Slot(%s): No device found\n", diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index aaa33e8dc4c9..4be402fe9ab9 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -327,8 +327,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) virtfn->resource[i].name = pci_name(virtfn); virtfn->resource[i].flags = res->flags; size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); - virtfn->resource[i].start = res->start + size * id; - virtfn->resource[i].end = virtfn->resource[i].start + size - 1; + resource_set_range(&virtfn->resource[i], + res->start + size * id, size); rc = request_resource(res, &virtfn->resource[i]); BUG_ON(rc); } @@ -804,7 +804,7 @@ found: goto failed; } iov->barsz[i] = resource_size(res); - res->end = res->start + resource_size(res) * total - 1; + resource_set_size(res, resource_size(res) * total); pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n", res_name, res, i, total); i += bar64; diff --git a/drivers/pci/of.c b/drivers/pci/of.c index dacea3fc5128..52f770bcc481 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -728,6 +728,33 @@ out_free_name: } #endif +/** + * of_pci_supply_present() - Check if the power supply is present for the PCI + * device + * @np: Device tree node + * + * Check if the power supply for the PCI device is present in the device tree + * node or not. + * + * Return: true if at least one power supply exists; false otherwise. 
+ */ +bool of_pci_supply_present(struct device_node *np) +{ + struct property *prop; + char *supply; + + if (!np) + return false; + + for_each_property_of_node(np, prop) { + supply = strrchr(prop->name, '-'); + if (supply && !strcmp(supply, "-supply")) + return true; + } + + return false; +} + #endif /* CONFIG_PCI */ /** diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c index 5a0b98e69795..886c236e5de6 100644 --- a/drivers/pci/of_property.c +++ b/drivers/pci/of_property.c @@ -126,7 +126,7 @@ static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs, if (of_pci_get_addr_flags(&res[j], &flags)) continue; - val64 = res[j].start; + val64 = pci_bus_address(pdev, &res[j] - pdev->resource); of_pci_set_address(pdev, rp[i].parent_addr, val64, 0, flags, false); if (pci_is_bridge(pdev)) { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d0f4db1cab7..3e5a117f5b5d 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -521,6 +521,31 @@ static ssize_t bus_rescan_store(struct device *dev, static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, bus_rescan_store); +static ssize_t reset_subordinate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus *bus = pdev->subordinate; + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val) { + int ret = __pci_reset_bus(bus); + + if (ret) + return ret; + } + + return count; +} +static DEVICE_ATTR_WO(reset_subordinate); + #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, struct device_attribute *attr, @@ -625,6 +650,7 @@ static struct attribute *pci_dev_attrs[] = { static struct attribute *pci_bridge_attrs[] = { &dev_attr_subordinate_bus_number.attr, &dev_attr_secondary_bus_number.attr, + &dev_attr_reset_subordinate.attr, NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 63690375d36b..0b29ec6e8e5e 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1832,6 +1832,7 @@ int pci_save_state(struct pci_dev *dev) pci_save_dpc_state(dev); pci_save_aer_state(dev); pci_save_ptm_state(dev); + pci_save_tph_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); @@ -1937,6 +1938,7 @@ void pci_restore_state(struct pci_dev *dev) pci_restore_rebar_state(dev); pci_restore_dpc_state(dev); pci_restore_ptm_state(dev); + pci_restore_tph_state(dev); pci_aer_clear_status(dev); pci_restore_aer_state(dev); @@ -4744,7 +4746,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) * to track link speed or width changes made by hardware itself * in attempt to correct unreliable link operation. */ - pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); + pcie_reset_lbms_count(pdev); return rc; } @@ -5162,6 +5164,8 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) */ if (err_handler && err_handler->reset_prepare) err_handler->reset_prepare(dev); + else if (dev->driver) + pci_warn(dev, "resetting"); /* * Wake-up device prior to save. 
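The of.c helper above keys purely off the property name suffix, so for example "vpcie3v3-supply" matches, while "compatible" (strrchr() finds no '-') and "interrupt-parent" (suffix "-parent") do not. The same check in isolation, assuming nothing beyond string.h:

	static bool name_is_supply(const char *name)
	{
		const char *dash = strrchr(name, '-');

		return dash && !strcmp(dash, "-supply");
	}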
PM registers default to D0 after @@ -5195,6 +5199,8 @@ static void pci_dev_restore(struct pci_dev *dev) */ if (err_handler && err_handler->reset_done) err_handler->reset_done(dev); + else if (dev->driver) + pci_warn(dev, "reset done"); } /* dev->reset_methods[] is a 0-terminated list of indices into this array */ @@ -5248,7 +5254,7 @@ static ssize_t reset_method_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - char *options, *name; + char *options, *tmp_options, *name; int m, n; u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 }; @@ -5268,7 +5274,8 @@ static ssize_t reset_method_store(struct device *dev, return -ENOMEM; n = 0; - while ((name = strsep(&options, " ")) != NULL) { + tmp_options = options; + while ((name = strsep(&tmp_options, " ")) != NULL) { if (sysfs_streq(name, "")) continue; @@ -5884,7 +5891,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus); * * Same as above except return -EAGAIN if the bus cannot be locked */ -static int __pci_reset_bus(struct pci_bus *bus) +int __pci_reset_bus(struct pci_bus *bus) { int rc; @@ -6193,38 +6200,64 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, EXPORT_SYMBOL(pcie_bandwidth_available); /** - * pcie_get_speed_cap - query for the PCI device's link speed capability + * pcie_get_supported_speeds - query Supported Link Speed Vector * @dev: PCI device to query * - * Query the PCI device speed capability. Return the maximum link speed - * supported by the device. + * Query @dev supported link speeds. + * + * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining + * supported link speeds using the Supported Link Speeds Vector in the Link + * Capabilities 2 Register (when available). + * + * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. + * + * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link + * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s + * speeds were defined. + * + * For @dev without Supported Link Speed Vector, the field is synthesized + * from the Max Link Speed field in the Link Capabilities Register. + * + * Return: Supported Link Speeds Vector (+ reserved 0 at LSB). */ -enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +u8 pcie_get_supported_speeds(struct pci_dev *dev) { u32 lnkcap2, lnkcap; + u8 speeds; /* - * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The - * implementation note there recommends using the Supported Link - * Speeds Vector in Link Capabilities 2 when supported. - * - * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software - * should use the Supported Link Speeds field in Link Capabilities, - * where only 2.5 GT/s and 5.0 GT/s speeds were defined. + * Speeds retain the reserved 0 at LSB before PCIe Supported Link + * Speeds Vector to allow using SLS Vector bit defines directly. 
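The reset_method_store() fix above addresses the classic strsep() pitfall: strsep() advances the pointer it is handed (eventually to NULL), so parsing must run on a scratch copy while the original allocation pointer is kept for kfree(). In sketch form:

	char *buf = kstrndup(input, len, GFP_KERNEL);
	char *cur = buf, *tok;

	if (!buf)
		return -ENOMEM;
	while ((tok = strsep(&cur, " ")) != NULL) {
		/* parse tok */
	}
	kfree(buf);	/* buf still points at the allocation; cur does not */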
*/ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS; /* PCIe r3.0-compliant */ - if (lnkcap2) - return PCIE_LNKCAP2_SLS2SPEED(lnkcap2); + if (speeds) + return speeds; pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + + /* Synthesize from the Max Link Speed field */ if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) - return PCIE_SPEED_5_0GT; + speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB; else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) - return PCIE_SPEED_2_5GT; + speeds = PCI_EXP_LNKCAP2_SLS_2_5GB; - return PCI_SPEED_UNKNOWN; + return speeds; +} + +/** + * pcie_get_speed_cap - query for the PCI device's link speed capability + * @dev: PCI device to query + * + * Query the PCI device speed capability. + * + * Return: the maximum link speed supported by the device. + */ +enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +{ + return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds); } EXPORT_SYMBOL(pcie_get_speed_cap); @@ -6653,8 +6686,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar, } else { r->flags &= ~IORESOURCE_SIZEALIGN; r->flags |= IORESOURCE_STARTALIGN; - r->start = align; - r->end = r->start + size - 1; + resource_set_range(r, align, size); } r->flags |= IORESOURCE_UNSET; } @@ -6900,6 +6932,8 @@ static int __init pci_setup(char *str) pci_no_domains(); } else if (!strncmp(str, "noari", 5)) { pcie_ari_disabled = true; + } else if (!strncmp(str, "notph", 5)) { + pci_no_tph(); } else if (!strncmp(str, "cbiosize=", 9)) { pci_cardbus_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "cbmemsize=", 10)) { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 14d00ce45bfa..2e40fc63ba31 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -104,6 +104,7 @@ bool pci_reset_supported(struct pci_dev *dev); void pci_init_reset_methods(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); +int __pci_reset_bus(struct pci_bus *bus); struct pci_cap_saved_data { u16 cap_nr; @@ -323,6 +324,9 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); +void pci_walk_bus_locked(struct pci_bus *top, + int (*cb)(struct pci_dev *, void *), + void *userdata); const char *pci_resource_name(struct pci_dev *dev, unsigned int i); @@ -331,6 +335,17 @@ void pci_disable_bridge_window(struct pci_dev *dev); struct pci_bus *pci_bus_get(struct pci_bus *bus); void pci_bus_put(struct pci_bus *bus); +#define PCIE_LNKCAP_SLS2SPEED(lnkcap) \ +({ \ + ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN); \ +}) + /* PCIe link information from Link Capabilities 2 */ #define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \ ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ @@ -341,6 +356,15 @@ void pci_bus_put(struct pci_bus *bus); (lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN) +#define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \ + ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? 
PCIE_SPEED_64_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN) + /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \ @@ -373,12 +397,16 @@ static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed) return -EINVAL; } +u8 pcie_get_supported_speeds(struct pci_dev *dev); const char *pci_speed_string(enum pci_bus_speed speed); -enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); -enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); void __pcie_print_link_status(struct pci_dev *dev, bool verbose); void pcie_report_downtraining(struct pci_dev *dev); -void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); + +static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta) +{ + bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; +} +void pcie_update_link_speed(struct pci_bus *bus); /* Single Root I/O Virtualization */ struct pci_sriov { @@ -469,10 +497,18 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 #define PCI_DPC_RECOVERING 2 +#define PCI_DEV_REMOVED 3 -static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) +static inline void pci_dev_assign_added(struct pci_dev *dev) { - assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added); + smp_mb__before_atomic(); + set_bit(PCI_DEV_ADDED, &dev->priv_flags); + smp_mb__after_atomic(); +} + +static inline bool pci_dev_test_and_clear_added(struct pci_dev *dev) +{ + return test_and_clear_bit(PCI_DEV_ADDED, &dev->priv_flags); } static inline bool pci_dev_is_added(const struct pci_dev *dev) @@ -480,6 +516,11 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev) return test_bit(PCI_DEV_ADDED, &dev->priv_flags); } +static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev) +{ + return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags); +} + #ifdef CONFIG_PCIEAER #include <linux/aer.h> @@ -597,6 +638,18 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) #endif /* CONFIG_PCI_IOV */ +#ifdef CONFIG_PCIE_TPH +void pci_restore_tph_state(struct pci_dev *dev); +void pci_save_tph_state(struct pci_dev *dev); +void pci_no_tph(void); +void pci_tph_init(struct pci_dev *dev); +#else +static inline void pci_restore_tph_state(struct pci_dev *dev) { } +static inline void pci_save_tph_state(struct pci_dev *dev) { } +static inline void pci_no_tph(void) { } +static inline void pci_tph_init(struct pci_dev *dev) { } +#endif + #ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); void pci_save_ptm_state(struct pci_dev *dev); @@ -692,6 +745,17 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif +#ifdef CONFIG_PCIEPORTBUS +void pcie_reset_lbms_count(struct pci_dev *port); +int pcie_lbms_count(struct pci_dev *port, unsigned long *val); +#else +static inline void pcie_reset_lbms_count(struct pci_dev *port) {} +static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val) +{ + return -EOPNOTSUPP; +} +#endif + struct pci_dev_reset_methods { u16 vendor; u16 device; @@ -746,6 +810,7 @@ void pci_set_bus_of_node(struct 
pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge); +bool of_pci_supply_present(struct device_node *np); #else static inline int @@ -793,6 +858,10 @@ static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_br return 0; } +static inline bool of_pci_supply_present(struct device_node *np) +{ + return false; +} #endif /* CONFIG_OF */ struct of_changeset; diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 6461aa93fe76..53ccab62314d 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -4,7 +4,7 @@ pcieportdrv-y := portdrv.o rcec.o -obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o +obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o obj-y += aspm.o obj-$(CONFIG_PCIEAER) += aer.o err.o diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 13b8586924ea..80c5ba8d8296 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -180,7 +180,8 @@ static int disable_ecrc_checking(struct pci_dev *dev) } /** - * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy + * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based + * on global policy * @dev: the PCI device */ void pcie_set_ecrc_checking(struct pci_dev *dev) @@ -1148,14 +1149,16 @@ static void aer_recover_work_func(struct work_struct *work) continue; } pci_print_aer(pdev, entry.severity, entry.regs); + /* - * Memory for aer_capability_regs(entry.regs) is being allocated from the - * ghes_estatus_pool to protect it from overwriting when multiple sections - * are present in the error status. Thus free the same after processing - * the data. + * Memory for aer_capability_regs(entry.regs) is being + * allocated from the ghes_estatus_pool to protect it from + * overwriting when multiple sections are present in the + * error status. Thus free the same after processing the + * data. 
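The pci.h hunks above (TPH, LBMS count) follow the usual config-stub convention: under the matching CONFIG_* the real prototypes are declared; otherwise static inline no-ops, or an -EOPNOTSUPP return for value-returning helpers, let callers compile unchanged. Reduced to its shape with a hypothetical CONFIG_FOO:

	#ifdef CONFIG_FOO
	int foo_count(struct pci_dev *port, unsigned long *val);
	#else
	static inline int foo_count(struct pci_dev *port, unsigned long *val)
	{
		return -EOPNOTSUPP;
	}
	#endif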
*/ ghes_estatus_pool_region_free((unsigned long)entry.regs, - sizeof(struct aer_capability_regs)); + sizeof(struct aer_capability_regs)); if (entry.severity == AER_NONFATAL) pcie_do_recovery(pdev, pci_channel_io_normal, diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index cee2365e54b8..28567d457613 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -805,6 +805,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl); pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl); + /* Disable L0s/L1 before updating L1SS config */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) { + pcie_capability_write_word(child, PCI_EXP_LNKCTL, + child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, + parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC); + } + /* * Setup L0s state * @@ -829,6 +838,13 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) aspm_l1ss_init(link); + /* Restore L0s/L1 if they were enabled */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) { + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl); + pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl); + } + /* Save default state */ link->aspm_default = link->aspm_enabled; @@ -845,25 +861,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) } } -/* Configure the ASPM L1 substates */ +/* Configure the ASPM L1 substates. Caller must disable L1 first. */ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) { - u32 val, enable_req; + u32 val; struct pci_dev *child = link->downstream, *parent = link->pdev; - enable_req = (link->aspm_enabled ^ state) & state; + val = 0; + if (state & PCIE_LINK_STATE_L1_1) + val |= PCI_L1SS_CTL1_ASPM_L1_1; + if (state & PCIE_LINK_STATE_L1_2) + val |= PCI_L1SS_CTL1_ASPM_L1_2; + if (state & PCIE_LINK_STATE_L1_1_PCIPM) + val |= PCI_L1SS_CTL1_PCIPM_L1_1; + if (state & PCIE_LINK_STATE_L1_2_PCIPM) + val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* - * Here are the rules specified in the PCIe spec for enabling L1SS: - * - When enabling L1.x, enable bit at parent first, then at child - * - When disabling L1.x, disable bit at child first, then at parent - * - When enabling ASPM L1.x, need to disable L1 - * (at child followed by parent). - * - The ASPM/PCIPM L1.2 must be disabled while programming timing + * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates: + * - Clear L1.x enable bits at child first, then at parent + * - Set L1.x enable bits at parent first, then at child + * - ASPM/PCIPM L1.2 must be disabled while programming timing * parameters - * - * To keep it simple, disable all L1SS bits first, and later enable - * what is needed. */ /* Disable all L1 substates */ @@ -871,26 +890,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) PCI_L1SS_CTL1_L1SS_MASK, 0); pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, 0); - /* - * If needed, disable L1, and it gets enabled later - * in pcie_config_aspm_link(). 
- */ - if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) { - pcie_capability_clear_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPM_L1); - pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPM_L1); - } - - val = 0; - if (state & PCIE_LINK_STATE_L1_1) - val |= PCI_L1SS_CTL1_ASPM_L1_1; - if (state & PCIE_LINK_STATE_L1_2) - val |= PCI_L1SS_CTL1_ASPM_L1_2; - if (state & PCIE_LINK_STATE_L1_1_PCIPM) - val |= PCI_L1SS_CTL1_PCIPM_L1_1; - if (state & PCIE_LINK_STATE_L1_2_PCIPM) - val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, @@ -937,21 +936,30 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) dwstream |= PCI_EXP_LNKCTL_ASPM_L1; } + /* + * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable + * bits for ASPM L1 PM Substates must be done while ASPM L1 is + * disabled. Disable L1 here and apply new configuration after L1SS + * configuration has been completed. + * + * Per sec 7.5.3.7, when disabling ASPM L1, software must disable + * it in the Downstream component prior to disabling it in the + * Upstream component, and ASPM L1 must be enabled in the Upstream + * component prior to enabling it in the Downstream component. + * + * Sec 7.5.3.7 also recommends programming the same ASPM Control + * value for all functions of a multi-function device. + */ + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_config_aspm_dev(child, 0); + pcie_config_aspm_dev(parent, 0); + if (link->aspm_capable & PCIE_LINK_STATE_L1SS) pcie_config_aspm_l1ss(link, state); - /* - * Spec 2.0 suggests all functions should be configured the - * same setting for ASPM. Enabling ASPM L1 should be done in - * upstream component first and then downstream, and vice - * versa for disabling ASPM L1. Spec doesn't mention L0S. - */ - if (state & PCIE_LINK_STATE_L1) - pcie_config_aspm_dev(parent, upstream); + pcie_config_aspm_dev(parent, upstream); list_for_each_entry(child, &linkbus->devices, bus_list) pcie_config_aspm_dev(child, dwstream); - if (!(state & PCIE_LINK_STATE_L1)) - pcie_config_aspm_dev(parent, upstream); link->aspm_enabled = state; @@ -1442,6 +1450,9 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked) * touch the LNKCTL register. Also note that this does not enable states * disabled by pci_disable_link_state(). Return 0 or a negative errno. * + * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per + * PCIe r6.0, sec 5.5.4. + * * @pdev: PCI device * @state: Mask of ASPM link states to enable */ @@ -1458,6 +1469,9 @@ EXPORT_SYMBOL(pci_enable_link_state); * can't touch the LNKCTL register. Also note that this does not enable states * disabled by pci_disable_link_state(). Return 0 or a negative errno. * + * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per + * PCIe r6.0, sec 5.5.4. 
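For illustration, a minimal sketch of the call order the note above implies ("pdev" is a device the caller already owns; the state mask is illustrative and error handling is trimmed):

static int foo_enable_aspm(struct pci_dev *pdev)
{
	int ret;

	/* Move the device to D0 first (PCIe r6.0, sec 5.5.4) */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret)
		return ret;

	/* Then request ASPM L1 plus the L1 PM Substates */
	return pci_enable_link_state(pdev, PCIE_LINK_STATE_L1 |
					   PCIE_LINK_STATE_L1_1 |
					   PCIE_LINK_STATE_L1_2 |
					   PCIE_LINK_STATE_L1_1_PCIPM |
					   PCIE_LINK_STATE_L1_2_PCIPM);
}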
+ * + * @pdev: PCI device + * @state: Mask of ASPM link states to enable * diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c new file mode 100644 index 000000000000..b59cacc740fa --- /dev/null +++ b/drivers/pci/pcie/bwctrl.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe bandwidth controller + * + * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com> + * + * Copyright (C) 2019 Dell Inc + * Copyright (C) 2023-2024 Intel Corporation + * + * The PCIe bandwidth controller provides a way to alter PCIe Link Speeds + * and notify the operating system when the Link Width or Speed changes. The + * notification capability is required for all Root Ports and Downstream + * Ports supporting Link Width wider than x1 and/or multiple Link Speeds. + * + * This service port driver hooks into the Bandwidth Notification interrupt, + * watching for changes or links becoming degraded in operation. It updates + * the cached Current Link Speed that is exposed to user space through sysfs. + */ + +#define dev_fmt(fmt) "bwctrl: " fmt + +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pci-bwctrl.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "../pci.h" +#include "portdrv.h" + +/** + * struct pcie_bwctrl_data - PCIe bandwidth controller + * @set_speed_mutex: Serializes link speed changes + * @lbms_count: Count for LBMS (since last reset) + * @cdev: Thermal cooling device associated with the port + */ +struct pcie_bwctrl_data { + struct mutex set_speed_mutex; + atomic_t lbms_count; + struct thermal_cooling_device *cdev; +}; + +/* + * Prevent port removal during LBMS count accessors and Link Speed changes. + * + * These have to be differentiated because pcie_bwctrl_change_speed() calls + * pcie_retrain_link() which uses LBMS count reset accessor on success + * (using just one rwsem triggers "possible recursive locking detected" + * warning). + */ +static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem); +static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem); + +static bool pcie_valid_speed(enum pci_bus_speed speed) +{ + return (speed >= PCIE_SPEED_2_5GT) && (speed <= PCIE_SPEED_64_0GT); +} + +static u16 pci_bus_speed2lnkctl2(enum pci_bus_speed speed) +{ + static const u8 speed_conv[] = { + [PCIE_SPEED_2_5GT] = PCI_EXP_LNKCTL2_TLS_2_5GT, + [PCIE_SPEED_5_0GT] = PCI_EXP_LNKCTL2_TLS_5_0GT, + [PCIE_SPEED_8_0GT] = PCI_EXP_LNKCTL2_TLS_8_0GT, + [PCIE_SPEED_16_0GT] = PCI_EXP_LNKCTL2_TLS_16_0GT, + [PCIE_SPEED_32_0GT] = PCI_EXP_LNKCTL2_TLS_32_0GT, + [PCIE_SPEED_64_0GT] = PCI_EXP_LNKCTL2_TLS_64_0GT, + }; + + if (WARN_ON_ONCE(!pcie_valid_speed(speed))) + return 0; + + return speed_conv[speed]; +} + +static inline u16 pcie_supported_speeds2target_speed(u8 supported_speeds) +{ + return __fls(supported_speeds); +} + +/** + * pcie_bwctrl_select_speed - Select Target Link Speed + * @port: PCIe Port + * @speed_req: Requested PCIe Link Speed + * + * Select Target Link Speed by taking into account the Supported Link Speeds + * of both the Root Port and the Endpoint. + * + * Return: Target Link Speed (1=2.5GT/s, 2=5GT/s, 3=8GT/s, etc.)
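For illustration, the mask arithmetic in pcie_bwctrl_select_speed() below works out like this (values are illustrative, not from the patch):

	u8 desired = GENMASK(3, 1);               /* 8GT/s request: TLS 3 -> 0b1110 */
	u8 supported = 0x02 | 0x04;               /* Endpoint caps at 2.5 and 5GT/s */
	u16 target = __fls(desired & supported);  /* __fls(0b0110) = 2, i.e. 5GT/s */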
+ */ +static u16 pcie_bwctrl_select_speed(struct pci_dev *port, enum pci_bus_speed speed_req) +{ + struct pci_bus *bus = port->subordinate; + u8 desired_speeds, supported_speeds; + struct pci_dev *dev; + + desired_speeds = GENMASK(pci_bus_speed2lnkctl2(speed_req), + __fls(PCI_EXP_LNKCAP2_SLS_2_5GB)); + + supported_speeds = port->supported_speeds; + if (bus) { + down_read(&pci_bus_sem); + dev = list_first_entry_or_null(&bus->devices, struct pci_dev, bus_list); + if (dev) + supported_speeds &= dev->supported_speeds; + up_read(&pci_bus_sem); + } + if (!supported_speeds) + return PCI_EXP_LNKCAP2_SLS_2_5GB; + + return pcie_supported_speeds2target_speed(supported_speeds & desired_speeds); +} + +static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool use_lt) +{ + int ret; + + ret = pcie_capability_clear_and_set_word(port, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, target_speed); + if (ret != PCIBIOS_SUCCESSFUL) + return pcibios_err_to_errno(ret); + + ret = pcie_retrain_link(port, use_lt); + if (ret < 0) + return ret; + + /* + * Ensure link speed updates also on platforms that have problems + * with notifications. + */ + if (port->subordinate) + pcie_update_link_speed(port->subordinate); + + return 0; +} + +/** + * pcie_set_target_speed - Set downstream Link Speed for PCIe Port + * @port: PCIe Port + * @speed_req: Requested PCIe Link Speed + * @use_lt: Wait for the LT or DLLLA bit to detect the end of link training + * + * Attempt to set PCIe Port Link Speed to @speed_req. @speed_req may be + * adjusted downwards to the best speed supported by both the Port and PCIe + * Device underneath it. + * + * Return: + * * 0 - on success + * * -EINVAL - @speed_req is not a PCIe Link Speed + * * -ENODEV - @port is not controllable + * * -ETIMEDOUT - changing Link Speed took too long + * * -EAGAIN - Link Speed was changed but @speed_req was not achieved + */ +int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, + bool use_lt) +{ + struct pci_bus *bus = port->subordinate; + u16 target_speed; + int ret; + + if (WARN_ON_ONCE(!pcie_valid_speed(speed_req))) + return -EINVAL; + + if (bus && bus->cur_bus_speed == speed_req) + return 0; + + target_speed = pcie_bwctrl_select_speed(port, speed_req); + + scoped_guard(rwsem_read, &pcie_bwctrl_setspeed_rwsem) { + struct pcie_bwctrl_data *data = port->link_bwctrl; + + /* + * port->link_bwctrl is NULL during initial scan when called + * e.g. from the Target Speed quirk. + */ + if (data) + mutex_lock(&data->set_speed_mutex); + + ret = pcie_bwctrl_change_speed(port, target_speed, use_lt); + + if (data) + mutex_unlock(&data->set_speed_mutex); + } + + /* + * Despite setting a higher speed in the Target Link Speed field, an empty + * bus won't train to 5GT+ speeds.
+ */ + if (!ret && bus && bus->cur_bus_speed != speed_req && + !list_empty(&bus->devices)) + ret = -EAGAIN; + + return ret; +} + +static void pcie_bwnotif_enable(struct pcie_device *srv) +{ + struct pcie_bwctrl_data *data = srv->port->link_bwctrl; + struct pci_dev *port = srv->port; + u16 link_status; + int ret; + + /* Count LBMS seen so far as one */ + ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); + if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS) + atomic_inc(&data->lbms_count); + + pcie_capability_set_word(port, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); + pcie_capability_write_word(port, PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS); + + /* + * Update after enabling notifications & clearing status bits ensures + * link speed is up to date. + */ + pcie_update_link_speed(port->subordinate); +} + +static void pcie_bwnotif_disable(struct pci_dev *port) +{ + pcie_capability_clear_word(port, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); +} + +static irqreturn_t pcie_bwnotif_irq(int irq, void *context) +{ + struct pcie_device *srv = context; + struct pcie_bwctrl_data *data = srv->port->link_bwctrl; + struct pci_dev *port = srv->port; + u16 link_status, events; + int ret; + + ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); + if (ret != PCIBIOS_SUCCESSFUL) + return IRQ_NONE; + + events = link_status & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS); + if (!events) + return IRQ_NONE; + + if (events & PCI_EXP_LNKSTA_LBMS) + atomic_inc(&data->lbms_count); + + pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); + + /* + * Interrupts will not be triggered from any further Link Speed + * change until LBMS is cleared by the write. Therefore, re-read the + * speed (inside pcie_update_link_speed()) after LBMS has been + * cleared to avoid missing link speed changes. + */ + pcie_update_link_speed(port->subordinate); + + return IRQ_HANDLED; +} + +void pcie_reset_lbms_count(struct pci_dev *port) +{ + struct pcie_bwctrl_data *data; + + guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem); + data = port->link_bwctrl; + if (data) + atomic_set(&data->lbms_count, 0); + else + pcie_capability_write_word(port, PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_LBMS); +} + +int pcie_lbms_count(struct pci_dev *port, unsigned long *val) +{ + struct pcie_bwctrl_data *data; + + guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem); + data = port->link_bwctrl; + if (!data) + return -ENOTTY; + + *val = atomic_read(&data->lbms_count); + + return 0; +} + +static int pcie_bwnotif_probe(struct pcie_device *srv) +{ + struct pci_dev *port = srv->port; + int ret; + + struct pcie_bwctrl_data *data = devm_kzalloc(&srv->device, + sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = devm_mutex_init(&srv->device, &data->set_speed_mutex); + if (ret) + return ret; + + ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq, + IRQF_SHARED, "PCIe bwctrl", srv); + if (ret) + return ret; + + scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) { + scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) { + port->link_bwctrl = no_free_ptr(data); + pcie_bwnotif_enable(srv); + } + } + + pci_dbg(port, "enabled with IRQ %d\n", srv->irq); + + /* Don't fail on errors. 
Don't leave an IS_ERR() "pointer" in ->cdev */ + port->link_bwctrl->cdev = pcie_cooling_device_register(port); + if (IS_ERR(port->link_bwctrl->cdev)) + port->link_bwctrl->cdev = NULL; + + return 0; +} + +static void pcie_bwnotif_remove(struct pcie_device *srv) +{ + struct pcie_bwctrl_data *data = srv->port->link_bwctrl; + + pcie_cooling_device_unregister(data->cdev); + + pcie_bwnotif_disable(srv->port); + + scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) + scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) + srv->port->link_bwctrl = NULL; +} + +static int pcie_bwnotif_suspend(struct pcie_device *srv) +{ + pcie_bwnotif_disable(srv->port); + return 0; +} + +static int pcie_bwnotif_resume(struct pcie_device *srv) +{ + pcie_bwnotif_enable(srv); + return 0; +} + +static struct pcie_port_service_driver pcie_bwctrl_driver = { + .name = "pcie_bwctrl", + .port_type = PCIE_ANY_PORT, + .service = PCIE_PORT_SERVICE_BWCTRL, + .probe = pcie_bwnotif_probe, + .suspend = pcie_bwnotif_suspend, + .resume = pcie_bwnotif_resume, + .remove = pcie_bwnotif_remove, +}; + +int __init pcie_bwctrl_init(void) +{ + return pcie_port_service_register(&pcie_bwctrl_driver); +} diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 6af5e0425872..5e10306b6308 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -68,7 +68,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask, */ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | - PCIE_PORT_SERVICE_BWNOTIF)) { + PCIE_PORT_SERVICE_BWCTRL)) { pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16); *pme = FIELD_GET(PCI_EXP_FLAGS_IRQ, reg16); nvec = *pme + 1; @@ -150,11 +150,11 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) /* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | - PCIE_PORT_SERVICE_BWNOTIF)) { + PCIE_PORT_SERVICE_BWCTRL)) { pcie_irq = pci_irq_vector(dev, pme); irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq; irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq; - irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq; + irqs[PCIE_PORT_SERVICE_BWCTRL_SHIFT] = pcie_irq; } if (mask & PCIE_PORT_SERVICE_AER) @@ -271,7 +271,7 @@ static int get_port_device_capability(struct pci_dev *dev) pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap); if (linkcap & PCI_EXP_LNKCAP_LBNC) - services |= PCIE_PORT_SERVICE_BWNOTIF; + services |= PCIE_PORT_SERVICE_BWCTRL; } return services; @@ -828,6 +828,7 @@ static void __init pcie_init_services(void) pcie_aer_init(); pcie_pme_init(); pcie_dpc_init(); + pcie_bwctrl_init(); pcie_hp_init(); } diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 12c89ea0313b..bd29d1cc7b8b 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -20,8 +20,8 @@ #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) #define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */ #define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) -#define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */ -#define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT) +#define PCIE_PORT_SERVICE_BWCTRL_SHIFT 4 /* Bandwidth Controller (notifications) */ +#define PCIE_PORT_SERVICE_BWCTRL (1 << PCIE_PORT_SERVICE_BWCTRL_SHIFT) #define PCIE_PORT_DEVICE_MAXSERVICES 5 @@ -51,6 +51,8 @@ int pcie_dpc_init(void); static inline int pcie_dpc_init(void) { return 0; } #endif +int pcie_bwctrl_init(void); + /* Port Type */ #define
PCIE_ANY_PORT (~0) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f1615805f5b0..2e81ab0f5a25 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -543,15 +543,15 @@ void pci_read_bridge_bases(struct pci_bus *child) pci_read_bridge_mmio(child->self, child->resource[1], false); pci_read_bridge_mmio_pref(child->self, child->resource[2], false); - if (dev->transparent) { - pci_bus_for_each_resource(child->parent, res) { - if (res && res->flags) { - pci_bus_add_resource(child, res, - PCI_SUBTRACTIVE_DECODE); - pci_info(dev, " bridge window %pR (subtractive decode)\n", - res); - } - } + if (!dev->transparent) + return; + + pci_bus_for_each_resource(child->parent, res) { + if (!res || !res->flags) + continue; + + pci_bus_add_resource(child, res); + pci_info(dev, " bridge window %pR (subtractive decode)\n", res); } } @@ -742,9 +742,13 @@ const char *pci_speed_string(enum pci_bus_speed speed) } EXPORT_SYMBOL_GPL(pci_speed_string); -void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) +void pcie_update_link_speed(struct pci_bus *bus) { - bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; + struct pci_dev *bridge = bus->self; + u16 linksta; + + pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); + __pcie_update_link_speed(bus, linksta); } EXPORT_SYMBOL_GPL(pcie_update_link_speed); @@ -827,13 +831,11 @@ static void pci_set_bus_speed(struct pci_bus *bus) if (pci_is_pcie(bridge)) { u32 linkcap; - u16 linksta; pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS]; - pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); - pcie_update_link_speed(bus, linksta); + pcie_update_link_speed(bus); } } @@ -1032,7 +1034,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) if (res->flags & IORESOURCE_BUS) pci_bus_insert_busn_res(bus, bus->number, res->end); else - pci_bus_add_resource(bus, res, 0); + pci_bus_add_resource(bus, res); if (offset) { if (resource_type(res) == IORESOURCE_IO) @@ -1633,23 +1635,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev) static void set_pcie_untrusted(struct pci_dev *dev) { - struct pci_dev *parent; + struct pci_dev *parent = pci_upstream_bridge(dev); + if (!parent) + return; /* - * If the upstream bridge is untrusted we treat this device + * If the upstream bridge is untrusted we treat this device as * untrusted as well. */ - parent = pci_upstream_bridge(dev); - if (parent && (parent->untrusted || parent->external_facing)) + if (parent->untrusted) { + dev->untrusted = true; + return; + } + + if (arch_pci_dev_is_removable(dev)) { + pci_dbg(dev, "marking as untrusted\n"); dev->untrusted = true; + } } static void pci_set_removable(struct pci_dev *dev) { struct pci_dev *parent = pci_upstream_bridge(dev); + if (!parent) + return; /* - * We (only) consider everything downstream from an external_facing + * We (only) consider everything tunneled below an external_facing * device to be removable by the user. We're mainly concerned with * consumer platforms with user accessible thunderbolt ports that are * vulnerable to DMA attacks, and we expect those ports to be marked by @@ -1659,9 +1671,15 @@ static void pci_set_removable(struct pci_dev *dev) * accessible to user / may not be removed by end user, and thus not * exposed as "removable" to userspace. 
*/ - if (parent && - (parent->external_facing || dev_is_removable(&parent->dev))) + if (dev_is_removable(&parent->dev)) { + dev_set_removable(&dev->dev, DEVICE_REMOVABLE); + return; + } + + if (arch_pci_dev_is_removable(dev)) { + pci_dbg(dev, "marking as removable\n"); dev_set_removable(&dev->dev, DEVICE_REMOVABLE); + } } /** @@ -1947,6 +1965,9 @@ int pci_setup_device(struct pci_dev *dev) set_pcie_untrusted(dev); + if (pci_is_pcie(dev)) + dev->supported_speeds = pcie_get_supported_speeds(dev); + /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; @@ -2495,6 +2516,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_dpc_init(dev); /* Downstream Port Containment */ pci_rcec_init(dev); /* Root Complex Event Collector */ pci_doe_init(dev); /* Data Object Exchange */ + pci_tph_init(dev); /* TLP Processing Hints */ pcie_report_downtraining(dev); pci_init_reset_methods(dev); @@ -3108,6 +3130,17 @@ int pci_host_probe(struct pci_host_bridge *bridge) pci_lock_rescan_remove(); pci_bus_add_devices(bus); pci_unlock_rescan_remove(); + + /* + * Ensure pm_runtime_enable() is called for the controller drivers + * before calling pci_host_probe(). The PM framework expects that + * if the parent device supports runtime PM, it will be enabled + * before child runtime PM is enabled. + */ + pm_runtime_set_active(&bridge->dev); + pm_runtime_no_callbacks(&bridge->dev); + devm_pm_runtime_enable(&bridge->dev); + return 0; } EXPORT_SYMBOL_GPL(pci_host_probe); diff --git a/drivers/pci/pwrctl/Makefile b/drivers/pci/pwrctl/Makefile deleted file mode 100644 index d308aae4800c..000000000000 --- a/drivers/pci/pwrctl/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctl-core.o -pci-pwrctl-core-y := core.o - -obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctl-pwrseq.o diff --git a/drivers/pci/pwrctl/core.c b/drivers/pci/pwrctl/core.c deleted file mode 100644 index 01d913b60316..000000000000 --- a/drivers/pci/pwrctl/core.c +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2024 Linaro Ltd. - */ - -#include <linux/device.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/pci-pwrctl.h> -#include <linux/property.h> -#include <linux/slab.h> - -static int pci_pwrctl_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct pci_pwrctl *pwrctl = container_of(nb, struct pci_pwrctl, nb); - struct device *dev = data; - - if (dev_fwnode(dev) != dev_fwnode(pwrctl->dev)) - return NOTIFY_DONE; - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - /* - * We will have two struct device objects bound to two different - * drivers on different buses but consuming the same DT node. We - * must not bind the pins twice in this case but only once for - * the first device to be added. - * - * If we got here then the PCI device is the second after the - * power control platform device. Mark its OF node as reused. 
- */ - dev->of_node_reused = true; - break; - case BUS_NOTIFY_BOUND_DRIVER: - pwrctl->link = device_link_add(dev, pwrctl->dev, - DL_FLAG_AUTOREMOVE_CONSUMER); - if (!pwrctl->link) - dev_err(pwrctl->dev, "Failed to add device link\n"); - break; - case BUS_NOTIFY_UNBOUND_DRIVER: - if (pwrctl->link) - device_link_remove(dev, pwrctl->dev); - break; - } - - return NOTIFY_DONE; -} - -static void rescan_work_func(struct work_struct *work) -{ - struct pci_pwrctl *pwrctl = container_of(work, struct pci_pwrctl, work); - - pci_lock_rescan_remove(); - pci_rescan_bus(to_pci_dev(pwrctl->dev->parent)->bus); - pci_unlock_rescan_remove(); -} - -/** - * pci_pwrctl_init() - Initialize the PCI power control context struct - * - * @pwrctl: PCI power control data - * @dev: Parent device - */ -void pci_pwrctl_init(struct pci_pwrctl *pwrctl, struct device *dev) -{ - pwrctl->dev = dev; - INIT_WORK(&pwrctl->work, rescan_work_func); -} -EXPORT_SYMBOL_GPL(pci_pwrctl_init); - -/** - * pci_pwrctl_device_set_ready() - Notify the pwrctl subsystem that the PCI - * device is powered-up and ready to be detected. - * - * @pwrctl: PCI power control data. - * - * Returns: - * 0 on success, negative error number on error. - * - * Note: - * This function returning 0 doesn't mean the device was detected. It means, - * that the bus rescan was successfully started. The device will get bound to - * its PCI driver asynchronously. - */ -int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl) -{ - int ret; - - if (!pwrctl->dev) - return -ENODEV; - - pwrctl->nb.notifier_call = pci_pwrctl_notify; - ret = bus_register_notifier(&pci_bus_type, &pwrctl->nb); - if (ret) - return ret; - - schedule_work(&pwrctl->work); - - return 0; -} -EXPORT_SYMBOL_GPL(pci_pwrctl_device_set_ready); - -/** - * pci_pwrctl_device_unset_ready() - Notify the pwrctl subsystem that the PCI - * device is about to be powered-down. - * - * @pwrctl: PCI power control data. - */ -void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl) -{ - /* - * We don't have to delete the link here. Typically, this function - * is only called when the power control device is being detached. If - * it is being detached then the child PCI device must have already - * been unbound too or the device core wouldn't let us unbind. - */ - bus_unregister_notifier(&pci_bus_type, &pwrctl->nb); -} -EXPORT_SYMBOL_GPL(pci_pwrctl_device_unset_ready); - -static void devm_pci_pwrctl_device_unset_ready(void *data) -{ - struct pci_pwrctl *pwrctl = data; - - pci_pwrctl_device_unset_ready(pwrctl); -} - -/** - * devm_pci_pwrctl_device_set_ready - Managed variant of - * pci_pwrctl_device_set_ready(). - * - * @dev: Device managing this pwrctl provider. - * @pwrctl: PCI power control data. - * - * Returns: - * 0 on success, negative error number on error. 
- */ -int devm_pci_pwrctl_device_set_ready(struct device *dev, - struct pci_pwrctl *pwrctl) -{ - int ret; - - ret = pci_pwrctl_device_set_ready(pwrctl); - if (ret) - return ret; - - return devm_add_action_or_reset(dev, - devm_pci_pwrctl_device_unset_ready, - pwrctl); -} -EXPORT_SYMBOL_GPL(devm_pci_pwrctl_device_set_ready); - -MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); -MODULE_DESCRIPTION("PCI Device Power Control core driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/pci/pwrctl/Kconfig b/drivers/pci/pwrctrl/Kconfig index 54589bb2403b..54589bb2403b 100644 --- a/drivers/pci/pwrctl/Kconfig +++ b/drivers/pci/pwrctrl/Kconfig diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile new file mode 100644 index 000000000000..75c7ce531c7e --- /dev/null +++ b/drivers/pci/pwrctrl/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o +pci-pwrctrl-core-y := core.o + +obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c new file mode 100644 index 000000000000..2fb174db91e5 --- /dev/null +++ b/drivers/pci/pwrctrl/core.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Linaro Ltd. + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci-pwrctrl.h> +#include <linux/property.h> +#include <linux/slab.h> + +static int pci_pwrctrl_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct pci_pwrctrl *pwrctrl = container_of(nb, struct pci_pwrctrl, nb); + struct device *dev = data; + + if (dev_fwnode(dev) != dev_fwnode(pwrctrl->dev)) + return NOTIFY_DONE; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + /* + * We will have two struct device objects bound to two different + * drivers on different buses but consuming the same DT node. We + * must not bind the pins twice in this case but only once for + * the first device to be added. + * + * If we got here then the PCI device is the second after the + * power control platform device. Mark its OF node as reused. + */ + dev->of_node_reused = true; + break; + } + + return NOTIFY_DONE; +} + +static void rescan_work_func(struct work_struct *work) +{ + struct pci_pwrctrl *pwrctrl = container_of(work, + struct pci_pwrctrl, work); + + pci_lock_rescan_remove(); + pci_rescan_bus(to_pci_dev(pwrctrl->dev->parent)->bus); + pci_unlock_rescan_remove(); +} + +/** + * pci_pwrctrl_init() - Initialize the PCI power control context struct + * + * @pwrctrl: PCI power control data + * @dev: Parent device + */ +void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev) +{ + pwrctrl->dev = dev; + INIT_WORK(&pwrctrl->work, rescan_work_func); +} +EXPORT_SYMBOL_GPL(pci_pwrctrl_init); + +/** + * pci_pwrctrl_device_set_ready() - Notify the pwrctrl subsystem that the PCI + * device is powered-up and ready to be detected. + * + * @pwrctrl: PCI power control data. + * + * Returns: + * 0 on success, negative error number on error. + * + * Note: + * This function returning 0 doesn't mean the device was detected. It means, + * that the bus rescan was successfully started. The device will get bound to + * its PCI driver asynchronously. 
+ */ +int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl) +{ + int ret; + + if (!pwrctrl->dev) + return -ENODEV; + + pwrctrl->nb.notifier_call = pci_pwrctrl_notify; + ret = bus_register_notifier(&pci_bus_type, &pwrctrl->nb); + if (ret) + return ret; + + schedule_work(&pwrctrl->work); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready); + +/** + * pci_pwrctrl_device_unset_ready() - Notify the pwrctrl subsystem that the PCI + * device is about to be powered-down. + * + * @pwrctrl: PCI power control data. + */ +void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl) +{ + /* + * We don't have to delete the link here. Typically, this function + * is only called when the power control device is being detached. If + * it is being detached then the child PCI device must have already + * been unbound too or the device core wouldn't let us unbind. + */ + bus_unregister_notifier(&pci_bus_type, &pwrctrl->nb); +} +EXPORT_SYMBOL_GPL(pci_pwrctrl_device_unset_ready); + +static void devm_pci_pwrctrl_device_unset_ready(void *data) +{ + struct pci_pwrctrl *pwrctrl = data; + + pci_pwrctrl_device_unset_ready(pwrctrl); +} + +/** + * devm_pci_pwrctrl_device_set_ready - Managed variant of + * pci_pwrctrl_device_set_ready(). + * + * @dev: Device managing this pwrctrl provider. + * @pwrctrl: PCI power control data. + * + * Returns: + * 0 on success, negative error number on error. + */ +int devm_pci_pwrctrl_device_set_ready(struct device *dev, + struct pci_pwrctrl *pwrctrl) +{ + int ret; + + ret = pci_pwrctrl_device_set_ready(pwrctrl); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, + devm_pci_pwrctrl_device_unset_ready, + pwrctrl); +} +EXPORT_SYMBOL_GPL(devm_pci_pwrctrl_device_set_ready); + +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_DESCRIPTION("PCI Device Power Control core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c index 0e6bd47671c2..4e664e7b8dd2 100644 --- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c +++ b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c @@ -6,19 +6,19 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/pci-pwrctl.h> +#include <linux/pci-pwrctrl.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/pwrseq/consumer.h> #include <linux/slab.h> #include <linux/types.h> -struct pci_pwrctl_pwrseq_data { - struct pci_pwrctl ctx; +struct pci_pwrctrl_pwrseq_data { + struct pci_pwrctrl ctx; struct pwrseq_desc *pwrseq; }; -struct pci_pwrctl_pwrseq_pdata { +struct pci_pwrctrl_pwrseq_pdata { const char *target; /* * Called before doing anything else to perform device-specific @@ -27,7 +27,7 @@ struct pci_pwrctl_pwrseq_pdata { int (*validate_device)(struct device *dev); }; -static int pci_pwrctl_pwrseq_qcm_wcn_validate_device(struct device *dev) +static int pci_pwrctrl_pwrseq_qcm_wcn_validate_device(struct device *dev) { /* * Old device trees for some platforms already define wifi nodes for @@ -47,22 +47,22 @@ static int pci_pwrctl_pwrseq_qcm_wcn_validate_device(struct device *dev) return 0; } -static const struct pci_pwrctl_pwrseq_pdata pci_pwrctl_pwrseq_qcom_wcn_pdata = { +static const struct pci_pwrctrl_pwrseq_pdata pci_pwrctrl_pwrseq_qcom_wcn_pdata = { .target = "wlan", - .validate_device = pci_pwrctl_pwrseq_qcm_wcn_validate_device, + .validate_device = pci_pwrctrl_pwrseq_qcm_wcn_validate_device, }; -static void devm_pci_pwrctl_pwrseq_power_off(void *data) 
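For orientation, a hedged sketch of a pwrctrl provider built on the core API above; the driver name and the power-up step are hypothetical, and the real in-tree consumer is the pwrseq driver whose rename continues below:

static int foo_pwrctrl_probe(struct platform_device *pdev)
{
	struct pci_pwrctrl *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Power up the PCI device here (regulators, clocks - omitted) */

	pci_pwrctrl_init(ctx, &pdev->dev);

	/* Kicks off the bus rescan; undone automatically on unbind */
	return devm_pci_pwrctrl_device_set_ready(&pdev->dev, ctx);
}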
+static void devm_pci_pwrctrl_pwrseq_power_off(void *data) { struct pwrseq_desc *pwrseq = data; pwrseq_power_off(pwrseq); } -static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev) +static int pci_pwrctrl_pwrseq_probe(struct platform_device *pdev) { - const struct pci_pwrctl_pwrseq_pdata *pdata; - struct pci_pwrctl_pwrseq_data *data; + const struct pci_pwrctrl_pwrseq_pdata *pdata; + struct pci_pwrctrl_pwrseq_data *data; struct device *dev = &pdev->dev; int ret; @@ -90,49 +90,49 @@ static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "Failed to power-on the device\n"); - ret = devm_add_action_or_reset(dev, devm_pci_pwrctl_pwrseq_power_off, + ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_pwrseq_power_off, data->pwrseq); if (ret) return ret; - pci_pwrctl_init(&data->ctx, dev); + pci_pwrctrl_init(&data->ctx, dev); - ret = devm_pci_pwrctl_device_set_ready(dev, &data->ctx); + ret = devm_pci_pwrctrl_device_set_ready(dev, &data->ctx); if (ret) return dev_err_probe(dev, ret, - "Failed to register the pwrctl wrapper\n"); + "Failed to register the pwrctrl wrapper\n"); return 0; } -static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = { +static const struct of_device_id pci_pwrctrl_pwrseq_of_match[] = { { /* ATH11K in QCA6390 package. */ .compatible = "pci17cb,1101", - .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata, + .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata, }, { /* ATH11K in WCN6855 package. */ .compatible = "pci17cb,1103", - .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata, + .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata, }, { /* ATH12K in WCN7850 package. */ .compatible = "pci17cb,1107", - .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata, + .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata, }, { } }; -MODULE_DEVICE_TABLE(of, pci_pwrctl_pwrseq_of_match); +MODULE_DEVICE_TABLE(of, pci_pwrctrl_pwrseq_of_match); -static struct platform_driver pci_pwrctl_pwrseq_driver = { +static struct platform_driver pci_pwrctrl_pwrseq_driver = { .driver = { - .name = "pci-pwrctl-pwrseq", - .of_match_table = pci_pwrctl_pwrseq_of_match, + .name = "pci-pwrctrl-pwrseq", + .of_match_table = pci_pwrctrl_pwrseq_of_match, }, - .probe = pci_pwrctl_pwrseq_probe, + .probe = pci_pwrctrl_pwrseq_probe, }; -module_platform_driver(pci_pwrctl_pwrseq_driver); +module_platform_driver(pci_pwrctrl_pwrseq_driver); MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); MODULE_DESCRIPTION("Generic PCI Power Control module for power sequenced devices"); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 41dec625ed7b..76f4df75b08a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -12,6 +12,7 @@ * file, where their drivers can use them. */ +#include <linux/align.h> #include <linux/bitfield.h> #include <linux/types.h> #include <linux/kernel.h> @@ -29,10 +30,23 @@ #include <linux/nvme.h> #include <linux/platform_data/x86/apple.h> #include <linux/pm_runtime.h> +#include <linux/sizes.h> #include <linux/suspend.h> #include <linux/switchtec.h> #include "pci.h" +static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta) +{ + unsigned long count; + int ret; + + ret = pcie_lbms_count(dev, &count); + if (ret < 0) + return lnksta & PCI_EXP_LNKSTA_LBMS; + + return count > 0; +} + /* * Retrain the link of a downstream PCIe port by hand if necessary. 
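A note on the pcie_lbms_seen() helper above may help here (an observation from the surrounding code, not part of the patch):

/*
 * Once the bwctrl service is bound, it keeps its own LBMS count, so a
 * Link Bandwidth Management event that the bwctrl IRQ handler already
 * observed and cleared from LNKSTA remains visible to this quirk; only
 * without bwctrl does the quirk fall back to the directly-read,
 * clearable LNKSTA bit.
 */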
* @@ -96,22 +110,16 @@ int pcie_failed_link_retrain(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == - PCI_EXP_LNKSTA_LBMS) { + if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) { u16 oldlnkctl2 = lnkctl2; pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); - lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; - lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; - pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); - - ret = pcie_retrain_link(dev, false); + ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false); if (ret) { pci_info(dev, "retraining failed\n"); - pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, - oldlnkctl2); - pcie_retrain_link(dev, true); + pcie_set_target_speed(dev, PCIE_LNKCTL2_TLS2SPEED(oldlnkctl2), + true); return ret; } @@ -125,11 +133,7 @@ int pcie_failed_link_retrain(struct pci_dev *dev) pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n"); pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); - lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; - lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; - pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); - - ret = pcie_retrain_link(dev, false); + ret = pcie_set_target_speed(dev, PCIE_LNKCAP_SLS2SPEED(lnkcap), false); if (ret) { pci_info(dev, "retraining failed\n"); return ret; @@ -586,8 +590,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) const char *r_name = pci_resource_name(dev, i); if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { - r->end = PAGE_SIZE - 1; - r->start = 0; + resource_set_range(r, 0, PAGE_SIZE); r->flags |= IORESOURCE_UNSET; pci_info(dev, "%s %pR: expanded to page size\n", r_name, r); @@ -604,10 +607,9 @@ static void quirk_s3_64M(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; - if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { + if (!IS_ALIGNED(r->start, SZ_64M) || resource_size(r) != SZ_64M) { r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0x3ffffff; + resource_set_range(r, 0, SZ_64M); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); @@ -1342,8 +1344,7 @@ static void quirk_dunord(struct pci_dev *dev) struct resource *r = &dev->resource[1]; r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0xffffff; + resource_set_range(r, 0, SZ_16M); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord); @@ -2340,8 +2341,7 @@ static void quirk_tc86c001_ide(struct pci_dev *dev) if (r->start & 0x8) { r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0xf; + resource_set_range(r, 0, SZ_16); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2, @@ -2369,8 +2369,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev) pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n", bar); r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0xff; + resource_set_range(r, 0, SZ_256); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, @@ -3522,13 +3521,13 @@ static void quirk_intel_ntb(struct pci_dev *dev) if (rc) return; - dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1; + resource_set_size(&dev->resource[2], (resource_size_t)1 << val); rc = pci_read_config_byte(dev, 0x00D1, &val); if (rc) return; - dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1; + resource_set_size(&dev->resource[4], (resource_size_t)1 
<< val); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); @@ -4996,18 +4995,21 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) } /* - * Wangxun 10G/1G NICs have no ACS capability, and on multi-function - * devices, peer-to-peer transactions are not be used between the functions. - * So add an ACS quirk for below devices to isolate functions. + * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on + * multi-function devices, the hardware isolates the functions by + * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and + * PCI_ACS_CR were set. * SFxxx 1G NICs(em). * RP1000/RP2000 10G NICs(sp). + * FF5xxx 40G/25G/10G NICs(aml). */ static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) { switch (dev->device) { - case 0x0100 ... 0x010F: - case 0x1001: - case 0x2001: + case 0x0100 ... 0x010F: /* EM */ + case 0x1001: case 0x2001: /* SP */ + case 0x5010: case 0x5025: case 0x5040: /* AML */ + case 0x5110: case 0x5125: case 0x5140: /* AML */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index e4ce1145aa3e..963b8d2855c1 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -17,37 +17,35 @@ static void pci_free_resources(struct pci_dev *dev) } } -static int pci_pwrctl_unregister(struct device *dev, void *data) +static void pci_pwrctrl_unregister(struct device *dev) { - struct device_node *pci_node = data, *plat_node = dev_of_node(dev); + struct platform_device *pdev; - if (dev_is_platform(dev) && plat_node && plat_node == pci_node) { - of_device_unregister(to_platform_device(dev)); - of_node_clear_flag(plat_node, OF_POPULATED); - } + pdev = of_find_device_by_node(dev_of_node(dev)); + if (!pdev) + return; - return 0; + of_device_unregister(pdev); + of_node_clear_flag(dev_of_node(dev), OF_POPULATED); } static void pci_stop_dev(struct pci_dev *dev) { pci_pme_active(dev, false); - if (pci_dev_is_added(dev)) { - device_for_each_child(dev->dev.parent, dev_of_node(&dev->dev), - pci_pwrctl_unregister); - device_release_driver(&dev->dev); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - of_pci_remove_node(dev); + if (!pci_dev_test_and_clear_added(dev)) + return; - pci_dev_assign_added(dev, false); - } + pci_pwrctrl_unregister(&dev->dev); + device_release_driver(&dev->dev); + pci_proc_detach_device(dev); + pci_remove_sysfs_dev_files(dev); + of_pci_remove_node(dev); } static void pci_destroy_dev(struct pci_dev *dev) { - if (!dev->dev.kobj.parent) + if (pci_dev_test_and_set_removed(dev)) return; pci_npem_remove(dev); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 23082bc0ca37..5e00cecf1f1a 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -134,6 +134,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) int i; pci_dev_for_each_resource(dev, r, i) { + const char *r_name = pci_resource_name(dev, i); struct pci_dev_resource *dev_res, *tmp; resource_size_t r_align; struct list_head *n; @@ -146,8 +147,8 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) r_align = pci_resource_alignment(dev, r); if (!r_align) { - pci_warn(dev, "BAR %d: %pR has bogus alignment\n", - i, r); + pci_warn(dev, "%s %pR: alignment must not be zero\n", + r_name, r); continue; } @@ -246,8 +247,7 @@ static void reassign_resources_sorted(struct list_head 
*realloc_head, add_size = add_res->add_size; align = add_res->min_align; if (!resource_size(res)) { - res->start = align; - res->end = res->start + add_size - 1; + resource_set_range(res, align, add_size); if (pci_assign_resource(add_res->dev, idx)) reset_resource(res); } else { @@ -938,8 +938,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, return; } - b_res->start = min_align; - b_res->end = b_res->start + size0 - 1; + resource_set_range(b_res, min_align, size0); b_res->flags |= IORESOURCE_STARTALIGN; if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, @@ -1202,8 +1201,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, * Reserve some resources for CardBus. We reserve a fixed amount * of bus space for CardBus bridges. */ - b_res->start = pci_cardbus_io_size; - b_res->end = b_res->start + pci_cardbus_io_size - 1; + resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size); b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_io_size; @@ -1215,8 +1213,7 @@ handle_b_res_1: b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW]; if (b_res->parent) goto handle_b_res_2; - b_res->start = pci_cardbus_io_size; - b_res->end = b_res->start + pci_cardbus_io_size - 1; + resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size); b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_io_size; @@ -1249,8 +1246,8 @@ handle_b_res_2: * Otherwise, allocate one region of twice the size. */ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { - b_res->start = pci_cardbus_mem_size; - b_res->end = b_res->start + pci_cardbus_mem_size - 1; + resource_set_range(b_res, pci_cardbus_mem_size, + pci_cardbus_mem_size); b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_STARTALIGN; if (realloc_head) { @@ -1267,8 +1264,7 @@ handle_b_res_3: b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW]; if (b_res->parent) goto handle_done; - b_res->start = pci_cardbus_mem_size; - b_res->end = b_res->start + b_res_3_size - 1; + resource_set_range(b_res, pci_cardbus_mem_size, b_res_3_size); b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= b_res_3_size; @@ -1847,7 +1843,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res, return; } - res->end = res->start + new_size - 1; + resource_set_size(res, new_size); /* If the resource is part of the add_list, remove it now */ if (add_list) @@ -1899,6 +1895,9 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io, } } +#define ALIGN_DOWN_IF_NONZERO(addr, align) \ + ((align) ? ALIGN_DOWN((addr), (align)) : (addr)) + /* * io, mmio and mmio_pref contain the total amount of bridge window space * available. This includes the minimal space needed to cover all the @@ -2010,8 +2009,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, * what is available). */ align = pci_resource_alignment(dev, res); - io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1 - : io.start + io_per_b - 1; + resource_set_size(&io, ALIGN_DOWN_IF_NONZERO(io_per_b, align)); /* * The x_per_b holds the extra resource space that can be @@ -2023,15 +2021,14 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, res = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; align = pci_resource_alignment(dev, res); - mmio.end = align ? 
mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1 - : mmio.start + mmio_per_b - 1; + resource_set_size(&mmio, + ALIGN_DOWN_IF_NONZERO(mmio_per_b, align)); + mmio.start -= resource_size(res); + + res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; + align = pci_resource_alignment(dev, res); - mmio_pref.end = align ? mmio_pref.start + - ALIGN_DOWN(mmio_pref_per_b, align) - 1 - : mmio_pref.start + mmio_pref_per_b - 1; + resource_set_size(&mmio_pref, + ALIGN_DOWN_IF_NONZERO(mmio_pref_per_b, align)); + mmio_pref.start -= resource_size(res); + + pci_bus_distribute_available_resources(b, add_list, io, mmio, diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index c6d933ddfd46..ca14576bf2bf 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -211,8 +211,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, start = res->start; end = res->end; - res->start = fw_addr; - res->end = res->start + size - 1; + resource_set_range(res, fw_addr, size); res->flags &= ~IORESOURCE_UNSET; root = pci_find_parent_resource(dev, res); @@ -463,7 +462,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) if (ret) return ret; - res->end = res->start + pci_rebar_size_to_bytes(size) - 1; + resource_set_size(res, pci_rebar_size_to_bytes(size)); /* Check if the new config works by trying to assign everything. */ if (dev->bus->self) { @@ -475,7 +474,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) error_resize: pci_rebar_set_size(dev, resno, old); - res->end = res->start + pci_rebar_size_to_bytes(old) - 1; + resource_set_size(res, pci_rebar_size_to_bytes(old)); return ret; } EXPORT_SYMBOL(pci_resize_resource); diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 0f87cade10f7..36b44be0489d 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -79,6 +79,7 @@ static void pci_slot_release(struct kobject *kobj) up_read(&pci_bus_sem); list_del(&slot->list); + pci_bus_put(slot->bus); kfree(slot); } @@ -244,12 +245,13 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, slot = get_slot(parent, slot_nr); if (slot) { if (hotplug) { - if ((err = slot->hotplug ? -EBUSY : 0) - || (err = rename_slot(slot, name))) { - kobject_put(&slot->kobj); - slot = NULL; - goto err; + if (slot->hotplug) { + err = -EBUSY; + goto put_slot; } + err = rename_slot(slot, name); + if (err) + goto put_slot; } goto out; } @@ -261,7 +263,7 @@ placeholder: goto err; } - slot->bus = parent; + slot->bus = pci_bus_get(parent); slot->number = slot_nr; slot->kobj.kset = pci_slots_kset; @@ -269,6 +271,7 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + pci_bus_put(slot->bus); kfree(slot); goto err; } @@ -278,10 +281,8 @@ placeholder: err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) { - kobject_put(&slot->kobj); - goto err; - } + if (err) + goto put_slot; down_read(&pci_bus_sem); list_for_each_entry(dev, &parent->devices, bus_list) @@ -296,6 +297,9 @@ out: kfree(slot_name); mutex_unlock(&pci_slot_mutex); return slot; + +put_slot: + kobject_put(&slot->kobj); err: slot = ERR_PTR(err); goto out; } diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c new file mode 100644 index 000000000000..1e604fbbda65 --- /dev/null +++ b/drivers/pci/tph.c @@ -0,0 +1,547 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TPH (TLP Processing Hints) support + * + * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ * Eric Van Tassell <Eric.VanTassell@amd.com> + * Wei Huang <wei.huang2@amd.com> + */ +#include <linux/pci.h> +#include <linux/pci-acpi.h> +#include <linux/msi.h> +#include <linux/bitfield.h> +#include <linux/pci-tph.h> + +#include "pci.h" + +/* System-wide TPH disabled */ +static bool pci_tph_disabled; + +#ifdef CONFIG_ACPI +/* + * The st_info struct defines the Steering Tag (ST) info returned by the + * firmware PCI ACPI _DSM method (rev=0x7, func=0xF, "_DSM to Query Cache + * Locality TPH Features"), as specified in the approved ECN for PCI Firmware + * Spec and available at https://members.pcisig.com/wg/PCI-SIG/document/15470. + * + * @vm_st_valid: 8-bit ST for volatile memory is valid + * @vm_xst_valid: 16-bit extended ST for volatile memory is valid + * @vm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied + * @vm_st: 8-bit ST for volatile mem + * @vm_xst: 16-bit extended ST for volatile mem + * @pm_st_valid: 8-bit ST for persistent memory is valid + * @pm_xst_valid: 16-bit extended ST for persistent memory is valid + * @pm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied + * @pm_st: 8-bit ST for persistent mem + * @pm_xst: 16-bit extended ST for persistent mem + */ +union st_info { + struct { + u64 vm_st_valid : 1; + u64 vm_xst_valid : 1; + u64 vm_ph_ignore : 1; + u64 rsvd1 : 5; + u64 vm_st : 8; + u64 vm_xst : 16; + u64 pm_st_valid : 1; + u64 pm_xst_valid : 1; + u64 pm_ph_ignore : 1; + u64 rsvd2 : 5; + u64 pm_st : 8; + u64 pm_xst : 16; + }; + u64 value; +}; + +static u16 tph_extract_tag(enum tph_mem_type mem_type, u8 req_type, + union st_info *info) +{ + switch (req_type) { + case PCI_TPH_REQ_TPH_ONLY: /* 8-bit tag */ + switch (mem_type) { + case TPH_MEM_TYPE_VM: + if (info->vm_st_valid) + return info->vm_st; + break; + case TPH_MEM_TYPE_PM: + if (info->pm_st_valid) + return info->pm_st; + break; + } + break; + case PCI_TPH_REQ_EXT_TPH: /* 16-bit tag */ + switch (mem_type) { + case TPH_MEM_TYPE_VM: + if (info->vm_xst_valid) + return info->vm_xst; + break; + case TPH_MEM_TYPE_PM: + if (info->pm_xst_valid) + return info->pm_xst; + break; + } + break; + default: + return 0; + } + + return 0; +} + +#define TPH_ST_DSM_FUNC_INDEX 0xF +static acpi_status tph_invoke_dsm(acpi_handle handle, u32 cpu_uid, + union st_info *st_out) +{ + union acpi_object arg3[3], in_obj, *out_obj; + + if (!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 7, + BIT(TPH_ST_DSM_FUNC_INDEX))) + return AE_ERROR; + + /* DWORD: feature ID (0 for processor cache ST query) */ + arg3[0].integer.type = ACPI_TYPE_INTEGER; + arg3[0].integer.value = 0; + + /* DWORD: target UID */ + arg3[1].integer.type = ACPI_TYPE_INTEGER; + arg3[1].integer.value = cpu_uid; + + /* QWORD: properties, all 0's */ + arg3[2].integer.type = ACPI_TYPE_INTEGER; + arg3[2].integer.value = 0; + + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = ARRAY_SIZE(arg3); + in_obj.package.elements = arg3; + + out_obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 7, + TPH_ST_DSM_FUNC_INDEX, &in_obj); + if (!out_obj) + return AE_ERROR; + + if (out_obj->type != ACPI_TYPE_BUFFER) { + ACPI_FREE(out_obj); + return AE_ERROR; + } + + st_out->value = *((u64 *)(out_obj->buffer.pointer)); + + ACPI_FREE(out_obj); + + return AE_OK; +} +#endif + +/* Update the TPH Requester Enable field of TPH Control Register */ +static void set_ctrl_reg_req_en(struct pci_dev *pdev, u8 req_type) +{ + u32 reg; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg); + + reg &= ~PCI_TPH_CTRL_REQ_EN_MASK; + reg |=
FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, req_type); + + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg); +} + +static u8 get_st_modes(struct pci_dev *pdev) +{ + u32 reg; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg); + reg &= PCI_TPH_CAP_ST_NS | PCI_TPH_CAP_ST_IV | PCI_TPH_CAP_ST_DS; + + return reg; +} + +static u32 get_st_table_loc(struct pci_dev *pdev) +{ + u32 reg; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg); + + return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg); +} + +/* + * Return the size of the ST table. If the ST table is not in the TPH Requester + * Extended Capability space, return 0. Otherwise return the ST Table Size + 1. + */ +static u16 get_st_table_size(struct pci_dev *pdev) +{ + u32 reg; + u32 loc; + + /* Check ST table location first */ + loc = get_st_table_loc(pdev); + + /* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */ + loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc); + if (loc != PCI_TPH_LOC_CAP) + return 0; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg); + + return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1; +} + +/* Return device's Root Port completer capability */ +static u8 get_rp_completer_type(struct pci_dev *pdev) +{ + struct pci_dev *rp; + u32 reg; + int ret; + + rp = pcie_find_root_port(pdev); + if (!rp) + return 0; + + ret = pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, &reg); + if (ret) + return 0; + + return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg); +} + +/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */ +static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag) +{ +#ifdef CONFIG_PCI_MSI + struct msi_desc *msi_desc = NULL; + void __iomem *vec_ctrl; + u32 val; + int err = 0; + + msi_lock_descs(&pdev->dev); + + /* Find the msi_desc entry with matching msix_idx */ + msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) { + if (msi_desc->msi_index == msix_idx) + break; + } + + if (!msi_desc) { + err = -ENXIO; + goto err_out; + } + + /* Get the vector control register (offset 0xc) pointed to by msix_idx */ + vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE; + vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL; + + val = readl(vec_ctrl); + val &= ~PCI_MSIX_ENTRY_CTRL_ST; + val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag); + writel(val, vec_ctrl); + + /* Read back to flush the update */ + val = readl(vec_ctrl); + +err_out: + msi_unlock_descs(&pdev->dev); + return err; +#else + return -ENODEV; +#endif +} + +/* Write tag to ST table - Return 0 if OK, otherwise -errno */ +static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag) +{ + int st_table_size; + int offset; + + /* Check if index is out of bounds */ + st_table_size = get_st_table_size(pdev); + if (index >= st_table_size) + return -ENXIO; + + offset = pdev->tph_cap + PCI_TPH_BASE_SIZEOF + index * sizeof(u16); + + return pci_write_config_word(pdev, offset, tag); } + +/** + * pcie_tph_get_cpu_st() - Retrieve Steering Tag for a target memory associated + * with a specific CPU + * @pdev: PCI device + * @mem_type: target memory type (volatile or persistent RAM) + * @cpu_uid: associated CPU id + * @tag: Steering Tag to be returned + * + * Return the Steering Tag for a target memory that is associated with a + * specific CPU as indicated by cpu_uid.
+ * + * Return: 0 on success, otherwise negative value (-errno) + */ +int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *tag) +{ +#ifdef CONFIG_ACPI + struct pci_dev *rp; + acpi_handle rp_acpi_handle; + union st_info info; + + rp = pcie_find_root_port(pdev); + if (!rp || !rp->bus || !rp->bus->bridge) + return -ENODEV; + + rp_acpi_handle = ACPI_HANDLE(rp->bus->bridge); + + if (tph_invoke_dsm(rp_acpi_handle, cpu_uid, &info) != AE_OK) { + *tag = 0; + return -EINVAL; + } + + *tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info); + + pci_dbg(pdev, "get steering tag: mem_type=%s, cpu_uid=%d, tag=%#04x\n", + (mem_type == TPH_MEM_TYPE_VM) ? "volatile" : "persistent", + cpu_uid, *tag); + + return 0; +#else + return -ENODEV; +#endif +} +EXPORT_SYMBOL(pcie_tph_get_cpu_st); + +/** + * pcie_tph_set_st_entry() - Set Steering Tag in the ST table entry + * @pdev: PCI device + * @index: ST table entry index + * @tag: Steering Tag to be written + * + * Figure out the proper location of the ST table, either in the MSI-X table or + * in the TPH Extended Capability space, and write the Steering Tag into + * the ST entry pointed to by index. + * + * Return: 0 on success, otherwise negative value (-errno) + */ +int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag) +{ + u32 loc; + int err = 0; + + if (!pdev->tph_cap) + return -EINVAL; + + if (!pdev->tph_enabled) + return -EINVAL; + + /* No need to write tag if device is in "No ST Mode" */ + if (pdev->tph_mode == PCI_TPH_ST_NS_MODE) + return 0; + + /* + * Disable TPH before updating ST to avoid potential instability as + * cautioned in PCIe r6.2, sec 6.17.3, "ST Modes of Operation" + */ + set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE); + + loc = get_st_table_loc(pdev); + /* Convert loc to match with PCI_TPH_LOC_* */ + loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc); + + switch (loc) { + case PCI_TPH_LOC_MSIX: + err = write_tag_to_msix(pdev, index, tag); + break; + case PCI_TPH_LOC_CAP: + err = write_tag_to_st_table(pdev, index, tag); + break; + default: + err = -EINVAL; + } + + if (err) { + pcie_disable_tph(pdev); + return err; + } + + set_ctrl_reg_req_en(pdev, pdev->tph_mode); + + pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n", + (loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag); + + return 0; +} +EXPORT_SYMBOL(pcie_tph_set_st_entry); + +/** + * pcie_disable_tph() - Turn off TPH support for device + * @pdev: PCI device + * + * Return: none + */ +void pcie_disable_tph(struct pci_dev *pdev) +{ + if (!pdev->tph_cap) + return; + + if (!pdev->tph_enabled) + return; + + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, 0); + + pdev->tph_mode = 0; + pdev->tph_req_type = 0; + pdev->tph_enabled = 0; +} +EXPORT_SYMBOL(pcie_disable_tph); + +/** + * pcie_enable_tph() - Enable TPH support for device using a specific ST mode + * @pdev: PCI device + * @mode: ST mode to enable. Currently supported modes include: + * + * - PCI_TPH_ST_NS_MODE: No ST Mode + * - PCI_TPH_ST_IV_MODE: Interrupt Vector Mode + * - PCI_TPH_ST_DS_MODE: Device Specific Mode + * + * Check whether the mode is actually supported by the device before enabling + * and return an error if not. Additionally determine what types of requests, + * TPH or extended TPH, can be issued by the device based on its TPH requester + * capability and the Root Port's completer capability.
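+ *
+ * As an illustrative flow (not prescriptive), a driver would typically
+ * enable Interrupt Vector Mode and then program one Steering Tag per
+ * MSI-X vector via pcie_tph_set_st_entry(), e.g.:
+ *
+ *	if (!pcie_enable_tph(pdev, PCI_TPH_ST_IV_MODE))
+ *		program_tags(pdev);
+ *
+ * where program_tags() is a hypothetical helper that looks up and writes
+ * the per-vector tags.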
+ * + * Return: 0 on success, otherwise negative value (-errno) + */ +int pcie_enable_tph(struct pci_dev *pdev, int mode) +{ + u32 reg; + u8 dev_modes; + u8 rp_req_type; + + /* Honor "notph" kernel parameter */ + if (pci_tph_disabled) + return -EINVAL; + + if (!pdev->tph_cap) + return -EINVAL; + + if (pdev->tph_enabled) + return -EBUSY; + + /* Sanitize and check ST mode compatibility */ + mode &= PCI_TPH_CTRL_MODE_SEL_MASK; + dev_modes = get_st_modes(pdev); + if (!((1 << mode) & dev_modes)) + return -EINVAL; + + pdev->tph_mode = mode; + + /* Get req_type supported by device and its Root Port */ + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg); + if (FIELD_GET(PCI_TPH_CAP_EXT_TPH, reg)) + pdev->tph_req_type = PCI_TPH_REQ_EXT_TPH; + else + pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY; + + rp_req_type = get_rp_completer_type(pdev); + + /* Final req_type is the smaller of the two */ + pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type); + + if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE) + return -EINVAL; + + /* Write them into the TPH control register */ + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg); + + reg &= ~PCI_TPH_CTRL_MODE_SEL_MASK; + reg |= FIELD_PREP(PCI_TPH_CTRL_MODE_SEL_MASK, pdev->tph_mode); + + reg &= ~PCI_TPH_CTRL_REQ_EN_MASK; + reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, pdev->tph_req_type); + + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg); + + pdev->tph_enabled = 1; + + return 0; +} +EXPORT_SYMBOL(pcie_enable_tph); + +void pci_restore_tph_state(struct pci_dev *pdev) +{ + struct pci_cap_saved_state *save_state; + int num_entries, i, offset; + u16 *st_entry; + u32 *cap; + + if (!pdev->tph_cap) + return; + + if (!pdev->tph_enabled) + return; + + save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH); + if (!save_state) + return; + + /* Restore control register and all ST entries */ + cap = &save_state->cap.data[0]; + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++); + st_entry = (u16 *)cap; + offset = PCI_TPH_BASE_SIZEOF; + num_entries = get_st_table_size(pdev); + for (i = 0; i < num_entries; i++) { + pci_write_config_word(pdev, pdev->tph_cap + offset, + *st_entry++); + offset += sizeof(u16); + } +} + +void pci_save_tph_state(struct pci_dev *pdev) +{ + struct pci_cap_saved_state *save_state; + int num_entries, i, offset; + u16 *st_entry; + u32 *cap; + + if (!pdev->tph_cap) + return; + + if (!pdev->tph_enabled) + return; + + save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH); + if (!save_state) + return; + + /* Save control register */ + cap = &save_state->cap.data[0]; + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, cap++); + + /* Save all ST entries in extended capability structure */ + st_entry = (u16 *)cap; + offset = PCI_TPH_BASE_SIZEOF; + num_entries = get_st_table_size(pdev); + for (i = 0; i < num_entries; i++) { + pci_read_config_word(pdev, pdev->tph_cap + offset, + st_entry++); + offset += sizeof(u16); + } +} + +void pci_no_tph(void) +{ + pci_tph_disabled = true; + + pr_info("PCIe TPH is disabled\n"); +} + +void pci_tph_init(struct pci_dev *pdev) +{ + int num_entries; + u32 save_size; + + pdev->tph_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH); + if (!pdev->tph_cap) + return; + + num_entries = get_st_table_size(pdev); + save_size = sizeof(u32) + num_entries * sizeof(u16); + pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size); +} diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index f73abff416be..8d58efe998ec 100644 --- a/drivers/phy/Kconfig +++
b/drivers/phy/Kconfig @@ -82,6 +82,17 @@ config PHY_AIROHA_PCIE This driver creates the basic PHY instance and provides the initialization callback for the PCIe GEN3 port. +config PHY_NXP_PTN3222 + tristate "NXP PTN3222 1-port eUSB2 to USB2 redriver" + depends on I2C + depends on OF + select GENERIC_PHY + help + Enable this to support NXP PTN3222 1-port eUSB2 to USB2 Redriver. + This redriver performs translation between eUSB2 and USB2 signalling + schemes. It supports all three USB 2.0 data rates: Low Speed, Full + Speed and High Speed. + source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index ebc399560da4..e281442acc75 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o +obj-$(CONFIG_PHY_NXP_PTN3222) += phy-nxp-ptn3222.o obj-y += allwinner/ \ amlogic/ \ broadcom/ \ diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index b0f19e950601..cd159a71b23c 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -1049,11 +1049,11 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = { MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match); static struct platform_driver sun4i_usb_phy_driver = { - .probe = sun4i_usb_phy_probe, - .remove_new = sun4i_usb_phy_remove, + .probe = sun4i_usb_phy_probe, + .remove = sun4i_usb_phy_remove, .driver = { - .of_match_table = sun4i_usb_phy_of_match, - .name = "sun4i-usb-phy", + .of_match_table = sun4i_usb_phy_of_match, + .name = "sun4i-usb-phy", } }; module_platform_driver(sun4i_usb_phy_driver); diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c index 5213c75b6da6..c5d35031b398 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c @@ -24,9 +24,6 @@ struct bcm_ns_usb2 { struct phy *phy; struct regmap *clkset; void __iomem *base; - - /* Deprecated binding */ - void __iomem *dmu; }; static int bcm_ns_usb2_phy_init(struct phy *phy) @@ -49,10 +46,7 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) goto err_clk_off; } - if (usb2->base) - usb2ctl = readl(usb2->base); - else - usb2ctl = readl(usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL); + usb2ctl = readl(usb2->base); if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) { usb_pll_pdiv = usb2ctl; @@ -66,24 +60,16 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate; /* Unlock DMU PLL settings with some magic value */ - if (usb2->clkset) - regmap_write(usb2->clkset, 0, 0x0000ea68); - else - writel(0x0000ea68, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY); + regmap_write(usb2->clkset, 0, 0x0000ea68); /* Write USB 2.0 PLL control setting */ usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK; usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT; - if (usb2->base) - writel(usb2ctl, usb2->base); - else - writel(usb2ctl, usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL); + + writel(usb2ctl, usb2->base); /* Lock DMU PLL settings */ - if (usb2->clkset) - regmap_write(usb2->clkset, 0, 0x00000000); - else - writel(0x00000000, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY); + regmap_write(usb2->clkset, 0, 0x00000000); err_clk_off: clk_disable_unprepare(usb2->ref_clk); @@ -107,27 +93,17 @@ static int
bcm_ns_usb2_probe(struct platform_device *pdev) return -ENOMEM; usb2->dev = dev; - if (of_property_present(dev->of_node, "brcm,syscon-clkset")) { - usb2->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(usb2->base)) { - dev_err(dev, "Failed to map control reg\n"); - return PTR_ERR(usb2->base); - } - - usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node, - "brcm,syscon-clkset"); - if (IS_ERR(usb2->clkset)) { - dev_err(dev, "Failed to lookup clkset regmap\n"); - return PTR_ERR(usb2->clkset); - } - } else { - usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu"); - if (IS_ERR(usb2->dmu)) { - dev_err(dev, "Failed to map DMU regs\n"); - return PTR_ERR(usb2->dmu); - } + usb2->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(usb2->base)) { + dev_err(dev, "Failed to map control reg\n"); + return PTR_ERR(usb2->base); + } - dev_warn(dev, "using deprecated DT binding\n"); + usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node, + "brcm,syscon-clkset"); + if (IS_ERR(usb2->clkset)) { + dev_err(dev, "Failed to lookup clkset regmap\n"); + return PTR_ERR(usb2->clkset); } usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk"); diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c index 5ebb3a616115..da23078878a9 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init.c @@ -193,256 +193,251 @@ static const u32 usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { /* 3390B0 */ [BRCM_FAMILY_3390A0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 4908 */ [BRCM_FAMILY_4908] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */ - 0, /* 
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK */ - 0, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, }, /* 7250b0 */ [BRCM_FAMILY_7250B0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7271a0 */ [BRCM_FAMILY_7271A0] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - USB_CTRL_USB_PM_SOFT_RESET_MASK, - USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, - USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + 
[USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_SOFT_RESET_SELECTOR] = + USB_CTRL_USB_PM_SOFT_RESET_MASK, + [USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_SELECTOR] = + USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, + [USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7364a0 */ [BRCM_FAMILY_7364A0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7366c0 */ [BRCM_FAMILY_7366C0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* 
USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 74371A0 */ [BRCM_FAMILY_74371A0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB30_CTL1_USB3_IOC_MASK, - USB_CTRL_USB30_CTL1_USB3_IPP_MASK, - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */ - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB30_CTL1_USB3_IOC_SELECTOR] = + USB_CTRL_USB30_CTL1_USB3_IOC_MASK, + [USB_CTRL_USB30_CTL1_USB3_IPP_SELECTOR] = + USB_CTRL_USB30_CTL1_USB3_IPP_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7439B0 */ [BRCM_FAMILY_7439B0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + 
[USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7445d0 */ [BRCM_FAMILY_7445D0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7260a0 */ [BRCM_FAMILY_7260A0] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - USB_CTRL_USB_PM_SOFT_RESET_MASK, - USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, - 
USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_SOFT_RESET_SELECTOR] = + USB_CTRL_USB_PM_SOFT_RESET_MASK, + [USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_SELECTOR] = + USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, + [USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7278a0 */ [BRCM_FAMILY_7278A0] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - 0, /*USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */ - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - USB_CTRL_USB_PM_SOFT_RESET_MASK, - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */ - 0, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_SOFT_RESET_SELECTOR] = + USB_CTRL_USB_PM_SOFT_RESET_MASK, }, }; diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index ad2eec095601..6362ca5b7fb6 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -667,7 +667,7 @@ MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids); static struct platform_driver brcm_usb_driver = { .probe = brcm_usb_phy_probe, - .remove_new = brcm_usb_phy_remove, + .remove = brcm_usb_phy_remove, .driver = { .name = "brcmstb-usb-phy", .pm = &brcm_usb_phy_pm_ops, diff --git a/drivers/phy/cadence/cdns-dphy.c 
b/drivers/phy/cadence/cdns-dphy.c index dddb66de6dba..ed87a3970f83 100644 --- a/drivers/phy/cadence/cdns-dphy.c +++ b/drivers/phy/cadence/cdns-dphy.c @@ -472,7 +472,7 @@ MODULE_DEVICE_TABLE(of, cdns_dphy_of_match); static struct platform_driver cdns_dphy_platform_driver = { .probe = cdns_dphy_probe, - .remove_new = cdns_dphy_remove, + .remove = cdns_dphy_remove, .driver = { .name = "cdns-mipi-dphy", .of_match_table = cdns_dphy_of_match, diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index dfc4f55d112e..45a5c00843bf 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -2731,7 +2731,7 @@ MODULE_DEVICE_TABLE(of, cdns_sierra_id_table); static struct platform_driver cdns_sierra_driver = { .probe = cdns_sierra_phy_probe, - .remove_new = cdns_sierra_phy_remove, + .remove = cdns_sierra_phy_remove, .driver = { .name = "cdns-sierra-phy", .of_match_table = cdns_sierra_id_table, diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 8bbbbb87bb22..a281c0dfae97 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -5440,8 +5440,8 @@ MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match); static struct platform_driver cdns_torrent_phy_driver = { .probe = cdns_torrent_phy_probe, - .remove_new = cdns_torrent_phy_remove, - .driver = { + .remove = cdns_torrent_phy_remove, + .driver = { .name = "cdns-torrent-phy", .of_match_table = cdns_torrent_phy_of_match, .pm = pm_sleep_ptr(&cdns_torrent_phy_pm_ops), diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c index 38388dd04bdc..7aef2f59e8eb 100644 --- a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c +++ b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c @@ -433,12 +433,12 @@ static const struct of_device_id mixel_lvds_phy_of_match[] = { MODULE_DEVICE_TABLE(of, mixel_lvds_phy_of_match); static struct platform_driver mixel_lvds_phy_driver = { - .probe = mixel_lvds_phy_probe, - .remove_new = mixel_lvds_phy_remove, + .probe = mixel_lvds_phy_probe, + .remove = mixel_lvds_phy_remove, .driver = { .pm = &mixel_lvds_phy_pm_ops, .name = "mixel-lvds-phy", - .of_match_table = mixel_lvds_phy_of_match, + .of_match_table = mixel_lvds_phy_of_match, } }; module_platform_driver(mixel_lvds_phy_driver); diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c index b86da8e9daa4..f7994e8983c8 100644 --- a/drivers/phy/freescale/phy-fsl-lynx-28g.c +++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c @@ -631,9 +631,9 @@ static const struct of_device_id lynx_28g_of_match_table[] = { MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table); static struct platform_driver lynx_28g_driver = { - .probe = lynx_28g_probe, - .remove_new = lynx_28g_remove, - .driver = { + .probe = lynx_28g_probe, + .remove = lynx_28g_remove, + .driver = { .name = "lynx-28g", .of_match_table = lynx_28g_of_match_table, }, diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c index 9048cdc760c2..2c8038864357 100644 --- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c +++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c @@ -14,352 +14,265 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#define PHY_REG_00 0x00 -#define PHY_REG_01 0x04 -#define PHY_REG_02 0x08 -#define PHY_REG_08 0x20 -#define PHY_REG_09 0x24 -#define PHY_REG_10 0x28 -#define PHY_REG_11 0x2c - -#define 
PHY_REG_12 0x30 -#define REG12_CK_DIV_MASK GENMASK(5, 4) - -#define PHY_REG_13 0x34 -#define REG13_TG_CODE_LOW_MASK GENMASK(7, 0) - -#define PHY_REG_14 0x38 -#define REG14_TOL_MASK GENMASK(7, 4) -#define REG14_RP_CODE_MASK GENMASK(3, 1) -#define REG14_TG_CODE_HIGH_MASK GENMASK(0, 0) - -#define PHY_REG_15 0x3c -#define PHY_REG_16 0x40 -#define PHY_REG_17 0x44 -#define PHY_REG_18 0x48 -#define PHY_REG_19 0x4c -#define PHY_REG_20 0x50 - -#define PHY_REG_21 0x54 -#define REG21_SEL_TX_CK_INV BIT(7) -#define REG21_PMS_S_MASK GENMASK(3, 0) - -#define PHY_REG_22 0x58 -#define PHY_REG_23 0x5c -#define PHY_REG_24 0x60 -#define PHY_REG_25 0x64 -#define PHY_REG_26 0x68 -#define PHY_REG_27 0x6c -#define PHY_REG_28 0x70 -#define PHY_REG_29 0x74 -#define PHY_REG_30 0x78 -#define PHY_REG_31 0x7c -#define PHY_REG_32 0x80 +#define PHY_REG(reg) ((reg) * 4) +#define REG01_PMS_P_MASK GENMASK(3, 0) +#define REG03_PMS_S_MASK GENMASK(7, 4) +#define REG12_CK_DIV_MASK GENMASK(5, 4) + +#define REG13_TG_CODE_LOW_MASK GENMASK(7, 0) + +#define REG14_TOL_MASK GENMASK(7, 4) +#define REG14_RP_CODE_MASK GENMASK(3, 1) +#define REG14_TG_CODE_HIGH_MASK GENMASK(0, 0) + +#define REG21_SEL_TX_CK_INV BIT(7) +#define REG21_PMS_S_MASK GENMASK(3, 0) /* * REG33 does not match the ref manual. According to Sandor Yu from NXP, * "There is a doc issue on the i.MX8MP latest RM" * REG33 is being used per guidance from Sandor */ +#define REG33_MODE_SET_DONE BIT(7) +#define REG33_FIX_DA BIT(1) + +#define REG34_PHY_READY BIT(7) +#define REG34_PLL_LOCK BIT(6) +#define REG34_PHY_CLK_READY BIT(5) -#define PHY_REG_33 0x84 -#define REG33_MODE_SET_DONE BIT(7) -#define REG33_FIX_DA BIT(1) - -#define PHY_REG_34 0x88 -#define REG34_PHY_READY BIT(7) -#define REG34_PLL_LOCK BIT(6) -#define REG34_PHY_CLK_READY BIT(5) - -#define PHY_REG_35 0x8c -#define PHY_REG_36 0x90 -#define PHY_REG_37 0x94 -#define PHY_REG_38 0x98 -#define PHY_REG_39 0x9c -#define PHY_REG_40 0xa0 -#define PHY_REG_41 0xa4 -#define PHY_REG_42 0xa8 -#define PHY_REG_43 0xac -#define PHY_REG_44 0xb0 -#define PHY_REG_45 0xb4 -#define PHY_REG_46 0xb8 -#define PHY_REG_47 0xbc - -#define PHY_PLL_DIV_REGS_NUM 6 +#ifndef MHZ +#define MHZ (1000UL * 1000UL) +#endif + +#define PHY_PLL_DIV_REGS_NUM 7 struct phy_config { u32 pixclk; u8 pll_div_regs[PHY_PLL_DIV_REGS_NUM]; }; +/* + * The calculated_phy_pll_cfg only handles an integer divider for PMS, + * meaning the last four entries will be fixed, but the first three will + * be calculated by the PMS calculator.
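+ *
+ * Layout assumed here, mirroring fsl_samsung_hdmi_calculate_phy() below:
+ * pll_div_regs[0] carries P (REG01[3:0]), pll_div_regs[1] carries M, and
+ * pll_div_regs[2] carries S-1 (REG03[7:4]); entries 3-6 keep the fixed
+ * values given in this initializer.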
+ */ +static struct phy_config calculated_phy_pll_cfg = { + .pixclk = 0, + .pll_div_regs = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, +}; + +/* The lookup table contains values for which the fractional divider is used */ static const struct phy_config phy_pll_cfg[] = { { .pixclk = 22250000, - .pll_div_regs = { 0x4b, 0xf1, 0x89, 0x88, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4b, 0xf1, 0x89, 0x88, 0x80, 0x40 }, }, { .pixclk = 23750000, - .pll_div_regs = { 0x50, 0xf1, 0x86, 0x85, 0x80, 0x40 }, - }, { - .pixclk = 24000000, - .pll_div_regs = { 0x50, 0xf0, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x50, 0xf1, 0x86, 0x85, 0x80, 0x40 }, }, { .pixclk = 24024000, - .pll_div_regs = { 0x50, 0xf1, 0x99, 0x02, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x50, 0xf1, 0x99, 0x02, 0x80, 0x40 }, }, { .pixclk = 25175000, - .pll_div_regs = { 0x54, 0xfc, 0xcc, 0x91, 0x80, 0x40 }, - }, { - .pixclk = 25200000, - .pll_div_regs = { 0x54, 0xf0, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x54, 0xfc, 0xcc, 0x91, 0x80, 0x40 }, + }, { .pixclk = 26750000, - .pll_div_regs = { 0x5a, 0xf2, 0x89, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 27000000, - .pll_div_regs = { 0x5a, 0xf0, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0xf2, 0x89, 0x88, 0x80, 0x40 }, + }, { .pixclk = 27027000, - .pll_div_regs = { 0x5a, 0xf2, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0xf2, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 29500000, - .pll_div_regs = { 0x62, 0xf4, 0x95, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x62, 0xf4, 0x95, 0x08, 0x80, 0x40 }, }, { .pixclk = 30750000, - .pll_div_regs = { 0x66, 0xf4, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0xf4, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 30888000, - .pll_div_regs = { 0x66, 0xf4, 0x99, 0x18, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0xf4, 0x99, 0x18, 0x88, 0x45 }, }, { .pixclk = 33750000, - .pll_div_regs = { 0x70, 0xf4, 0x82, 0x01, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0xf4, 0x82, 0x01, 0x80, 0x40 }, }, { .pixclk = 35000000, - .pll_div_regs = { 0x58, 0xb8, 0x8b, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 36000000, - .pll_div_regs = { 0x5a, 0xb0, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x58, 0xb8, 0x8b, 0x88, 0x80, 0x40 }, + }, { .pixclk = 36036000, - .pll_div_regs = { 0x5a, 0xb2, 0xfd, 0x0c, 0x80, 0x40 }, - }, { - .pixclk = 40000000, - .pll_div_regs = { 0x64, 0xb0, 0x00, 0x00, 0x80, 0x00 }, - }, { - .pixclk = 43200000, - .pll_div_regs = { 0x5a, 0x90, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x5a, 0xb2, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 43243200, - .pll_div_regs = { 0x5a, 0x92, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x92, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 44500000, - .pll_div_regs = { 0x5c, 0x92, 0x98, 0x11, 0x84, 0x41 }, + .pll_div_regs = { 0xd1, 0x5c, 0x92, 0x98, 0x11, 0x84, 0x41 }, }, { .pixclk = 47000000, - .pll_div_regs = { 0x62, 0x94, 0x95, 0x82, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x62, 0x94, 0x95, 0x82, 0x80, 0x40 }, }, { .pixclk = 47500000, - .pll_div_regs = { 0x63, 0x96, 0xa1, 0x82, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x63, 0x96, 0xa1, 0x82, 0x80, 0x40 }, }, { .pixclk = 50349650, - .pll_div_regs = { 0x54, 0x7c, 0xc3, 0x8f, 0x80, 0x40 }, - }, { - .pixclk = 50400000, - .pll_div_regs = { 0x54, 0x70, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x54, 0x7c, 0xc3, 0x8f, 0x80, 0x40 }, }, { .pixclk = 53250000, - .pll_div_regs = { 0x58, 0x72, 0x84, 0x03, 0x82, 0x41 }, + .pll_div_regs = { 0xd1, 0x58, 0x72, 0x84, 0x03, 0x82, 0x41 }, },
{ .pixclk = 53500000, - .pll_div_regs = { 0x5a, 0x72, 0x89, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 54000000, - .pll_div_regs = { 0x5a, 0x70, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0x72, 0x89, 0x88, 0x80, 0x40 }, + }, { .pixclk = 54054000, - .pll_div_regs = { 0x5a, 0x72, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x72, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 59000000, - .pll_div_regs = { 0x62, 0x74, 0x95, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x62, 0x74, 0x95, 0x08, 0x80, 0x40 }, }, { .pixclk = 59340659, - .pll_div_regs = { 0x62, 0x74, 0xdb, 0x52, 0x88, 0x47 }, - }, { - .pixclk = 59400000, - .pll_div_regs = { 0x63, 0x70, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x62, 0x74, 0xdb, 0x52, 0x88, 0x47 }, + }, { .pixclk = 61500000, - .pll_div_regs = { 0x66, 0x74, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0x74, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 63500000, - .pll_div_regs = { 0x69, 0x74, 0x89, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x69, 0x74, 0x89, 0x08, 0x80, 0x40 }, }, { .pixclk = 67500000, - .pll_div_regs = { 0x54, 0x52, 0x87, 0x03, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x54, 0x52, 0x87, 0x03, 0x80, 0x40 }, }, { .pixclk = 70000000, - .pll_div_regs = { 0x58, 0x58, 0x8b, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 72000000, - .pll_div_regs = { 0x5a, 0x50, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x58, 0x58, 0x8b, 0x88, 0x80, 0x40 }, + }, { .pixclk = 72072000, - .pll_div_regs = { 0x5a, 0x52, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x52, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 74176000, - .pll_div_regs = { 0x5d, 0x58, 0xdb, 0xA2, 0x88, 0x41 }, + .pll_div_regs = { 0xd1, 0x5d, 0x58, 0xdb, 0xA2, 0x88, 0x41 }, }, { .pixclk = 74250000, - .pll_div_regs = { 0x5c, 0x52, 0x90, 0x0d, 0x84, 0x41 }, + .pll_div_regs = { 0xd1, 0x5c, 0x52, 0x90, 0x0d, 0x84, 0x41 }, }, { .pixclk = 78500000, - .pll_div_regs = { 0x62, 0x54, 0x87, 0x01, 0x80, 0x40 }, - }, { - .pixclk = 80000000, - .pll_div_regs = { 0x64, 0x50, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x62, 0x54, 0x87, 0x01, 0x80, 0x40 }, + }, { .pixclk = 82000000, - .pll_div_regs = { 0x66, 0x54, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0x54, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 82500000, - .pll_div_regs = { 0x67, 0x54, 0x88, 0x01, 0x90, 0x49 }, + .pll_div_regs = { 0xd1, 0x67, 0x54, 0x88, 0x01, 0x90, 0x49 }, }, { .pixclk = 89000000, - .pll_div_regs = { 0x70, 0x54, 0x84, 0x83, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0x54, 0x84, 0x83, 0x80, 0x40 }, }, { .pixclk = 90000000, - .pll_div_regs = { 0x70, 0x54, 0x82, 0x01, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0x54, 0x82, 0x01, 0x80, 0x40 }, }, { .pixclk = 94000000, - .pll_div_regs = { 0x4e, 0x32, 0xa7, 0x10, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4e, 0x32, 0xa7, 0x10, 0x80, 0x40 }, }, { .pixclk = 95000000, - .pll_div_regs = { 0x50, 0x31, 0x86, 0x85, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x50, 0x31, 0x86, 0x85, 0x80, 0x40 }, }, { .pixclk = 98901099, - .pll_div_regs = { 0x52, 0x3a, 0xdb, 0x4c, 0x88, 0x47 }, + .pll_div_regs = { 0xd1, 0x52, 0x3a, 0xdb, 0x4c, 0x88, 0x47 }, }, { .pixclk = 99000000, - .pll_div_regs = { 0x52, 0x32, 0x82, 0x01, 0x88, 0x47 }, + .pll_div_regs = { 0xd1, 0x52, 0x32, 0x82, 0x01, 0x88, 0x47 }, }, { .pixclk = 100699300, - .pll_div_regs = { 0x54, 0x3c, 0xc3, 0x8f, 0x80, 0x40 }, - }, { - .pixclk = 100800000, - .pll_div_regs = { 0x54, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x54, 0x3c, 0xc3, 0x8f, 
0x80, 0x40 }, + }, { .pixclk = 102500000, - .pll_div_regs = { 0x55, 0x32, 0x8c, 0x05, 0x90, 0x4b }, + .pll_div_regs = { 0xd1, 0x55, 0x32, 0x8c, 0x05, 0x90, 0x4b }, }, { .pixclk = 104750000, - .pll_div_regs = { 0x57, 0x32, 0x98, 0x07, 0x90, 0x49 }, + .pll_div_regs = { 0xd1, 0x57, 0x32, 0x98, 0x07, 0x90, 0x49 }, }, { .pixclk = 106500000, - .pll_div_regs = { 0x58, 0x32, 0x84, 0x03, 0x82, 0x41 }, + .pll_div_regs = { 0xd1, 0x58, 0x32, 0x84, 0x03, 0x82, 0x41 }, }, { .pixclk = 107000000, - .pll_div_regs = { 0x5a, 0x32, 0x89, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 108000000, - .pll_div_regs = { 0x5a, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0x32, 0x89, 0x88, 0x80, 0x40 }, + }, { .pixclk = 108108000, - .pll_div_regs = { 0x5a, 0x32, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x32, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 118000000, - .pll_div_regs = { 0x62, 0x34, 0x95, 0x08, 0x80, 0x40 }, - }, { - .pixclk = 118800000, - .pll_div_regs = { 0x63, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x62, 0x34, 0x95, 0x08, 0x80, 0x40 }, + }, { .pixclk = 123000000, - .pll_div_regs = { 0x66, 0x34, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0x34, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 127000000, - .pll_div_regs = { 0x69, 0x34, 0x89, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x69, 0x34, 0x89, 0x08, 0x80, 0x40 }, }, { .pixclk = 135000000, - .pll_div_regs = { 0x70, 0x34, 0x82, 0x01, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0x34, 0x82, 0x01, 0x80, 0x40 }, }, { .pixclk = 135580000, - .pll_div_regs = { 0x71, 0x39, 0xe9, 0x82, 0x9c, 0x5b }, + .pll_div_regs = { 0xd1, 0x71, 0x39, 0xe9, 0x82, 0x9c, 0x5b }, }, { .pixclk = 137520000, - .pll_div_regs = { 0x72, 0x38, 0x99, 0x10, 0x85, 0x41 }, + .pll_div_regs = { 0xd1, 0x72, 0x38, 0x99, 0x10, 0x85, 0x41 }, }, { .pixclk = 138750000, - .pll_div_regs = { 0x73, 0x35, 0x88, 0x05, 0x90, 0x4d }, + .pll_div_regs = { 0xd1, 0x73, 0x35, 0x88, 0x05, 0x90, 0x4d }, }, { .pixclk = 140000000, - .pll_div_regs = { 0x75, 0x36, 0xa7, 0x90, 0x80, 0x40 }, - }, { - .pixclk = 144000000, - .pll_div_regs = { 0x78, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x75, 0x36, 0xa7, 0x90, 0x80, 0x40 }, + }, { .pixclk = 148352000, - .pll_div_regs = { 0x7b, 0x35, 0xdb, 0x39, 0x90, 0x45 }, + .pll_div_regs = { 0xd1, 0x7b, 0x35, 0xdb, 0x39, 0x90, 0x45 }, }, { .pixclk = 148500000, - .pll_div_regs = { 0x7b, 0x35, 0x84, 0x03, 0x90, 0x45 }, + .pll_div_regs = { 0xd1, 0x7b, 0x35, 0x84, 0x03, 0x90, 0x45 }, }, { .pixclk = 154000000, - .pll_div_regs = { 0x40, 0x18, 0x83, 0x01, 0x00, 0x40 }, + .pll_div_regs = { 0xd1, 0x40, 0x18, 0x83, 0x01, 0x00, 0x40 }, }, { .pixclk = 157000000, - .pll_div_regs = { 0x41, 0x11, 0xa7, 0x14, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x41, 0x11, 0xa7, 0x14, 0x80, 0x40 }, }, { .pixclk = 160000000, - .pll_div_regs = { 0x42, 0x12, 0xa1, 0x20, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x42, 0x12, 0xa1, 0x20, 0x80, 0x40 }, }, { .pixclk = 162000000, - .pll_div_regs = { 0x43, 0x18, 0x8b, 0x08, 0x96, 0x55 }, + .pll_div_regs = { 0xd1, 0x43, 0x18, 0x8b, 0x08, 0x96, 0x55 }, }, { .pixclk = 164000000, - .pll_div_regs = { 0x45, 0x11, 0x83, 0x82, 0x90, 0x4b }, + .pll_div_regs = { 0xd1, 0x45, 0x11, 0x83, 0x82, 0x90, 0x4b }, }, { .pixclk = 165000000, - .pll_div_regs = { 0x45, 0x11, 0x84, 0x81, 0x90, 0x4b }, - }, { - .pixclk = 180000000, - .pll_div_regs = { 0x4b, 0x10, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x45, 0x11, 0x84, 0x81, 0x90, 0x4b }, }, { .pixclk = 185625000, - .pll_div_regs = { 0x4e, 
0x12, 0x9a, 0x95, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4e, 0x12, 0x9a, 0x95, 0x80, 0x40 }, }, { .pixclk = 188000000, - .pll_div_regs = { 0x4e, 0x12, 0xa7, 0x10, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4e, 0x12, 0xa7, 0x10, 0x80, 0x40 }, }, { .pixclk = 198000000, - .pll_div_regs = { 0x52, 0x12, 0x82, 0x01, 0x88, 0x47 }, + .pll_div_regs = { 0xd1, 0x52, 0x12, 0x82, 0x01, 0x88, 0x47 }, }, { .pixclk = 205000000, - .pll_div_regs = { 0x55, 0x12, 0x8c, 0x05, 0x90, 0x4b }, + .pll_div_regs = { 0xd1, 0x55, 0x12, 0x8c, 0x05, 0x90, 0x4b }, }, { .pixclk = 209500000, - .pll_div_regs = { 0x57, 0x12, 0x98, 0x07, 0x90, 0x49 }, + .pll_div_regs = { 0xd1, 0x57, 0x12, 0x98, 0x07, 0x90, 0x49 }, }, { .pixclk = 213000000, - .pll_div_regs = { 0x58, 0x12, 0x84, 0x03, 0x82, 0x41 }, - }, { - .pixclk = 216000000, - .pll_div_regs = { 0x5a, 0x10, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x58, 0x12, 0x84, 0x03, 0x82, 0x41 }, }, { .pixclk = 216216000, - .pll_div_regs = { 0x5a, 0x12, 0xfd, 0x0c, 0x80, 0x40 }, - }, { - .pixclk = 237600000, - .pll_div_regs = { 0x63, 0x10, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0x12, 0xfd, 0x0c, 0x80, 0x40 }, + }, { .pixclk = 254000000, - .pll_div_regs = { 0x69, 0x14, 0x89, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x69, 0x14, 0x89, 0x08, 0x80, 0x40 }, }, { .pixclk = 277500000, - .pll_div_regs = { 0x73, 0x15, 0x88, 0x05, 0x90, 0x4d }, - }, { - .pixclk = 288000000, - .pll_div_regs = { 0x78, 0x10, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x73, 0x15, 0x88, 0x05, 0x90, 0x4d }, + }, { .pixclk = 297000000, - .pll_div_regs = { 0x7b, 0x15, 0x84, 0x03, 0x90, 0x45 }, + .pll_div_regs = { 0xd1, 0x7b, 0x15, 0x84, 0x03, 0x90, 0x45 }, }, }; @@ -369,29 +282,30 @@ struct reg_settings { }; static const struct reg_settings common_phy_cfg[] = { - { PHY_REG_00, 0x00 }, { PHY_REG_01, 0xd1 }, - { PHY_REG_08, 0x4f }, { PHY_REG_09, 0x30 }, - { PHY_REG_10, 0x33 }, { PHY_REG_11, 0x65 }, + { PHY_REG(0), 0x00 }, + /* PHY_REG(1-7) pix clk specific */ + { PHY_REG(8), 0x4f }, { PHY_REG(9), 0x30 }, + { PHY_REG(10), 0x33 }, { PHY_REG(11), 0x65 }, /* REG12 pixclk specific */ /* REG13 pixclk specific */ /* REG14 pixclk specific */ - { PHY_REG_15, 0x80 }, { PHY_REG_16, 0x6c }, - { PHY_REG_17, 0xf2 }, { PHY_REG_18, 0x67 }, - { PHY_REG_19, 0x00 }, { PHY_REG_20, 0x10 }, + { PHY_REG(15), 0x80 }, { PHY_REG(16), 0x6c }, + { PHY_REG(17), 0xf2 }, { PHY_REG(18), 0x67 }, + { PHY_REG(19), 0x00 }, { PHY_REG(20), 0x10 }, /* REG21 pixclk specific */ - { PHY_REG_22, 0x30 }, { PHY_REG_23, 0x32 }, - { PHY_REG_24, 0x60 }, { PHY_REG_25, 0x8f }, - { PHY_REG_26, 0x00 }, { PHY_REG_27, 0x00 }, - { PHY_REG_28, 0x08 }, { PHY_REG_29, 0x00 }, - { PHY_REG_30, 0x00 }, { PHY_REG_31, 0x00 }, - { PHY_REG_32, 0x00 }, { PHY_REG_33, 0x80 }, - { PHY_REG_34, 0x00 }, { PHY_REG_35, 0x00 }, - { PHY_REG_36, 0x00 }, { PHY_REG_37, 0x00 }, - { PHY_REG_38, 0x00 }, { PHY_REG_39, 0x00 }, - { PHY_REG_40, 0x00 }, { PHY_REG_41, 0xe0 }, - { PHY_REG_42, 0x83 }, { PHY_REG_43, 0x0f }, - { PHY_REG_44, 0x3E }, { PHY_REG_45, 0xf8 }, - { PHY_REG_46, 0x00 }, { PHY_REG_47, 0x00 } + { PHY_REG(22), 0x30 }, { PHY_REG(23), 0x32 }, + { PHY_REG(24), 0x60 }, { PHY_REG(25), 0x8f }, + { PHY_REG(26), 0x00 }, { PHY_REG(27), 0x00 }, + { PHY_REG(28), 0x08 }, { PHY_REG(29), 0x00 }, + { PHY_REG(30), 0x00 }, { PHY_REG(31), 0x00 }, + { PHY_REG(32), 0x00 }, { PHY_REG(33), 0x80 }, + { PHY_REG(34), 0x00 }, { PHY_REG(35), 0x00 }, + { PHY_REG(36), 0x00 }, { PHY_REG(37), 0x00 }, + { PHY_REG(38), 0x00 }, { PHY_REG(39), 0x00 }, + { PHY_REG(40), 
0x00 }, { PHY_REG(41), 0xe0 }, + { PHY_REG(42), 0x83 }, { PHY_REG(43), 0x0f }, + { PHY_REG(44), 0x3E }, { PHY_REG(45), 0xf8 }, + { PHY_REG(46), 0x00 }, { PHY_REG(47), 0x00 } }; struct fsl_samsung_hdmi_phy { @@ -412,40 +326,6 @@ to_fsl_samsung_hdmi_phy(struct clk_hw *hw) } static void -fsl_samsung_hdmi_phy_configure_pixclk(struct fsl_samsung_hdmi_phy *phy, - const struct phy_config *cfg) -{ - u8 div = 0x1; - - switch (cfg->pixclk) { - case 22250000 ... 33750000: - div = 0xf; - break; - case 35000000 ... 40000000: - div = 0xb; - break; - case 43200000 ... 47500000: - div = 0x9; - break; - case 50349650 ... 63500000: - div = 0x7; - break; - case 67500000 ... 90000000: - div = 0x5; - break; - case 94000000 ... 148500000: - div = 0x3; - break; - case 154000000 ... 297000000: - div = 0x1; - break; - } - - writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK, div), - phy->regs + PHY_REG_21); -} - -static void fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, const struct phy_config *cfg) { @@ -469,7 +349,7 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, break; } - writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG_12); + writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG(12)); /* * Calculation for the frequency lock detector target code (fld_tg_code) @@ -489,11 +369,88 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, /* FLD_TOL and FLD_RP_CODE taken from downstream driver */ writeb(FIELD_PREP(REG13_TG_CODE_LOW_MASK, fld_tg_code), - phy->regs + PHY_REG_13); + phy->regs + PHY_REG(13)); writeb(FIELD_PREP(REG14_TOL_MASK, 2) | FIELD_PREP(REG14_RP_CODE_MASK, 2) | FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8), - phy->regs + PHY_REG_14); + phy->regs + PHY_REG(14)); +} + +static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u16 *m, u8 *s) +{ + unsigned long best_freq = 0; + u32 min_delta = 0xffffffff; + u8 _p, best_p; + u16 _m, best_m; + u8 _s, best_s; + + /* + * Figure 13-78 of the reference manual states the PLL should be TMDS x 5 + * while the TMDS_CLKO should be the PLL / 5. So to calculate the PLL, + * take the pix clock x 5, then return the value of the PLL / 5. + */ + fout *= 5; + + /* The ref manual states the values of 'P' range from 1 to 11 */ + for (_p = 1; _p <= 11; ++_p) { + for (_s = 1; _s <= 16; ++_s) { + u64 tmp; + u32 delta; + + /* s must be one or even */ + if (_s > 1 && (_s & 0x01) == 1) + _s++; + + /* _s cannot be 14 per the TRM */ + if (_s == 14) + continue; + + /* + * TODO: Ref Manual doesn't state the range of _m + * so this should be further refined if possible. + * This range was set based on the original values + * in the lookup table + */ + tmp = (u64)fout * (_p * _s); + do_div(tmp, 24 * MHZ); + _m = tmp; + if (_m < 0x30 || _m > 0x7b) + continue; + + /* + * Rev 2 of the Ref Manual states the + * VCO can range between 750MHz and + * 3GHz. The VCO is assumed to be + * Fvco = (M * f_ref) / P, + * where f_ref is 24MHz. 
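+ *
+ * Illustrative check (example values chosen here, not taken from the
+ * manual): M = 63 with P = 2 gives Fvco = 63 * 24 MHz / 2 = 756 MHz
+ * and is kept, while the same M with P = 3 gives 504 MHz and is
+ * rejected.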
+ */ + tmp = (u64)_m * 24 * MHZ; + do_div(tmp, _p); + if (tmp < 750 * MHZ || + tmp > 3000 * MHZ) + continue; + + /* Final frequency after post-divider */ + do_div(tmp, _s); + + delta = abs(fout - tmp); + if (delta < min_delta) { + best_p = _p; + best_s = _s; + best_m = _m; + min_delta = delta; + best_freq = tmp; + } + } + } + + if (best_freq) { + *p = best_p; + *m = best_m; + *s = best_s; + } + + return best_freq / 5; } static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy, @@ -503,22 +460,25 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy, u8 val; /* HDMI PHY init */ - writeb(REG33_FIX_DA, phy->regs + PHY_REG_33); + writeb(REG33_FIX_DA, phy->regs + PHY_REG(33)); /* common PHY registers */ for (i = 0; i < ARRAY_SIZE(common_phy_cfg); i++) writeb(common_phy_cfg[i].val, phy->regs + common_phy_cfg[i].reg); - /* set individual PLL registers PHY_REG2 ... PHY_REG7 */ + /* set individual PLL registers PHY_REG1 ... PHY_REG7 */ for (i = 0; i < PHY_PLL_DIV_REGS_NUM; i++) - writeb(cfg->pll_div_regs[i], phy->regs + PHY_REG_02 + i * 4); + writeb(cfg->pll_div_regs[i], phy->regs + PHY_REG(1) + i * 4); + + /* High nibble of PHY_REG3 and low nibble of PHY_REG21 both contain 'S' */ + writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK, + cfg->pll_div_regs[2] >> 4), phy->regs + PHY_REG(21)); - fsl_samsung_hdmi_phy_configure_pixclk(phy, cfg); fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg); - writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG_33); + writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG(33)); - ret = readb_poll_timeout(phy->regs + PHY_REG_34, val, + ret = readb_poll_timeout(phy->regs + PHY_REG(34), val, val & REG34_PLL_LOCK, 50, 20000); if (ret) dev_err(phy->dev, "PLL failed to lock\n"); @@ -537,34 +497,120 @@ static unsigned long phy_clk_recalc_rate(struct clk_hw *hw, return phy->cur_cfg->pixclk; } -static long phy_clk_round_rate(struct clk_hw *hw, - unsigned long rate, unsigned long *parent_rate) +/* Helper function to look up the available fractional-divider rate */ +static const struct phy_config *fsl_samsung_hdmi_phy_lookup_rate(unsigned long rate) { int i; + /* Search the lookup table */ for (i = ARRAY_SIZE(phy_pll_cfg) - 1; i >= 0; i--) if (phy_pll_cfg[i].pixclk <= rate) - return phy_pll_cfg[i].pixclk; + break; - return -EINVAL; + return &phy_pll_cfg[i]; +} + +static void fsl_samsung_hdmi_calculate_phy(struct phy_config *cal_phy, unsigned long rate, + u8 p, u16 m, u8 s) +{ + cal_phy->pixclk = rate; + cal_phy->pll_div_regs[0] = FIELD_PREP(REG01_PMS_P_MASK, p); + cal_phy->pll_div_regs[1] = m; + cal_phy->pll_div_regs[2] = FIELD_PREP(REG03_PMS_S_MASK, s - 1); + /* pll_div_regs 3-6 are fixed and pre-defined already */ +} + +static u32 fsl_samsung_hdmi_phy_get_closest_rate(unsigned long rate, + u32 int_div_clk, u32 frac_div_clk) +{ + /* Calculate the absolute value of the differences and return whichever is closest */ + if (abs((long)rate - (long)int_div_clk) < abs((long)rate - (long)frac_div_clk)) + return int_div_clk; + + return frac_div_clk; +} + +static long phy_clk_round_rate(struct clk_hw *hw, + unsigned long rate, unsigned long *parent_rate) +{ + const struct phy_config *fract_div_phy; + u32 int_div_clk; + u16 m; + u8 p, s; + + /* If the clock is out of range, return an error instead of searching */ + if (rate > 297000000 || rate < 22250000) + return -EINVAL; + + /* Search the fractional divider lookup table */ + fract_div_phy = fsl_samsung_hdmi_phy_lookup_rate(rate); + + /* If the rate is an exact match,
return that value */ + if (rate == fract_div_phy->pixclk) + return fract_div_phy->pixclk; + + /* If the exact match isn't found, calculate the integer divider */ + int_div_clk = fsl_samsung_hdmi_phy_find_pms(rate, &p, &m, &s); + + /* If the int_div_clk rate is an exact match, return that value */ + if (int_div_clk == rate) + return int_div_clk; + + /* If neither rate is an exact match, use the value from the LUT */ + return fract_div_phy->pixclk; +} + +static int phy_use_fract_div(struct fsl_samsung_hdmi_phy *phy, const struct phy_config *fract_div_phy) +{ + phy->cur_cfg = fract_div_phy; + dev_dbg(phy->dev, "fsl_samsung_hdmi_phy: using fractional divider rate = %u\n", + phy->cur_cfg->pixclk); + return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg); +} + +static int phy_use_integer_div(struct fsl_samsung_hdmi_phy *phy, + const struct phy_config *int_div_clk) +{ + phy->cur_cfg = &calculated_phy_pll_cfg; + dev_dbg(phy->dev, "fsl_samsung_hdmi_phy: integer divider rate = %u\n", + phy->cur_cfg->pixclk); + return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg); } static int phy_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct fsl_samsung_hdmi_phy *phy = to_fsl_samsung_hdmi_phy(hw); - int i; + const struct phy_config *fract_div_phy; + u32 int_div_clk; + u16 m; + u8 p, s; - for (i = ARRAY_SIZE(phy_pll_cfg) - 1; i >= 0; i--) - if (phy_pll_cfg[i].pixclk <= rate) - break; + /* Search the fractional divider lookup table */ + fract_div_phy = fsl_samsung_hdmi_phy_lookup_rate(rate); - if (i < 0) - return -EINVAL; + /* If the rate is an exact match, use that value */ + if (fract_div_phy->pixclk == rate) + return phy_use_fract_div(phy, fract_div_phy); - phy->cur_cfg = &phy_pll_cfg[i]; + /* + * If the rate from the fractional divider is not exact, check the integer divider, + * and use it if that value is an exact match. + */ + int_div_clk = fsl_samsung_hdmi_phy_find_pms(rate, &p, &m, &s); + fsl_samsung_hdmi_calculate_phy(&calculated_phy_pll_cfg, int_div_clk, p, m, s); + if (int_div_clk == rate) + return phy_use_integer_div(phy, &calculated_phy_pll_cfg); - return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg); + /* + * Compare the difference between the integer clock and the fractional clock against + * the desired clock and use whichever is closest. 
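+ * (Note: the comparison in fsl_samsung_hdmi_phy_get_closest_rate() is a + * strict less-than, so on an exact tie the fractional-divider LUT + * entry is the one that gets used.)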
+ */ + if (fsl_samsung_hdmi_phy_get_closest_rate(rate, int_div_clk, + fract_div_phy->pixclk) == fract_div_phy->pixclk) + return phy_use_fract_div(phy, fract_div_phy); + else + return phy_use_integer_div(phy, &calculated_phy_pll_cfg); } static const struct clk_ops phy_clk_ops = { @@ -703,8 +749,8 @@ static const struct of_device_id fsl_samsung_hdmi_phy_of_match[] = { MODULE_DEVICE_TABLE(of, fsl_samsung_hdmi_phy_of_match); static struct platform_driver fsl_samsung_hdmi_phy_driver = { - .probe = fsl_samsung_hdmi_phy_probe, - .remove_new = fsl_samsung_hdmi_phy_remove, + .probe = fsl_samsung_hdmi_phy_probe, + .remove = fsl_samsung_hdmi_phy_remove, .driver = { .name = "fsl-samsung-hdmi-phy", .of_match_table = fsl_samsung_hdmi_phy_of_match, diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c index f8e3054a9e59..9ee3cf61cdd0 100644 --- a/drivers/phy/intel/phy-intel-lgm-combo.c +++ b/drivers/phy/intel/phy-intel-lgm-combo.c @@ -605,7 +605,7 @@ static const struct of_device_id of_intel_cbphy_match[] = { static struct platform_driver intel_cbphy_driver = { .probe = intel_cbphy_probe, - .remove_new = intel_cbphy_remove, + .remove = intel_cbphy_remove, .driver = { .name = "intel-combo-phy", .of_match_table = of_intel_cbphy_match, diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c index 4922a5f3327d..59903f86b13f 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c @@ -62,6 +62,8 @@ #define SQ_AMP_CAL_MASK GENMASK(2, 0) #define SQ_AMP_CAL_VAL 1 #define SQ_AMP_CAL_EN BIT(3) +#define UTMI_DIG_CTRL1_REG 0x20 +#define SWAP_DPDM BIT(15) #define UTMI_CTRL_STATUS0_REG 0x24 #define SUSPENDM BIT(22) #define TEST_SEL BIT(25) @@ -99,11 +101,13 @@ struct mvebu_cp110_utmi { * @priv: PHY driver data * @id: PHY port ID * @dr_mode: PHY connection: USB_DR_MODE_HOST or USB_DR_MODE_PERIPHERAL + * @swap_dx: whether to swap d+/d- signals */ struct mvebu_cp110_utmi_port { struct mvebu_cp110_utmi *priv; u32 id; enum usb_dr_mode dr_mode; + bool swap_dx; }; static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port) @@ -159,6 +163,13 @@ static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port) reg &= ~(VDAT_MASK | VSRC_MASK); reg |= (VDAT_VAL << VDAT_OFFSET) | (VSRC_VAL << VSRC_OFFSET); writel(reg, PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG); + + /* Swap D+/D- */ + reg = readl(PORT_REGS(port) + UTMI_DIG_CTRL1_REG); + reg &= ~(SWAP_DPDM); + if (port->swap_dx) + reg |= SWAP_DPDM; + writel(reg, PORT_REGS(port) + UTMI_DIG_CTRL1_REG); } static int mvebu_cp110_utmi_phy_power_off(struct phy *phy) @@ -286,6 +297,7 @@ static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev) struct phy_provider *provider; struct device_node *child; u32 usb_devices = 0; + u32 swap_dx = 0; utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL); if (!utmi) @@ -345,6 +357,10 @@ static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev) } } + of_property_for_each_u32(dev->of_node, "swap-dx-lanes", swap_dx) + if (swap_dx == port_id) + port->swap_dx = 1; + /* Retrieve PHY capabilities */ utmi->ops = &mvebu_cp110_utmi_phy_ops; diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c index 7cb85029fab3..320cf5b50a8c 100644 --- a/drivers/phy/microchip/sparx5_serdes.c +++ b/drivers/phy/microchip/sparx5_serdes.c @@ -21,23 +21,33 @@ #include "sparx5_serdes.h" -#define SPX5_CMU_MAX 14 - #define SPX5_SERDES_10G_START 13 #define 
SPX5_SERDES_25G_START 25 #define SPX5_SERDES_6G10G_CNT SPX5_SERDES_25G_START +#define LAN969X_SERDES_10G_CNT 10 + /* Optimal power settings from GUC */ #define SPX5_SERDES_QUIET_MODE_VAL 0x01ef4e0c -enum sparx5_10g28cmu_mode { - SPX5_SD10G28_CMU_MAIN = 0, - SPX5_SD10G28_CMU_AUX1 = 1, - SPX5_SD10G28_CMU_AUX2 = 3, - SPX5_SD10G28_CMU_NONE = 4, - SPX5_SD10G28_CMU_MAX, +/* Register target sizes */ +const unsigned int sparx5_serdes_tsize[TSIZE_LAST] = { + [TC_SD10G_LANE] = 12, + [TC_SD_CMU] = 14, + [TC_SD_CMU_CFG] = 14, + [TC_SD_LANE] = 25, +}; + +const unsigned int lan969x_serdes_tsize[TSIZE_LAST] = { + [TC_SD10G_LANE] = 10, + [TC_SD_CMU] = 6, + [TC_SD_CMU_CFG] = 6, + [TC_SD_LANE] = 10, }; +/* Pointer to the register target size table */ +const unsigned int *tsize; + enum sparx5_sd25g28_mode_preset_type { SPX5_SD25G28_MODE_PRESET_25000, SPX5_SD25G28_MODE_PRESET_10000, @@ -1095,13 +1105,31 @@ static int sparx5_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index) return sparx5_serdes_cmu_map[mode][sd_index]; } +/* Map of 6G/10G serdes mode and index to CMU index. */ +static const int +lan969x_serdes_cmu_map[SPX5_SD10G28_CMU_MAX][LAN969X_SERDES_10G_CNT] = { + [SPX5_SD10G28_CMU_MAIN] = { 2, 2, 2, 2, 2, + 2, 2, 2, 5, 5 }, + [SPX5_SD10G28_CMU_AUX1] = { 0, 0, 3, 3, 3, + 3, 3, 3, 3, 3 }, + [SPX5_SD10G28_CMU_AUX2] = { 1, 1, 1, 1, 4, + 4, 4, 4, 4, 4 }, + [SPX5_SD10G28_CMU_NONE] = { 1, 1, 1, 1, 4, + 4, 4, 4, 4, 4 }, +}; + +static int lan969x_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index) +{ + return lan969x_serdes_cmu_map[mode][sd_index]; +} + static void sparx5_serdes_cmu_power_off(struct sparx5_serdes_private *priv) { void __iomem *cmu_inst, *cmu_cfg_inst; int i; /* Power down each CMU */ - for (i = 0; i < SPX5_CMU_MAX; i++) { + for (i = 0; i < priv->data->consts.cmu_max; i++) { cmu_inst = sdx5_inst_get(priv, TARGET_SD_CMU, i); cmu_cfg_inst = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, i); @@ -1650,7 +1678,7 @@ static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro, if (params->skip_cmu_cfg) return 0; - cmu_idx = sparx5_serdes_cmu_get(params->cmu_sel, lane_index); + cmu_idx = priv->data->ops.serdes_cmu_get(params->cmu_sel, macro->sidx); err = sparx5_cmu_cfg(priv, cmu_idx); if (err) return err; @@ -2183,6 +2211,10 @@ static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro) { struct sparx5_serdes_private *priv = macro->priv; + /* Clock is auto-detected in 100Base-FX mode on lan969x */ + if (priv->data->type == SPX5_TARGET_LAN969X) + return 0; + if (macro->serdesmode == SPX5_SD_MODE_100FX) { u32 freq = priv->coreclock == 250000000 ? 2 : priv->coreclock == 500000000 ? 
1 : 0; @@ -2297,10 +2329,12 @@ static int sparx5_serdes_set_speed(struct phy *phy, int speed) { struct sparx5_serdes_macro *macro = phy_get_drvdata(phy); - if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000) - return -EINVAL; - if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000) - return -EINVAL; + if (macro->priv->data->type == SPX5_TARGET_SPARX5) { + if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000) + return -EINVAL; + if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000) + return -EINVAL; + } if (speed != macro->speed) { macro->speed = speed; if (macro->serdesmode != SPX5_SD_MODE_NONE) @@ -2337,11 +2371,14 @@ static int sparx5_serdes_validate(struct phy *phy, enum phy_mode mode, if (macro->speed == 0) return -EINVAL; - if (macro->sidx < SPX5_SERDES_10G_START && macro->speed > SPEED_5000) - return -EINVAL; - if (macro->sidx < SPX5_SERDES_25G_START && macro->speed > SPEED_10000) - return -EINVAL; - + if (macro->priv->data->type == SPX5_TARGET_SPARX5) { + if (macro->sidx < SPX5_SERDES_10G_START && + macro->speed > SPEED_5000) + return -EINVAL; + if (macro->sidx < SPX5_SERDES_25G_START && + macro->speed > SPEED_10000) + return -EINVAL; + } switch (submode) { case PHY_INTERFACE_MODE_1000BASEX: if (macro->speed != SPEED_100 && /* This is for 100BASE-FX */ @@ -2375,6 +2412,26 @@ static const struct phy_ops sparx5_serdes_ops = { .owner = THIS_MODULE, }; +static void sparx5_serdes_type_set(struct sparx5_serdes_macro *macro, int sidx) +{ + if (sidx < SPX5_SERDES_10G_START) { + macro->serdestype = SPX5_SDT_6G; + macro->stpidx = macro->sidx; + } else if (sidx < SPX5_SERDES_25G_START) { + macro->serdestype = SPX5_SDT_10G; + macro->stpidx = macro->sidx - SPX5_SERDES_10G_START; + } else { + macro->serdestype = SPX5_SDT_25G; + macro->stpidx = macro->sidx - SPX5_SERDES_25G_START; + } +} + +static void lan969x_serdes_type_set(struct sparx5_serdes_macro *macro, int sidx) +{ + macro->serdestype = SPX5_SDT_10G; + macro->stpidx = macro->sidx; +} + static int sparx5_phy_create(struct sparx5_serdes_private *priv, int idx, struct phy **phy) { @@ -2391,16 +2448,8 @@ static int sparx5_phy_create(struct sparx5_serdes_private *priv, macro->sidx = idx; macro->priv = priv; macro->speed = SPEED_UNKNOWN; - if (idx < SPX5_SERDES_10G_START) { - macro->serdestype = SPX5_SDT_6G; - macro->stpidx = macro->sidx; - } else if (idx < SPX5_SERDES_25G_START) { - macro->serdestype = SPX5_SDT_10G; - macro->stpidx = macro->sidx - SPX5_SERDES_10G_START; - } else { - macro->serdestype = SPX5_SDT_25G; - macro->stpidx = macro->sidx - SPX5_SERDES_25G_START; - } + + priv->data->ops.serdes_type_set(macro, idx); phy_set_drvdata(*phy, macro); @@ -2507,6 +2556,71 @@ static struct sparx5_serdes_io_resource sparx5_serdes_iomap[] = { { TARGET_SD_LANE_25G + 7, 0x5c8000 }, /* 0x610dd0000: sd_lane_25g_32 */ }; +static const struct sparx5_serdes_io_resource lan969x_serdes_iomap[] = { + { TARGET_SD_CMU, 0x0 }, /* 0xe3410000 */ + { TARGET_SD_CMU + 1, 0x8000 }, /* 0xe3418000 */ + { TARGET_SD_CMU + 2, 0x10000 }, /* 0xe3420000 */ + { TARGET_SD_CMU + 3, 0x18000 }, /* 0xe3428000 */ + { TARGET_SD_CMU + 4, 0x20000 }, /* 0xe3430000 */ + { TARGET_SD_CMU + 5, 0x28000 }, /* 0xe3438000 */ + { TARGET_SD_CMU_CFG, 0x30000 }, /* 0xe3440000 */ + { TARGET_SD_CMU_CFG + 1, 0x38000 }, /* 0xe3448000 */ + { TARGET_SD_CMU_CFG + 2, 0x40000 }, /* 0xe3450000 */ + { TARGET_SD_CMU_CFG + 3, 0x48000 }, /* 0xe3458000 */ + { TARGET_SD_CMU_CFG + 4, 0x50000 }, /* 0xe3460000 */ + { TARGET_SD_CMU_CFG + 5, 0x58000 }, /* 0xe3468000 */ + { 
TARGET_SD10G_LANE, 0x60000 }, /* 0xe3470000 */ + { TARGET_SD10G_LANE + 1, 0x68000 }, /* 0xe3478000 */ + { TARGET_SD10G_LANE + 2, 0x70000 }, /* 0xe3480000 */ + { TARGET_SD10G_LANE + 3, 0x78000 }, /* 0xe3488000 */ + { TARGET_SD10G_LANE + 4, 0x80000 }, /* 0xe3490000 */ + { TARGET_SD10G_LANE + 5, 0x88000 }, /* 0xe3498000 */ + { TARGET_SD10G_LANE + 6, 0x90000 }, /* 0xe34a0000 */ + { TARGET_SD10G_LANE + 7, 0x98000 }, /* 0xe34a8000 */ + { TARGET_SD10G_LANE + 8, 0xa0000 }, /* 0xe34b0000 */ + { TARGET_SD10G_LANE + 9, 0xa8000 }, /* 0xe34b8000 */ + { TARGET_SD_LANE, 0x100000 }, /* 0xe3510000 */ + { TARGET_SD_LANE + 1, 0x108000 }, /* 0xe3518000 */ + { TARGET_SD_LANE + 2, 0x110000 }, /* 0xe3520000 */ + { TARGET_SD_LANE + 3, 0x118000 }, /* 0xe3528000 */ + { TARGET_SD_LANE + 4, 0x120000 }, /* 0xe3530000 */ + { TARGET_SD_LANE + 5, 0x128000 }, /* 0xe3538000 */ + { TARGET_SD_LANE + 6, 0x130000 }, /* 0xe3540000 */ + { TARGET_SD_LANE + 7, 0x138000 }, /* 0xe3548000 */ + { TARGET_SD_LANE + 8, 0x140000 }, /* 0xe3550000 */ + { TARGET_SD_LANE + 9, 0x148000 }, /* 0xe3558000 */ +}; + +static const struct sparx5_serdes_match_data sparx5_desc = { + .type = SPX5_TARGET_SPARX5, + .iomap = sparx5_serdes_iomap, + .iomap_size = ARRAY_SIZE(sparx5_serdes_iomap), + .tsize = sparx5_serdes_tsize, + .consts = { + .sd_max = 33, + .cmu_max = 14, + }, + .ops = { + .serdes_type_set = &sparx5_serdes_type_set, + .serdes_cmu_get = &sparx5_serdes_cmu_get, + }, +}; + +static const struct sparx5_serdes_match_data lan969x_desc = { + .type = SPX5_TARGET_LAN969X, + .iomap = lan969x_serdes_iomap, + .iomap_size = ARRAY_SIZE(lan969x_serdes_iomap), + .tsize = lan969x_serdes_tsize, + .consts = { + .sd_max = 10, + .cmu_max = 6, + }, + .ops = { + .serdes_type_set = &lan969x_serdes_type_set, + .serdes_cmu_get = &lan969x_serdes_cmu_get, + }, +}; + /* Client lookup function, uses serdes index */ static struct phy *sparx5_serdes_xlate(struct device *dev, const struct of_phandle_args *args) @@ -2521,7 +2635,7 @@ static struct phy *sparx5_serdes_xlate(struct device *dev, sidx = args->args[0]; /* Check validity: ERR_PTR(-ENODEV) if not valid */ - for (idx = 0; idx < SPX5_SERDES_MAX; idx++) { + for (idx = 0; idx < priv->data->consts.sd_max; idx++) { struct sparx5_serdes_macro *macro = phy_get_drvdata(priv->phys[idx]); @@ -2555,6 +2669,12 @@ static int sparx5_serdes_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); priv->dev = &pdev->dev; + priv->data = device_get_match_data(priv->dev); + if (!priv->data) + return -EINVAL; + + tsize = priv->data->tsize; + /* Get coreclock */ clk = devm_clk_get(priv->dev, NULL); if (IS_ERR(clk)) { @@ -2579,19 +2699,21 @@ static int sparx5_serdes_probe(struct platform_device *pdev) iores->name); return -ENOMEM; } - for (idx = 0; idx < ARRAY_SIZE(sparx5_serdes_iomap); idx++) { - struct sparx5_serdes_io_resource *iomap = &sparx5_serdes_iomap[idx]; + for (idx = 0; idx < priv->data->iomap_size; idx++) { + const struct sparx5_serdes_io_resource *iomap = + &priv->data->iomap[idx]; priv->regs[iomap->id] = iomem + iomap->offset; } - for (idx = 0; idx < SPX5_SERDES_MAX; idx++) { + for (idx = 0; idx < priv->data->consts.sd_max; idx++) { err = sparx5_phy_create(priv, idx, &priv->phys[idx]); if (err) return err; } - /* Power down all CMUs by default */ - sparx5_serdes_cmu_power_off(priv); + /* Power down all CMUs by default */ + if (priv->data->type == SPX5_TARGET_SPARX5) + sparx5_serdes_cmu_power_off(priv); provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate); @@ -2599,7 +2721,8 @@ static int 
sparx5_serdes_probe(struct platform_device *pdev) } static const struct of_device_id sparx5_serdes_match[] = { - { .compatible = "microchip,sparx5-serdes" }, + { .compatible = "microchip,sparx5-serdes", .data = &sparx5_desc }, + { .compatible = "microchip,lan9691-serdes", .data = &lan969x_desc }, { } }; MODULE_DEVICE_TABLE(of, sparx5_serdes_match); diff --git a/drivers/phy/microchip/sparx5_serdes.h b/drivers/phy/microchip/sparx5_serdes.h index 13f94a29225a..d7093d0b09c0 100644 --- a/drivers/phy/microchip/sparx5_serdes.h +++ b/drivers/phy/microchip/sparx5_serdes.h @@ -26,11 +26,18 @@ enum sparx5_serdes_mode { SPX5_SD_MODE_SFI, }; -struct sparx5_serdes_private { - struct device *dev; - void __iomem *regs[NUM_TARGETS]; - struct phy *phys[SPX5_SERDES_MAX]; - unsigned long coreclock; +enum sparx5_10g28cmu_mode { + SPX5_SD10G28_CMU_MAIN = 0, + SPX5_SD10G28_CMU_AUX1 = 1, + SPX5_SD10G28_CMU_AUX2 = 3, + SPX5_SD10G28_CMU_NONE = 4, + SPX5_SD10G28_CMU_MAX, +}; + +enum sparx5_target { + SPX5_TARGET_SPARX5, + SPX5_TARGET_LAN969X, +}; struct sparx5_serdes_macro { @@ -44,6 +51,33 @@ struct sparx5_serdes_macro { enum phy_media media; }; +struct sparx5_serdes_consts { + int sd_max; + int cmu_max; +}; + +struct sparx5_serdes_ops { + void (*serdes_type_set)(struct sparx5_serdes_macro *macro, int sidx); + int (*serdes_cmu_get)(enum sparx5_10g28cmu_mode mode, int sd_index); +}; + +struct sparx5_serdes_match_data { + enum sparx5_target type; + const struct sparx5_serdes_consts consts; + const struct sparx5_serdes_ops ops; + const struct sparx5_serdes_io_resource *iomap; + int iomap_size; + const unsigned int *tsize; +}; + +struct sparx5_serdes_private { + struct device *dev; + void __iomem *regs[NUM_TARGETS]; + struct phy *phys[SPX5_SERDES_MAX]; + unsigned long coreclock; + const struct sparx5_serdes_match_data *data; +}; + /* Read, Write and modify registers content. * The register definition macros start at the id */ diff --git a/drivers/phy/microchip/sparx5_serdes_regs.h b/drivers/phy/microchip/sparx5_serdes_regs.h index d0543fd3dc94..11c4fdc593fa 100644 --- a/drivers/phy/microchip/sparx5_serdes_regs.h +++ b/drivers/phy/microchip/sparx5_serdes_regs.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0+ * Microchip Sparx5 SerDes driver * - * Copyright (c) 2020 Microchip Technology Inc. + * Copyright (c) 2024 Microchip Technology Inc. */ -/* This file is autogenerated by cml-utils 2020-11-16 13:11:27 +0100. - * Commit ID: 13bdf073131d8bf40c54901df6988ae4e9c8f29f +/* This file is autogenerated by cml-utils 2023-04-13 15:02:00 +0200. + * Commit ID: 5ac560288d46048f872ecdb8add53717f1efc0e1 */ #ifndef _SPARX5_SERDES_REGS_H_ @@ -26,10 +26,25 @@ enum sparx5_serdes_target { NUM_TARGETS = 332 }; +enum sparx5_serdes_tsize_enum { + TC_SD10G_LANE, + TC_SD_CMU, + TC_SD_CMU_CFG, + TC_SD_LANE, + TSIZE_LAST, +}; + +/* sparx5_serdes.c */ +extern const unsigned int *tsize; + +#define TSIZE(o) tsize[o] + #define __REG(...) 
__VA_ARGS__ -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_01 */ -#define SD10G_LANE_LANE_01(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 4, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_01 */ +#define SD10G_LANE_LANE_01(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 4, 0, \ + 1, 4) #define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\ @@ -49,8 +64,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_01_CFG_RXDET_STR_GET(x)\ FIELD_GET(SD10G_LANE_LANE_01_CFG_RXDET_STR, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_02 */ -#define SD10G_LANE_LANE_02(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 8, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_02 */ +#define SD10G_LANE_LANE_02(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 8, 0, \ + 1, 4) #define SD10G_LANE_LANE_02_CFG_EN_ADV BIT(0) #define SD10G_LANE_LANE_02_CFG_EN_ADV_SET(x)\ @@ -82,8 +99,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_03 */ -#define SD10G_LANE_LANE_03(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 12, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_03 */ +#define SD10G_LANE_LANE_03(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 12, 0, \ + 1, 4) #define SD10G_LANE_LANE_03_CFG_TAP_MAIN BIT(0) #define SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(x)\ @@ -91,8 +110,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_03_CFG_TAP_MAIN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_03_CFG_TAP_MAIN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_04 */ -#define SD10G_LANE_LANE_04(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 16, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_04 */ +#define SD10G_LANE_LANE_04(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 16, 0, \ + 1, 4) #define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0 GENMASK(4, 0) #define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET(x)\ @@ -100,8 +121,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_06 */ -#define SD10G_LANE_LANE_06(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 24, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_06 */ +#define SD10G_LANE_LANE_06(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 24, 0, \ + 1, 4) #define SD10G_LANE_LANE_06_CFG_PD_DRIVER BIT(0) #define SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(x)\ @@ -139,8 +162,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_06_CFG_EN_PREEMPH_GET(x)\ FIELD_GET(SD10G_LANE_LANE_06_CFG_EN_PREEMPH, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0B */ -#define SD10G_LANE_LANE_0B(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 44, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0B */ +#define SD10G_LANE_LANE_0B(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 44, 0, \ + 1, 4) #define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET(x)\ @@ -172,8 +197,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0C */ -#define SD10G_LANE_LANE_0C(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 48, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0C */ +#define SD10G_LANE_LANE_0C(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 
1, 288, 48, 0, \ + 1, 4) #define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE BIT(0) #define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET(x)\ @@ -223,8 +250,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0D */ -#define SD10G_LANE_LANE_0D(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 52, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0D */ +#define SD10G_LANE_LANE_0D(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 52, 0, \ + 1, 4) #define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0_SET(x)\ @@ -238,8 +267,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0D_CFG_EQR_BYP_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0D_CFG_EQR_BYP, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0E */ -#define SD10G_LANE_LANE_0E(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 56, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0E */ +#define SD10G_LANE_LANE_0E(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 56, 0, \ + 1, 4) #define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET(x)\ @@ -265,8 +296,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0F */ -#define SD10G_LANE_LANE_0F(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 60, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0F */ +#define SD10G_LANE_LANE_0F(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 60, 0, \ + 1, 4) #define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET(x)\ @@ -274,8 +307,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_13 */ -#define SD10G_LANE_LANE_13(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 76, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_13 */ +#define SD10G_LANE_LANE_13(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 76, 0, \ + 1, 4) #define SD10G_LANE_LANE_13_CFG_DCDR_PD BIT(0) #define SD10G_LANE_LANE_13_CFG_DCDR_PD_SET(x)\ @@ -295,8 +330,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_13_CFG_CDRCK_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_13_CFG_CDRCK_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_14 */ -#define SD10G_LANE_LANE_14(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 80, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_14 */ +#define SD10G_LANE_LANE_14(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 80, 0, \ + 1, 4) #define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET(x)\ @@ -304,8 +341,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_15 */ -#define SD10G_LANE_LANE_15(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 84, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_15 */ +#define SD10G_LANE_LANE_15(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 84, 0, \ + 1, 4) #define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8 GENMASK(7, 0) #define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET(x)\ @@ -313,8 +352,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_GET(x)\ 
FIELD_GET(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_16 */ -#define SD10G_LANE_LANE_16(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 88, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_16 */ +#define SD10G_LANE_LANE_16(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 88, 0, \ + 1, 4) #define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16 GENMASK(7, 0) #define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET(x)\ @@ -322,8 +363,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_GET(x)\ FIELD_GET(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_1A */ -#define SD10G_LANE_LANE_1A(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 104, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_1A */ +#define SD10G_LANE_LANE_1A(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 104, 0,\ + 1, 4) #define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN BIT(0) #define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN_SET(x)\ @@ -355,8 +398,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_22 */ -#define SD10G_LANE_LANE_22(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 136, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_22 */ +#define SD10G_LANE_LANE_22(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 136, 0,\ + 1, 4) #define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1 GENMASK(4, 0) #define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET(x)\ @@ -364,8 +409,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_GET(x)\ FIELD_GET(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_23 */ -#define SD10G_LANE_LANE_23(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 140, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_23 */ +#define SD10G_LANE_LANE_23(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 140, 0,\ + 1, 4) #define SD10G_LANE_LANE_23_CFG_DFE_PD BIT(0) #define SD10G_LANE_LANE_23_CFG_DFE_PD_SET(x)\ @@ -397,8 +444,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_24 */ -#define SD10G_LANE_LANE_24(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 144, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_24 */ +#define SD10G_LANE_LANE_24(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 144, 0,\ + 1, 4) #define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET(x)\ @@ -412,8 +461,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_26 */ -#define SD10G_LANE_LANE_26(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 152, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_26 */ +#define SD10G_LANE_LANE_26(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 152, 0,\ + 1, 4) #define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET(x)\ @@ -421,8 +472,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_2F */ -#define SD10G_LANE_LANE_2F(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 188, 0, 1, 
4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_2F */ +#define SD10G_LANE_LANE_2F(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 188, 0,\ + 1, 4) #define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET(x)\ @@ -436,8 +489,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_30 */ -#define SD10G_LANE_LANE_30(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 192, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_30 */ +#define SD10G_LANE_LANE_30(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 192, 0,\ + 1, 4) #define SD10G_LANE_LANE_30_CFG_SUMMER_EN BIT(0) #define SD10G_LANE_LANE_30_CFG_SUMMER_EN_SET(x)\ @@ -451,8 +506,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_31 */ -#define SD10G_LANE_LANE_31(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 196, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_31 */ +#define SD10G_LANE_LANE_31(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 196, 0,\ + 1, 4) #define SD10G_LANE_LANE_31_CFG_PI_RSTN BIT(0) #define SD10G_LANE_LANE_31_CFG_PI_RSTN_SET(x)\ @@ -490,8 +547,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_31_CFG_R50_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_31_CFG_R50_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_32 */ -#define SD10G_LANE_LANE_32(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 200, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_32 */ +#define SD10G_LANE_LANE_32(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 200, 0,\ + 1, 4) #define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0_SET(x)\ @@ -505,8 +564,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_33 */ -#define SD10G_LANE_LANE_33(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 204, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_33 */ +#define SD10G_LANE_LANE_33(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 204, 0,\ + 1, 4) #define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\ @@ -520,8 +581,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_35 */ -#define SD10G_LANE_LANE_35(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 212, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_35 */ +#define SD10G_LANE_LANE_35(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 212, 0,\ + 1, 4) #define SD10G_LANE_LANE_35_CFG_TXRATE_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET(x)\ @@ -535,8 +598,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_35_CFG_RXRATE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_35_CFG_RXRATE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_36 */ -#define SD10G_LANE_LANE_36(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 216, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_36 */ +#define SD10G_LANE_LANE_36(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 216, 0,\ + 1, 4) #define 
SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET(x)\ @@ -568,8 +633,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_36_CFG_PRBS_SETB_GET(x)\ FIELD_GET(SD10G_LANE_LANE_36_CFG_PRBS_SETB, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_37 */ -#define SD10G_LANE_LANE_37(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 220, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_37 */ +#define SD10G_LANE_LANE_37(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 220, 0,\ + 1, 4) #define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD BIT(0) #define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD_SET(x)\ @@ -595,8 +662,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_39 */ -#define SD10G_LANE_LANE_39(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 228, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_39 */ +#define SD10G_LANE_LANE_39(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 228, 0,\ + 1, 4) #define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0_SET(x)\ @@ -610,8 +679,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_39_CFG_RX_SSC_LH_GET(x)\ FIELD_GET(SD10G_LANE_LANE_39_CFG_RX_SSC_LH, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3A */ -#define SD10G_LANE_LANE_3A(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 232, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3A */ +#define SD10G_LANE_LANE_3A(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 232, 0,\ + 1, 4) #define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0_SET(x)\ @@ -625,8 +696,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3C */ -#define SD10G_LANE_LANE_3C(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 240, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3C */ +#define SD10G_LANE_LANE_3C(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 240, 0,\ + 1, 4) #define SD10G_LANE_LANE_3C_CFG_DIS_ACC BIT(0) #define SD10G_LANE_LANE_3C_CFG_DIS_ACC_SET(x)\ @@ -640,8 +713,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_GET(x)\ FIELD_GET(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_40 */ -#define SD10G_LANE_LANE_40(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 256, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_40 */ +#define SD10G_LANE_LANE_40(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 256, 0,\ + 1, 4) #define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET(x)\ @@ -649,8 +724,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_41 */ -#define SD10G_LANE_LANE_41(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 260, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_41 */ +#define SD10G_LANE_LANE_41(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 260, 0,\ + 1, 4) #define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8 GENMASK(7, 0) #define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET(x)\ @@ -658,8 +735,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_GET(x)\ 
FIELD_GET(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_42 */ -#define SD10G_LANE_LANE_42(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 264, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_42 */ +#define SD10G_LANE_LANE_42(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 264, 0,\ + 1, 4) #define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET(x)\ @@ -673,8 +752,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_48 */ -#define SD10G_LANE_LANE_48(t) __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 0, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_48 */ +#define SD10G_LANE_LANE_48(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 288, 0, 1, 40, 0, 0, \ + 1, 4) #define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET(x)\ @@ -694,8 +775,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_48_CFG_CLK_ENQ_GET(x)\ FIELD_GET(SD10G_LANE_LANE_48_CFG_CLK_ENQ, x) -/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_50 */ -#define SD10G_LANE_LANE_50(t) __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 32, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_50 */ +#define SD10G_LANE_LANE_50(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 288, 0, 1, 40, 32, 0,\ + 1, 4) #define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0_SET(x)\ @@ -727,8 +810,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_50_CFG_JT_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_50_CFG_JT_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_2:LANE_52 */ -#define SD10G_LANE_LANE_52(t) __REG(TARGET_SD10G_LANE, t, 12, 328, 0, 1, 24, 0, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_2:LANE_52 */ +#define SD10G_LANE_LANE_52(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 328, 0, 1, 24, 0, 0, \ + 1, 4) #define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0 GENMASK(5, 0) #define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET(x)\ @@ -736,8 +821,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_4:LANE_83 */ -#define SD10G_LANE_LANE_83(t) __REG(TARGET_SD10G_LANE, t, 12, 464, 0, 1, 112, 60, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_4:LANE_83 */ +#define SD10G_LANE_LANE_83(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 464, 0, 1, 112, 60, \ + 0, 1, 4) #define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE BIT(0) #define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE_SET(x)\ @@ -781,8 +868,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_83_R_CTLE_RSTN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_83_R_CTLE_RSTN, x) -/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_93 */ -#define SD10G_LANE_LANE_93(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 12, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_93 */ +#define SD10G_LANE_LANE_93(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 576, 0, 1, 64, 12, 0,\ + 1, 4) #define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN BIT(0) #define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN_SET(x)\ @@ -832,8 +921,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT_GET(x)\ FIELD_GET(SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT, x) -/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_94 */ -#define SD10G_LANE_LANE_94(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 16, 0, 1, 
4) +/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_94 */ +#define SD10G_LANE_LANE_94(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 576, 0, 1, 64, 16, 0,\ + 1, 4) #define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET(x)\ @@ -865,8 +956,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_94_R_SWING_REG_GET(x)\ FIELD_GET(SD10G_LANE_LANE_94_R_SWING_REG, x) -/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_9E */ -#define SD10G_LANE_LANE_9E(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 56, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_9E */ +#define SD10G_LANE_LANE_9E(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 576, 0, 1, 64, 56, 0,\ + 1, 4) #define SD10G_LANE_LANE_9E_R_RXEQ_REG BIT(0) #define SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(x)\ @@ -886,8 +979,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN, x) -/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A1 */ -#define SD10G_LANE_LANE_A1(t) __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 4, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A1 */ +#define SD10G_LANE_LANE_A1(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 640, 0, 1, 128, 4, 0,\ + 1, 4) #define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0_SET(x)\ @@ -919,8 +1014,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_A1_R_PCLK_GATING_GET(x)\ FIELD_GET(SD10G_LANE_LANE_A1_R_PCLK_GATING, x) -/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A2 */ -#define SD10G_LANE_LANE_A2(t) __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 8, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A2 */ +#define SD10G_LANE_LANE_A2(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 640, 0, 1, 128, 8, 0,\ + 1, 4) #define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0) #define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET(x)\ @@ -928,8 +1025,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ -#define SD10G_LANE_LANE_DF(t) __REG(TARGET_SD10G_LANE, t, 12, 832, 0, 1, 84, 60, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ +#define SD10G_LANE_LANE_DF(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 832, 0, 1, 84, 60, 0,\ + 1, 4) #define SD10G_LANE_LANE_DF_LOL_UDL BIT(0) #define SD10G_LANE_LANE_DF_LOL_UDL_SET(x)\ @@ -955,8 +1054,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_DF_SQUELCH_GET(x)\ FIELD_GET(SD10G_LANE_LANE_DF_SQUELCH, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_09 */ -#define SD25G_LANE_CMU_09(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 36, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_09 */ +#define SD25G_LANE_CMU_09(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 36, 0, 1, 4) #define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN BIT(0) #define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN_SET(x)\ @@ -988,8 +1089,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0B */ -#define SD25G_LANE_CMU_0B(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 44, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0B */ +#define SD25G_LANE_CMU_0B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 44, 0, 1, 4) #define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT BIT(0) #define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT_SET(x)\ @@ -1039,8 +1142,10 @@ enum 
sparx5_serdes_target { #define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0C */ -#define SD25G_LANE_CMU_0C(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 48, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0C */ +#define SD25G_LANE_CMU_0C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 48, 0, 1, 4) #define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET BIT(0) #define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(x)\ @@ -1072,8 +1177,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0D */ -#define SD25G_LANE_CMU_0D(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 52, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0D */ +#define SD25G_LANE_CMU_0D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 52, 0, 1, 4) #define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD BIT(0) #define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD_SET(x)\ @@ -1105,8 +1212,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0E */ -#define SD25G_LANE_CMU_0E(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 56, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0E */ +#define SD25G_LANE_CMU_0E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 56, 0, 1, 4) #define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0 GENMASK(3, 0) #define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(x)\ @@ -1120,8 +1229,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_13 */ -#define SD25G_LANE_CMU_13(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 76, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_13 */ +#define SD25G_LANE_CMU_13(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 76, 0, 1, 4) #define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0 GENMASK(3, 0) #define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET(x)\ @@ -1135,8 +1246,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_13_CFG_JT_EN_GET(x)\ FIELD_GET(SD25G_LANE_CMU_13_CFG_JT_EN, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_18 */ -#define SD25G_LANE_CMU_18(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 96, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_18 */ +#define SD25G_LANE_CMU_18(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 96, 0, 1, 4) #define SD25G_LANE_CMU_18_R_PLL_RSTN BIT(0) #define SD25G_LANE_CMU_18_R_PLL_RSTN_SET(x)\ @@ -1162,8 +1275,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_19 */ -#define SD25G_LANE_CMU_19(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 100, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_19 */ +#define SD25G_LANE_CMU_19(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 100, 0, 1, 4) #define SD25G_LANE_CMU_19_R_CK_RESETB BIT(0) #define SD25G_LANE_CMU_19_R_CK_RESETB_SET(x)\ @@ -1177,8 +1292,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_19_R_PLL_DLOL_EN_GET(x)\ FIELD_GET(SD25G_LANE_CMU_19_R_PLL_DLOL_EN, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_1A */ -#define SD25G_LANE_CMU_1A(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 104, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_1A */ +#define SD25G_LANE_CMU_1A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 104, 0, 1, 4) #define 
SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0 GENMASK(2, 0) #define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(x)\ @@ -1204,8 +1321,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_1A_R_REG_MANUAL_GET(x)\ FIELD_GET(SD25G_LANE_CMU_1A_R_REG_MANUAL, x) -/* SD25G_TARGET:CMU_GRP_1:CMU_2A */ -#define SD25G_LANE_CMU_2A(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 36, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_1:CMU_2A */ +#define SD25G_LANE_CMU_2A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 36, 0, 1, 4) #define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0 GENMASK(1, 0) #define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0_SET(x)\ @@ -1225,8 +1344,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_GET(x)\ FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS, x) -/* SD25G_TARGET:CMU_GRP_1:CMU_30 */ -#define SD25G_LANE_CMU_30(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 60, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_1:CMU_30 */ +#define SD25G_LANE_CMU_30(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 60, 0, 1, 4) #define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 GENMASK(2, 0) #define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET(x)\ @@ -1240,8 +1361,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0, x) -/* SD25G_TARGET:CMU_GRP_1:CMU_31 */ -#define SD25G_LANE_CMU_31(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 64, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_1:CMU_31 */ +#define SD25G_LANE_CMU_31(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 64, 0, 1, 4) #define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET(x)\ @@ -1249,8 +1372,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0, x) -/* SD25G_TARGET:CMU_GRP_2:CMU_40 */ -#define SD25G_LANE_CMU_40(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_2:CMU_40 */ +#define SD25G_LANE_CMU_40(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 0, 0, 1, 4) #define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL BIT(0) #define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL_SET(x)\ @@ -1288,8 +1413,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST_GET(x)\ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST, x) -/* SD25G_TARGET:CMU_GRP_2:CMU_45 */ -#define SD25G_LANE_CMU_45(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_2:CMU_45 */ +#define SD25G_LANE_CMU_45(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 20, 0, 1, 4) #define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET(x)\ @@ -1297,8 +1424,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0, x) -/* SD25G_TARGET:CMU_GRP_2:CMU_46 */ -#define SD25G_LANE_CMU_46(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 24, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_2:CMU_46 */ +#define SD25G_LANE_CMU_46(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 24, 0, 1, 4) #define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8 GENMASK(7, 0) #define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET(x)\ @@ -1306,8 +1435,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_GET(x)\ FIELD_GET(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8, x) -/* 
SD25G_TARGET:CMU_GRP_3:CMU_C0 */ -#define SD25G_LANE_CMU_C0(t) __REG(TARGET_SD25G_LANE, t, 8, 768, 0, 1, 252, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_3:CMU_C0 */ +#define SD25G_LANE_CMU_C0(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 768, 0, 1, 252, 0, 0, 1, 4) #define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0 GENMASK(3, 0) #define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_SET(x)\ @@ -1321,8 +1452,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(x)\ FIELD_GET(SD25G_LANE_CMU_C0_PLL_LOL_UDL, x) -/* SD25G_TARGET:CMU_GRP_4:CMU_FF */ -#define SD25G_LANE_CMU_FF(t) __REG(TARGET_SD25G_LANE, t, 8, 1020, 0, 1, 4, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_4:CMU_FF */ +#define SD25G_LANE_CMU_FF(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1020, 0, 1, 4, 0, 0, 1, 4) #define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX GENMASK(7, 0) #define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(x)\ @@ -1330,8 +1463,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_GET(x)\ FIELD_GET(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_00 */ -#define SD25G_LANE_LANE_00(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_00 */ +#define SD25G_LANE_LANE_00(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 0, 0, 1, 4) #define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0_SET(x)\ @@ -1345,8 +1480,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_01 */ -#define SD25G_LANE_LANE_01(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_01 */ +#define SD25G_LANE_LANE_01(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 4, 0, 1, 4) #define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\ @@ -1360,8 +1497,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_03 */ -#define SD25G_LANE_LANE_03(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 12, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_03 */ +#define SD25G_LANE_LANE_03(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 12, 0, 1, 4) #define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0 GENMASK(4, 0) #define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(x)\ @@ -1369,8 +1508,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_04 */ -#define SD25G_LANE_LANE_04(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 16, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_04 */ +#define SD25G_LANE_LANE_04(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 16, 0, 1, 4) #define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN BIT(0) #define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(x)\ @@ -1408,8 +1549,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_05 */ -#define SD25G_LANE_LANE_05(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_05 */ +#define SD25G_LANE_LANE_05(t) \ + 
__REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 20, 0, 1, 4) #define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0_SET(x)\ @@ -1423,8 +1566,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_05_LN_CFG_BW_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_05_LN_CFG_BW_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_06 */ -#define SD25G_LANE_LANE_06(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 24, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_06 */ +#define SD25G_LANE_LANE_06(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 24, 0, 1, 4) #define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN BIT(0) #define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN_SET(x)\ @@ -1438,8 +1583,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_07 */ -#define SD25G_LANE_LANE_07(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 28, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_07 */ +#define SD25G_LANE_LANE_07(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 28, 0, 1, 4) #define SD25G_LANE_LANE_07_LN_CFG_EN_ADV BIT(0) #define SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(x)\ @@ -1459,8 +1606,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_07_LN_CFG_EN_DLY_GET(x)\ FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_DLY, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_09 */ -#define SD25G_LANE_LANE_09(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 36, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_09 */ +#define SD25G_LANE_LANE_09(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 36, 0, 1, 4) #define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET(x)\ @@ -1468,8 +1617,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0A */ -#define SD25G_LANE_LANE_0A(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 40, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0A */ +#define SD25G_LANE_LANE_0A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 40, 0, 1, 4) #define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0 GENMASK(5, 0) #define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET(x)\ @@ -1477,8 +1628,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0B */ -#define SD25G_LANE_LANE_0B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 44, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0B */ +#define SD25G_LANE_LANE_0B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 44, 0, 1, 4) #define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN BIT(0) #define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET(x)\ @@ -1498,8 +1651,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0C */ -#define SD25G_LANE_LANE_0C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 48, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0C */ +#define SD25G_LANE_LANE_0C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 48, 0, 1, 4) #define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\ @@ 
-1519,8 +1674,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0D */ -#define SD25G_LANE_LANE_0D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 52, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0D */ +#define SD25G_LANE_LANE_0D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 52, 0, 1, 4) #define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(x)\ @@ -1552,8 +1709,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0E */ -#define SD25G_LANE_LANE_0E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 56, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0E */ +#define SD25G_LANE_LANE_0E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 56, 0, 1, 4) #define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN BIT(0) #define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN_SET(x)\ @@ -1579,8 +1738,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0F */ -#define SD25G_LANE_LANE_0F(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 60, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0F */ +#define SD25G_LANE_LANE_0F(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 60, 0, 1, 4) #define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1 GENMASK(4, 0) #define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET(x)\ @@ -1588,8 +1749,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_18 */ -#define SD25G_LANE_LANE_18(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 96, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_18 */ +#define SD25G_LANE_LANE_18(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 96, 0, 1, 4) #define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN BIT(0) #define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(x)\ @@ -1621,8 +1784,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_19 */ -#define SD25G_LANE_LANE_19(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 100, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_19 */ +#define SD25G_LANE_LANE_19(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 100, 0, 1, 4) #define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD BIT(0) #define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD_SET(x)\ @@ -1672,8 +1837,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE_GET(x)\ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_PD_CTLE, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1A */ -#define SD25G_LANE_LANE_1A(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 104, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1A */ +#define SD25G_LANE_LANE_1A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 104, 0, 1, 4) #define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN BIT(0) #define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN_SET(x)\ @@ -1687,8 +1854,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1B */ -#define SD25G_LANE_LANE_1B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 
108, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1B */ +#define SD25G_LANE_LANE_1B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 108, 0, 1, 4) #define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0 GENMASK(7, 0) #define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(x)\ @@ -1696,8 +1865,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1C */ -#define SD25G_LANE_LANE_1C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 112, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1C */ +#define SD25G_LANE_LANE_1C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 112, 0, 1, 4) #define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN BIT(0) #define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(x)\ @@ -1723,8 +1894,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1D */ -#define SD25G_LANE_LANE_1D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 116, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1D */ +#define SD25G_LANE_LANE_1D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 116, 0, 1, 4) #define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR BIT(0) #define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR_SET(x)\ @@ -1774,8 +1947,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1E */ -#define SD25G_LANE_LANE_1E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 120, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1E */ +#define SD25G_LANE_LANE_1E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 120, 0, 1, 4) #define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0 GENMASK(1, 0) #define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0_SET(x)\ @@ -1807,8 +1982,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_21 */ -#define SD25G_LANE_LANE_21(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 132, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_21 */ +#define SD25G_LANE_LANE_21(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 132, 0, 1, 4) #define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0 GENMASK(4, 0) #define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET(x)\ @@ -1816,8 +1993,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_22 */ -#define SD25G_LANE_LANE_22(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 136, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_22 */ +#define SD25G_LANE_LANE_22(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 136, 0, 1, 4) #define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET(x)\ @@ -1825,8 +2004,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_25 */ -#define SD25G_LANE_LANE_25(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 148, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_25 */ +#define SD25G_LANE_LANE_25(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 148, 0, 1, 4) #define 
SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0 GENMASK(6, 0) #define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET(x)\ @@ -1834,8 +2015,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_26 */ -#define SD25G_LANE_LANE_26(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 152, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_26 */ +#define SD25G_LANE_LANE_26(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 152, 0, 1, 4) #define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0 GENMASK(6, 0) #define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET(x)\ @@ -1843,8 +2026,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_28 */ -#define SD25G_LANE_LANE_28(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 160, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_28 */ +#define SD25G_LANE_LANE_28(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 160, 0, 1, 4) #define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN BIT(0) #define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN_SET(x)\ @@ -1870,8 +2055,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2B */ -#define SD25G_LANE_LANE_2B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 172, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2B */ +#define SD25G_LANE_LANE_2B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 172, 0, 1, 4) #define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(x)\ @@ -1891,8 +2078,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2C */ -#define SD25G_LANE_LANE_2C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 176, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2C */ +#define SD25G_LANE_LANE_2C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 176, 0, 1, 4) #define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET(x)\ @@ -1906,8 +2095,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2D */ -#define SD25G_LANE_LANE_2D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 180, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2D */ +#define SD25G_LANE_LANE_2D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 180, 0, 1, 4) #define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET(x)\ @@ -1921,8 +2112,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2E */ -#define SD25G_LANE_LANE_2E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 184, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2E */ +#define SD25G_LANE_LANE_2E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 184, 0, 1, 4) #define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN BIT(0) #define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN_SET(x)\ @@ -1972,8 +2165,10 @@ enum 
sparx5_serdes_target { #define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_40 */ -#define SD25G_LANE_LANE_40(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 256, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_40 */ +#define SD25G_LANE_LANE_40(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 256, 0, 1, 4) #define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE BIT(0) #define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE_SET(x)\ @@ -2017,8 +2212,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_40_LN_R_CTLE_RSTN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_42 */ -#define SD25G_LANE_LANE_42(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 264, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_42 */ +#define SD25G_LANE_LANE_42(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 264, 0, 1, 4) #define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET(x)\ @@ -2026,8 +2223,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_43 */ -#define SD25G_LANE_LANE_43(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 268, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_43 */ +#define SD25G_LANE_LANE_43(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 268, 0, 1, 4) #define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8 GENMASK(7, 0) #define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET(x)\ @@ -2035,8 +2234,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_GET(x)\ FIELD_GET(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_44 */ -#define SD25G_LANE_LANE_44(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 272, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_44 */ +#define SD25G_LANE_LANE_44(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 272, 0, 1, 4) #define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET(x)\ @@ -2044,8 +2245,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_45 */ -#define SD25G_LANE_LANE_45(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 276, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_45 */ +#define SD25G_LANE_LANE_45(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 276, 0, 1, 4) #define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8 GENMASK(7, 0) #define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET(x)\ @@ -2053,8 +2256,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_GET(x)\ FIELD_GET(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8, x) -/* SD25G_TARGET:LANE_GRP_1:LANE_DE */ -#define SD25G_LANE_LANE_DE(t) __REG(TARGET_SD25G_LANE, t, 8, 1792, 0, 1, 128, 120, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_1:LANE_DE */ +#define SD25G_LANE_LANE_DE(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1792, 0, 1, 128, 120, 0, 1, 4) #define SD25G_LANE_LANE_DE_LN_LOL_UDL BIT(0) #define SD25G_LANE_LANE_DE_LN_LOL_UDL_SET(x)\ @@ -2080,8 +2285,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_DE_LN_PMA_RXEI_GET(x)\ FIELD_GET(SD25G_LANE_LANE_DE_LN_PMA_RXEI, x) -/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ -#define SD6G_LANE_LANE_DF(t) 
__REG(TARGET_SD6G_LANE, t, 13, 832, 0, 1, 84, 60, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ +#define SD6G_LANE_LANE_DF(t) \ + __REG(TARGET_SD6G_LANE, t, 13, 832, 0, 1, 84, 60, 0, 1, 4) #define SD6G_LANE_LANE_DF_LOL_UDL BIT(0) #define SD6G_LANE_LANE_DF_LOL_UDL_SET(x)\ @@ -2107,8 +2314,9 @@ enum sparx5_serdes_target { #define SD6G_LANE_LANE_DF_SQUELCH_GET(x)\ FIELD_GET(SD6G_LANE_LANE_DF_SQUELCH, x) -/* SD10G_CMU_TARGET:CMU_GRP_0:CMU_00 */ -#define SD_CMU_CMU_00(t) __REG(TARGET_SD_CMU, t, 14, 0, 0, 1, 20, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_0:CMU_00 */ +#define SD_CMU_CMU_00(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 0, 0, 1, 20, 0, 0, 1, 4) #define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE BIT(0) #define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE_SET(x)\ @@ -2134,8 +2342,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_GET(x)\ FIELD_GET(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_05 */ -#define SD_CMU_CMU_05(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_05 */ +#define SD_CMU_CMU_05(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 0, 0, 1, 4) #define SD_CMU_CMU_05_CFG_REFCK_TERM_EN BIT(0) #define SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(x)\ @@ -2149,9 +2358,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_GET(x)\ FIELD_GET(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_06 */ -#define SD_CMU_CMU_06(t) \ - __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 4, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_06 */ +#define SD_CMU_CMU_06(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 4, 0, 1, 4) #define SD_CMU_CMU_06_CFG_DISLOS BIT(0) #define SD_CMU_CMU_06_CFG_DISLOS_SET(x)\ @@ -2201,9 +2410,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_06_CFG_VCO_CAL_BYP_GET(x)\ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_CAL_BYP, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_08 */ -#define SD_CMU_CMU_08(t) \ - __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 12, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_08 */ +#define SD_CMU_CMU_08(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 12, 0, 1, 4) #define SD_CMU_CMU_08_CFG_VFILT2PAD BIT(0) #define SD_CMU_CMU_08_CFG_VFILT2PAD_SET(x)\ @@ -2235,8 +2444,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN_GET(x)\ FIELD_GET(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */ -#define SD_CMU_CMU_09(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 16, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */ +#define SD_CMU_CMU_09(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 16, 0, 1, 4) #define SD_CMU_CMU_09_CFG_EN_TX_CK_UP BIT(0) #define SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(x)\ @@ -2262,8 +2472,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_09_CFG_SW_10G_GET(x)\ FIELD_GET(SD_CMU_CMU_09_CFG_SW_10G, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_0D */ -#define SD_CMU_CMU_0D(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 32, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_0D */ +#define SD_CMU_CMU_0D(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 32, 0, 1, 4) #define SD_CMU_CMU_0D_CFG_PD_DIV64 BIT(0) #define SD_CMU_CMU_0D_CFG_PD_DIV64_SET(x)\ @@ -2295,8 +2506,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_0D_CFG_REFCK_PD_GET(x)\ FIELD_GET(SD_CMU_CMU_0D_CFG_REFCK_PD, x) -/* SD10G_CMU_TARGET:CMU_GRP_3:CMU_1B */ -#define SD_CMU_CMU_1B(t) __REG(TARGET_SD_CMU, t, 14, 104, 0, 1, 20, 4, 0, 1, 4) +/* 
SD10G_CMU_TARGET:CMU_GRP_3:CMU_1B */ +#define SD_CMU_CMU_1B(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 104, 0, 1, 20, 4, 0, 1, 4) #define SD_CMU_CMU_1B_CFG_RESERVE_7_0 GENMASK(7, 0) #define SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(x)\ @@ -2304,8 +2516,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_1B_CFG_RESERVE_7_0_GET(x)\ FIELD_GET(SD_CMU_CMU_1B_CFG_RESERVE_7_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_4:CMU_1F */ -#define SD_CMU_CMU_1F(t) __REG(TARGET_SD_CMU, t, 14, 124, 0, 1, 68, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_4:CMU_1F */ +#define SD_CMU_CMU_1F(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 124, 0, 1, 68, 0, 0, 1, 4) #define SD_CMU_CMU_1F_CFG_BIAS_DN_EN BIT(0) #define SD_CMU_CMU_1F_CFG_BIAS_DN_EN_SET(x)\ @@ -2331,8 +2544,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_1F_CFG_VTUNE_SEL_GET(x)\ FIELD_GET(SD_CMU_CMU_1F_CFG_VTUNE_SEL, x) -/* SD10G_CMU_TARGET:CMU_GRP_5:CMU_30 */ -#define SD_CMU_CMU_30(t) __REG(TARGET_SD_CMU, t, 14, 192, 0, 1, 72, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_5:CMU_30 */ +#define SD_CMU_CMU_30(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 192, 0, 1, 72, 0, 0, 1, 4) #define SD_CMU_CMU_30_R_PLL_DLOL_EN BIT(0) #define SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(x)\ @@ -2340,8 +2554,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_30_R_PLL_DLOL_EN_GET(x)\ FIELD_GET(SD_CMU_CMU_30_R_PLL_DLOL_EN, x) -/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_44 */ -#define SD_CMU_CMU_44(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 8, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_44 */ +#define SD_CMU_CMU_44(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 264, 0, 1, 632, 8, 0, 1, 4) #define SD_CMU_CMU_44_R_PLL_RSTN BIT(0) #define SD_CMU_CMU_44_R_PLL_RSTN_SET(x)\ @@ -2355,8 +2570,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_44_R_CK_RESETB_GET(x)\ FIELD_GET(SD_CMU_CMU_44_R_CK_RESETB, x) -/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_45 */ -#define SD_CMU_CMU_45(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 12, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_45 */ +#define SD_CMU_CMU_45(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 264, 0, 1, 632, 12, 0, 1, 4) #define SD_CMU_CMU_45_R_EN_RATECHG_CTRL BIT(0) #define SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(x)\ @@ -2406,8 +2622,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN_GET(x)\ FIELD_GET(SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN, x) -/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_47 */ -#define SD_CMU_CMU_47(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 20, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_47 */ +#define SD_CMU_CMU_47(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 264, 0, 1, 632, 20, 0, 1, 4) #define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0) #define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(x)\ @@ -2415,8 +2632,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_GET(x)\ FIELD_GET(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_7:CMU_E0 */ -#define SD_CMU_CMU_E0(t) __REG(TARGET_SD_CMU, t, 14, 896, 0, 1, 8, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_7:CMU_E0 */ +#define SD_CMU_CMU_E0(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 896, 0, 1, 8, 0, 0, 1, 4) #define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0 GENMASK(3, 0) #define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0_SET(x)\ @@ -2430,8 +2648,10 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_E0_PLL_LOL_UDL_GET(x)\ FIELD_GET(SD_CMU_CMU_E0_PLL_LOL_UDL, x) -/* SD_CMU_TARGET:SD_CMU_CFG:SD_CMU_CFG */ -#define SD_CMU_CFG_SD_CMU_CFG(t) __REG(TARGET_SD_CMU_CFG, t, 14, 0, 0, 1, 8, 0, 0, 1, 4) +/* SD_CMU_TARGET:SD_CMU_CFG:SD_CMU_CFG */ +#define 
SD_CMU_CFG_SD_CMU_CFG(t) \ + __REG(TARGET_SD_CMU_CFG, t, TSIZE(TC_SD_CMU_CFG), 0, 0, 1, 8, 0, 0, 1, \ + 4) #define SD_CMU_CFG_SD_CMU_CFG_CMU_RST BIT(0) #define SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(x)\ @@ -2445,8 +2665,9 @@ enum sparx5_serdes_target { #define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_GET(x)\ FIELD_GET(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, x) -/* SD_LANE_TARGET:SD_RESET:SD_SER_RST */ -#define SD_LANE_SD_SER_RST(t) __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 0, 0, 1, 4) +/* SD_LANE_TARGET:SD_RESET:SD_SER_RST */ +#define SD_LANE_SD_SER_RST(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 0, 0, 1, 8, 0, 0, 1, 4) #define SD_LANE_SD_SER_RST_SER_RST BIT(0) #define SD_LANE_SD_SER_RST_SER_RST_SET(x)\ @@ -2454,8 +2675,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_SER_RST_SER_RST_GET(x)\ FIELD_GET(SD_LANE_SD_SER_RST_SER_RST, x) -/* SD_LANE_TARGET:SD_RESET:SD_DES_RST */ -#define SD_LANE_SD_DES_RST(t) __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 4, 0, 1, 4) +/* SD_LANE_TARGET:SD_RESET:SD_DES_RST */ +#define SD_LANE_SD_DES_RST(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 0, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_SD_DES_RST_DES_RST BIT(0) #define SD_LANE_SD_DES_RST_DES_RST_SET(x)\ @@ -2463,8 +2685,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_DES_RST_DES_RST_GET(x)\ FIELD_GET(SD_LANE_SD_DES_RST_DES_RST, x) -/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ -#define SD_LANE_SD_LANE_CFG(t) __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 0, 0, 1, 4) +/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ +#define SD_LANE_SD_LANE_CFG(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 8, 0, 1, 8, 0, 0, 1, 4) #define SD_LANE_SD_LANE_CFG_MACRO_RST BIT(0) #define SD_LANE_SD_LANE_CFG_MACRO_RST_SET(x)\ @@ -2508,8 +2731,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_LANE_CFG_LANE_RX_RST_GET(x)\ FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_RX_RST, x) -/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ -#define SD_LANE_SD_LANE_STAT(t) __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 4, 0, 1, 4) +/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ +#define SD_LANE_SD_LANE_STAT(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 8, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_SD_LANE_STAT_PMA_RST_DONE BIT(0) #define SD_LANE_SD_LANE_STAT_PMA_RST_DONE_SET(x)\ @@ -2529,9 +2753,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_LANE_STAT_DBG_OBS_GET(x)\ FIELD_GET(SD_LANE_SD_LANE_STAT_DBG_OBS, x) -/* SD_LANE_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ -#define SD_LANE_QUIET_MODE_6G(t) \ - __REG(TARGET_SD_LANE, t, 25, 24, 0, 1, 8, 4, 0, 1, 4) +/* SD_LANE_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ +#define SD_LANE_QUIET_MODE_6G(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 24, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0) #define SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(x)\ @@ -2539,8 +2763,9 @@ enum sparx5_serdes_target { #define SD_LANE_QUIET_MODE_6G_QUIET_MODE_GET(x)\ FIELD_GET(SD_LANE_QUIET_MODE_6G_QUIET_MODE, x) -/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */ -#define SD_LANE_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 0, 0, 1, 4) +/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */ +#define SD_LANE_MISC(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 56, 0, 1, 56, 0, 0, 1, 4) #define SD_LANE_MISC_SD_125_RST_DIS BIT(0) #define SD_LANE_MISC_SD_125_RST_DIS_SET(x)\ @@ -2560,14 +2785,16 @@ enum sparx5_serdes_target { #define SD_LANE_MISC_MUX_ENA_GET(x)\ FIELD_GET(SD_LANE_MISC_MUX_ENA, x) +/* SPARX5 ONLY */ #define SD_LANE_MISC_CORE_CLK_FREQ GENMASK(5, 4) #define SD_LANE_MISC_CORE_CLK_FREQ_SET(x)\ FIELD_PREP(SD_LANE_MISC_CORE_CLK_FREQ, x) 
#define SD_LANE_MISC_CORE_CLK_FREQ_GET(x)\ FIELD_GET(SD_LANE_MISC_CORE_CLK_FREQ, x) -/* SD_LANE_TARGET:CFG_STAT_FX100:M_STAT_MISC */ -#define SD_LANE_M_STAT_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 36, 0, 1, 4) +/* SD_LANE_TARGET:CFG_STAT_FX100:M_STAT_MISC */ +#define SD_LANE_M_STAT_MISC(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 56, 0, 1, 56, 36, 0, 1, 4) #define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM GENMASK(21, 0) #define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM_SET(x)\ @@ -2581,8 +2808,10 @@ enum sparx5_serdes_target { #define SD_LANE_M_STAT_MISC_M_LOCK_CNT_GET(x)\ FIELD_GET(SD_LANE_M_STAT_MISC_M_LOCK_CNT, x) -/* SD25G_CFG_TARGET:SD_RESET:SD_SER_RST */ -#define SD_LANE_25G_SD_SER_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_RESET:SD_SER_RST */ +#define SD_LANE_25G_SD_SER_RST(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 0, 0, 1, 4) #define SD_LANE_25G_SD_SER_RST_SER_RST BIT(0) #define SD_LANE_25G_SD_SER_RST_SER_RST_SET(x)\ @@ -2590,8 +2819,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_SER_RST_SER_RST_GET(x)\ FIELD_GET(SD_LANE_25G_SD_SER_RST_SER_RST, x) -/* SD25G_CFG_TARGET:SD_RESET:SD_DES_RST */ -#define SD_LANE_25G_SD_DES_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_RESET:SD_DES_RST */ +#define SD_LANE_25G_SD_DES_RST(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_25G_SD_DES_RST_DES_RST BIT(0) #define SD_LANE_25G_SD_DES_RST_DES_RST_SET(x)\ @@ -2599,8 +2830,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_DES_RST_DES_RST_GET(x)\ FIELD_GET(SD_LANE_25G_SD_DES_RST_DES_RST, x) -/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ -#define SD_LANE_25G_SD_LANE_CFG(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ +#define SD_LANE_25G_SD_LANE_CFG(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 0, 0, 1, 4) #define SD_LANE_25G_SD_LANE_CFG_MACRO_RST BIT(0) #define SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(x)\ @@ -2698,8 +2931,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN_GET(x)\ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN, x) -/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG2 */ -#define SD_LANE_25G_SD_LANE_CFG2(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG2 */ +#define SD_LANE_25G_SD_LANE_CFG2(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 4, 0, 1, 4) #define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL GENMASK(2, 0) #define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL_SET(x)\ @@ -2767,8 +3002,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL_GET(x)\ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL, x) -/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ -#define SD_LANE_25G_SD_LANE_STAT(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 8, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ +#define SD_LANE_25G_SD_LANE_STAT(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 8, 0, 1, 4) #define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE BIT(0) #define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_SET(x)\ @@ -2788,8 +3025,9 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_GET(x)\ FIELD_GET(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x) -/* SD25G_CFG_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ -#define SD_LANE_25G_QUIET_MODE_6G(t) \ +/* SPARX5 ONLY */ +/* 
SD25G_CFG_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ +#define SD_LANE_25G_QUIET_MODE_6G(t) \ __REG(TARGET_SD_LANE_25G, t, 8, 28, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0) diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index 7bbf729a7c90..7cb020dd3423 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -704,7 +704,7 @@ static void cpcap_usb_phy_remove(struct platform_device *pdev) static struct platform_driver cpcap_usb_phy_driver = { .probe = cpcap_usb_phy_probe, - .remove_new = cpcap_usb_phy_remove, + .remove = cpcap_usb_phy_remove, .driver = { .name = "cpcap-usb-phy", .of_match_table = of_match_ptr(cpcap_usb_phy_id_table), diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index 376d023a0aa9..152344e4f7e4 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -655,7 +655,7 @@ static void phy_mdm6600_remove(struct platform_device *pdev) static struct platform_driver phy_mdm6600_driver = { .probe = phy_mdm6600_probe, - .remove_new = phy_mdm6600_remove, + .remove = phy_mdm6600_remove, .driver = { .name = "phy-mapphone-mdm6600", .pm = &phy_mdm6600_pm_ops, diff --git a/drivers/phy/phy-airoha-pcie-regs.h b/drivers/phy/phy-airoha-pcie-regs.h index bb1f679ca1df..b938a7b468fe 100644 --- a/drivers/phy/phy-airoha-pcie-regs.h +++ b/drivers/phy/phy-airoha-pcie-regs.h @@ -197,9 +197,9 @@ #define CSR_2L_PXP_TX1_MULTLANE_EN BIT(0) #define REG_CSR_2L_RX0_REV0 0x00fc -#define CSR_2L_PXP_VOS_PNINV GENMASK(3, 2) -#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(6, 4) -#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(10, 8) +#define CSR_2L_PXP_VOS_PNINV GENMASK(19, 18) +#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(22, 20) +#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(26, 24) #define REG_CSR_2L_RX0_PHYCK_DIV 0x0100 #define CSR_2L_PXP_RX0_PHYCK_SEL GENMASK(9, 8) diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c index 1e410eb41058..56e9ade8a9fd 100644 --- a/drivers/phy/phy-airoha-pcie.c +++ b/drivers/phy/phy-airoha-pcie.c @@ -459,7 +459,7 @@ static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy) airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET, CSR_2L_PXP_CLKTX1_SR); airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0, - CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd); + CSR_2L_PXP_PLL_RESERVE_MASK, 0xd0d); } static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy) @@ -471,9 +471,9 @@ static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy) PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST | PCIE_SW_RX_RST); airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET, - PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET); + PCIE_TX_TOP_RST | PCIE_TX_CAL_RST); airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET, - PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET); + PCIE_TX_TOP_RST | PCIE_TX_CAL_RST); } static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy) @@ -802,7 +802,7 @@ static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy) airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM, CSR_2L_PXP_JCPLL_SDM_IFM); airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN, - REG_CSR_2L_JCPLL_SDM_HREN); + CSR_2L_PXP_JCPLL_SDM_HREN); airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY, CSR_2L_PXP_JCPLL_SDM_DI_EN); airoha_phy_csr_2l_set_bits(pcie_phy, 
REG_CSR_2L_JCPLL_SSC, diff --git a/drivers/phy/phy-lgm-usb.c b/drivers/phy/phy-lgm-usb.c index 410729c7f513..eb7c6fed20d3 100644 --- a/drivers/phy/phy-lgm-usb.c +++ b/drivers/phy/phy-lgm-usb.c @@ -271,7 +271,7 @@ static struct platform_driver lgm_phy_driver = { .of_match_table = intel_usb_phy_dt_ids, }, .probe = phy_probe, - .remove_new = phy_remove, + .remove = phy_remove, }; module_platform_driver(lgm_phy_driver); diff --git a/drivers/phy/phy-nxp-ptn3222.c b/drivers/phy/phy-nxp-ptn3222.c new file mode 100644 index 000000000000..c6179d8701e6 --- /dev/null +++ b/drivers/phy/phy-nxp-ptn3222.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024, Linaro Limited + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#define NUM_SUPPLIES 2 + +struct ptn3222 { + struct i2c_client *client; + struct phy *phy; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static int ptn3222_init(struct phy *phy) +{ + struct ptn3222 *ptn3222 = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(NUM_SUPPLIES, ptn3222->supplies); + if (ret) + return ret; + + gpiod_set_value_cansleep(ptn3222->reset_gpio, 0); + + return 0; +} + +static int ptn3222_exit(struct phy *phy) +{ + struct ptn3222 *ptn3222 = phy_get_drvdata(phy); + + gpiod_set_value_cansleep(ptn3222->reset_gpio, 1); + + return regulator_bulk_disable(NUM_SUPPLIES, ptn3222->supplies); +} + +static const struct phy_ops ptn3222_ops = { + .init = ptn3222_init, + .exit = ptn3222_exit, + .owner = THIS_MODULE, +}; + +static const struct regulator_bulk_data ptn3222_supplies[NUM_SUPPLIES] = { + { + .supply = "vdd3v3", + .init_load_uA = 11000, + }, { + .supply = "vdd1v8", + .init_load_uA = 55000, + } +}; + +static int ptn3222_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct phy_provider *phy_provider; + struct ptn3222 *ptn3222; + int ret; + + ptn3222 = devm_kzalloc(dev, sizeof(*ptn3222), GFP_KERNEL); + if (!ptn3222) + return -ENOMEM; + + ptn3222->client = client; + + ptn3222->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(ptn3222->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ptn3222->reset_gpio), + "unable to acquire reset gpio\n"); + + ret = devm_regulator_bulk_get_const(dev, NUM_SUPPLIES, ptn3222_supplies, + &ptn3222->supplies); + if (ret) + return ret; + + ptn3222->phy = devm_phy_create(dev, dev->of_node, &ptn3222_ops); + if (IS_ERR(ptn3222->phy)) { + dev_err(dev, "failed to create PHY: %pe\n", ptn3222->phy); + return PTR_ERR(ptn3222->phy); + } + + phy_set_drvdata(ptn3222->phy, ptn3222); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct i2c_device_id ptn3222_table[] = { + { "ptn3222" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ptn3222_table); + +static const struct of_device_id ptn3222_of_table[] = { + { .compatible = "nxp,ptn3222" }, + { } +}; +MODULE_DEVICE_TABLE(of, ptn3222_of_table); + +static struct i2c_driver ptn3222_driver = { + .driver = { + .name = "ptn3222", + .of_match_table = ptn3222_of_table, + }, + .probe = ptn3222_probe, + .id_table = ptn3222_table, +}; + +module_i2c_driver(ptn3222_driver); + +MODULE_DESCRIPTION("NXP PTN3222 eUSB2 Redriver driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c 
index 3642a5d4f2f3..cae290a6e19f 100644 --- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c @@ -257,12 +257,12 @@ static const struct of_device_id qcom_apq8064_sata_phy_of_match[] = { MODULE_DEVICE_TABLE(of, qcom_apq8064_sata_phy_of_match); static struct platform_driver qcom_apq8064_sata_phy_driver = { - .probe = qcom_apq8064_sata_phy_probe, - .remove_new = qcom_apq8064_sata_phy_remove, + .probe = qcom_apq8064_sata_phy_probe, + .remove = qcom_apq8064_sata_phy_remove, .driver = { - .name = "qcom-apq8064-sata-phy", - .of_match_table = qcom_apq8064_sata_phy_of_match, - } + .name = "qcom-apq8064-sata-phy", + .of_match_table = qcom_apq8064_sata_phy_of_match, + }, }; module_platform_driver(qcom_apq8064_sata_phy_driver); diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c index da2b32fb5b45..f1b51018683d 100644 --- a/drivers/phy/qualcomm/phy-qcom-edp.c +++ b/drivers/phy/qualcomm/phy-qcom-edp.c @@ -32,16 +32,8 @@ #define DP_PHY_PD_CTL 0x001c #define DP_PHY_MODE 0x0020 -#define DP_PHY_AUX_CFG0 0x0024 -#define DP_PHY_AUX_CFG1 0x0028 -#define DP_PHY_AUX_CFG2 0x002C -#define DP_PHY_AUX_CFG3 0x0030 -#define DP_PHY_AUX_CFG4 0x0034 -#define DP_PHY_AUX_CFG5 0x0038 -#define DP_PHY_AUX_CFG6 0x003C -#define DP_PHY_AUX_CFG7 0x0040 -#define DP_PHY_AUX_CFG8 0x0044 -#define DP_PHY_AUX_CFG9 0x0048 +#define DP_AUX_CFG_SIZE 10 +#define DP_PHY_AUX_CFG(n) (0x24 + (0x04 * (n))) #define DP_PHY_AUX_INTERRUPT_MASK 0x0058 @@ -90,6 +82,7 @@ struct phy_ver_ops { struct qcom_edp_phy_cfg { bool is_edp; + const u8 *aux_cfg; const struct qcom_edp_swing_pre_emph_cfg *swing_pre_emph_cfg; const struct phy_ver_ops *ver_ops; }; @@ -186,11 +179,40 @@ static const struct qcom_edp_swing_pre_emph_cfg edp_phy_swing_pre_emph_cfg = { .pre_emphasis_hbr3_hbr2 = &edp_pre_emp_hbr2_hbr3, }; +static const u8 edp_phy_aux_cfg_v4[10] = { + 0x00, 0x13, 0x24, 0x00, 0x0a, 0x26, 0x0a, 0x03, 0x37, 0x03 +}; + +static const u8 edp_pre_emp_hbr_rbr_v5[4][4] = { + { 0x05, 0x11, 0x17, 0x1d }, + { 0x05, 0x11, 0x18, 0xff }, + { 0x06, 0x11, 0xff, 0xff }, + { 0x00, 0xff, 0xff, 0xff } +}; + +static const u8 edp_pre_emp_hbr2_hbr3_v5[4][4] = { + { 0x0c, 0x15, 0x19, 0x1e }, + { 0x0b, 0x15, 0x19, 0xff }, + { 0x0e, 0x14, 0xff, 0xff }, + { 0x0d, 0xff, 0xff, 0xff } +}; + +static const struct qcom_edp_swing_pre_emph_cfg edp_phy_swing_pre_emph_cfg_v5 = { + .swing_hbr_rbr = &edp_swing_hbr_rbr, + .swing_hbr3_hbr2 = &edp_swing_hbr2_hbr3, + .pre_emphasis_hbr_rbr = &edp_pre_emp_hbr_rbr_v5, + .pre_emphasis_hbr3_hbr2 = &edp_pre_emp_hbr2_hbr3_v5, +}; + +static const u8 edp_phy_aux_cfg_v5[10] = { + 0x00, 0x13, 0xa4, 0x00, 0x0a, 0x26, 0x0a, 0x03, 0x37, 0x03 +}; + static int qcom_edp_phy_init(struct phy *phy) { struct qcom_edp *edp = phy_get_drvdata(phy); + u8 aux_cfg[DP_AUX_CFG_SIZE]; int ret; - u8 cfg8; ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies); if (ret) @@ -200,6 +222,8 @@ static int qcom_edp_phy_init(struct phy *phy) if (ret) goto out_disable_supplies; + memcpy(aux_cfg, edp->cfg->aux_cfg, sizeof(aux_cfg)); + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, edp->edp + DP_PHY_PD_CTL); @@ -222,22 +246,12 @@ static int qcom_edp_phy_init(struct phy *phy) * even needed. 
*/ if (edp->cfg->swing_pre_emph_cfg && !edp->is_edp) - cfg8 = 0xb7; - else - cfg8 = 0x37; + aux_cfg[8] = 0xb7; writel(0xfc, edp->edp + DP_PHY_MODE); - writel(0x00, edp->edp + DP_PHY_AUX_CFG0); - writel(0x13, edp->edp + DP_PHY_AUX_CFG1); - writel(0x24, edp->edp + DP_PHY_AUX_CFG2); - writel(0x00, edp->edp + DP_PHY_AUX_CFG3); - writel(0x0a, edp->edp + DP_PHY_AUX_CFG4); - writel(0x26, edp->edp + DP_PHY_AUX_CFG5); - writel(0x0a, edp->edp + DP_PHY_AUX_CFG6); - writel(0x03, edp->edp + DP_PHY_AUX_CFG7); - writel(cfg8, edp->edp + DP_PHY_AUX_CFG8); - writel(0x03, edp->edp + DP_PHY_AUX_CFG9); + for (int i = 0; i < DP_AUX_CFG_SIZE; i++) + writel(aux_cfg[i], edp->edp + DP_PHY_AUX_CFG(i)); writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | @@ -518,17 +532,27 @@ static const struct phy_ver_ops qcom_edp_phy_ops_v4 = { .com_configure_ssc = qcom_edp_com_configure_ssc_v4, }; +static const struct qcom_edp_phy_cfg sa8775p_dp_phy_cfg = { + .is_edp = false, + .aux_cfg = edp_phy_aux_cfg_v5, + .swing_pre_emph_cfg = &edp_phy_swing_pre_emph_cfg_v5, + .ver_ops = &qcom_edp_phy_ops_v4, +}; + static const struct qcom_edp_phy_cfg sc7280_dp_phy_cfg = { + .aux_cfg = edp_phy_aux_cfg_v4, .ver_ops = &qcom_edp_phy_ops_v4, }; static const struct qcom_edp_phy_cfg sc8280xp_dp_phy_cfg = { + .aux_cfg = edp_phy_aux_cfg_v4, .swing_pre_emph_cfg = &dp_phy_swing_pre_emph_cfg, .ver_ops = &qcom_edp_phy_ops_v4, }; static const struct qcom_edp_phy_cfg sc8280xp_edp_phy_cfg = { .is_edp = true, + .aux_cfg = edp_phy_aux_cfg_v4, .swing_pre_emph_cfg = &edp_phy_swing_pre_emph_cfg, .ver_ops = &qcom_edp_phy_ops_v4, }; @@ -707,6 +731,7 @@ static const struct phy_ver_ops qcom_edp_phy_ops_v6 = { }; static struct qcom_edp_phy_cfg x1e80100_phy_cfg = { + .aux_cfg = edp_phy_aux_cfg_v4, .swing_pre_emph_cfg = &dp_phy_swing_pre_emph_cfg, .ver_ops = &qcom_edp_phy_ops_v6, }; @@ -1108,6 +1133,7 @@ static int qcom_edp_phy_probe(struct platform_device *pdev) } static const struct of_device_id qcom_edp_phy_match_table[] = { + { .compatible = "qcom,sa8775p-edp-phy", .data = &sa8775p_dp_phy_cfg, }, { .compatible = "qcom,sc7280-edp-phy", .data = &sc7280_dp_phy_cfg, }, { .compatible = "qcom,sc8180x-edp-phy", .data = &sc7280_dp_phy_cfg, }, { .compatible = "qcom,sc8280xp-dp-phy", .data = &sc8280xp_dp_phy_cfg, }, diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c index 68cc8e24f383..6bd1b3c75c77 100644 --- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c +++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c @@ -294,7 +294,7 @@ MODULE_DEVICE_TABLE(of, eusb2_repeater_of_match_table); static struct platform_driver eusb2_repeater_driver = { .probe = eusb2_repeater_probe, - .remove_new = eusb2_repeater_remove, + .remove = eusb2_repeater_remove, .driver = { .name = "qcom-eusb2-repeater", .of_match_table = eusb2_repeater_of_match_table, diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c index f0a72b82c770..f5eb0bdac418 100644 --- a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c @@ -184,11 +184,11 @@ static const struct of_device_id qcom_ipq806x_sata_phy_of_match[] = { MODULE_DEVICE_TABLE(of, qcom_ipq806x_sata_phy_of_match); static struct platform_driver qcom_ipq806x_sata_phy_driver = { - .probe = qcom_ipq806x_sata_phy_probe, - .remove_new = qcom_ipq806x_sata_phy_remove, + .probe = qcom_ipq806x_sata_phy_probe, + .remove = qcom_ipq806x_sata_phy_remove, .driver = { - .name 
= "qcom-ipq806x-sata-phy", - .of_match_table = qcom_ipq806x_sata_phy_of_match, + .name = "qcom-ipq806x-sata-phy", + .of_match_table = qcom_ipq806x_sata_phy_of_match, } }; module_platform_driver(qcom_ipq806x_sata_phy_driver); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 643045c9024e..3bae39381fd0 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -3483,7 +3483,7 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp) } #endif -static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np) +static int qmp_combo_parse_dt_legacy_dp(struct qmp_combo *qmp, struct device_node *np) { struct device *dev = qmp->dev; @@ -3510,7 +3510,7 @@ static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_nod return 0; } -static int qmp_combo_parse_dt_lecacy_usb(struct qmp_combo *qmp, struct device_node *np) +static int qmp_combo_parse_dt_legacy_usb(struct qmp_combo *qmp, struct device_node *np) { const struct qmp_phy_cfg *cfg = qmp->cfg; struct device *dev = qmp->dev; @@ -3576,11 +3576,11 @@ static int qmp_combo_parse_dt_legacy(struct qmp_combo *qmp, struct device_node * if (IS_ERR(qmp->dp_serdes)) return PTR_ERR(qmp->dp_serdes); - ret = qmp_combo_parse_dt_lecacy_usb(qmp, usb_np); + ret = qmp_combo_parse_dt_legacy_usb(qmp, usb_np); if (ret) return ret; - ret = qmp_combo_parse_dt_lecacy_dp(qmp, dp_np); + ret = qmp_combo_parse_dt_legacy_dp(qmp, dp_np); if (ret) return ret; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 36aaac34e6c6..873f2f9844c6 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -34,6 +34,8 @@ #include "phy-qcom-qmp-pcs-pcie-v5_20.h" #include "phy-qcom-qmp-pcs-pcie-v6.h" #include "phy-qcom-qmp-pcs-pcie-v6_20.h" +#include "phy-qcom-qmp-pcs-pcie-v6_30.h" +#include "phy-qcom-qmp-pcs-v6_30.h" #include "phy-qcom-qmp-pcie-qhp.h" #define PHY_INIT_COMPLETE_TIMEOUT 10000 @@ -1344,6 +1346,154 @@ static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0x8a), }; +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x26), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xf8), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x04), + 
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_POST_DIV_MUX, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x46), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MISC_1, 0x88), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MODE, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_ln_shrd_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_SUMMER_CAL_SPD_MODE, 0x5b), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B3, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B4, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B5, 0xb6), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B6, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3, 0x1f), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_txz_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_RX, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_2, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_3, 0x51), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_TRAN_DRVR_EMP_EN, 0x34), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_rxz_tbl[] = { + 
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_GAIN_RATE_2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_BKUP_CTRL1, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x45), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_VGA_CAL_CNTRL1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x39), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0x23), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B2, 0x58), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B3, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B4, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B5, 0xb6), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B6, 0xee), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B0, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B1, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B2, 0x60), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B3, 0xdf), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_TX_ADPT_CTRL, 0x10), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG_LANE(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x3a, BIT(0)), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_LOCK_DETECT_CONFIG2, 0x00), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_G3S2_PRE_GAIN, 0x2e), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_RX_SIGDET_LVL, 0x99), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_ALIGN_DETECT_CONFIG7, 0x00), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_EQ_CONFIG4, 0x00), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_EQ_CONFIG5, 0x22), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_TX_RX_CONFIG, 0x04), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_TX_RX_CONFIG2, 0x02), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_EQ_CONFIG1, 0x16), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_EQ_CONFIG5, 0x02), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_PRE_GAIN, 0x2e), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG1, 0x03), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG3, 0x28), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG5, 0x18), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G3_FOM_EQ_CONFIG5, 0x7a), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_FOM_EQ_CONFIG5, 0x8a), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G3_RXEQEVAL_TIME, 0x27), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_RXEQEVAL_TIME, 0x27), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_TX_RX_CONFIG, 0xc0), + 
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_POWER_STATE_CONFIG2, 0x1d), +}; + static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08), QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34), @@ -2582,6 +2732,8 @@ struct qmp_pcie_offsets { u16 rx; u16 tx2; u16 rx2; + u16 txz; + u16 rxz; u16 ln_shrd; }; @@ -2592,6 +2744,10 @@ struct qmp_phy_cfg_tbls { int tx_num; const struct qmp_phy_init_tbl *rx; int rx_num; + const struct qmp_phy_init_tbl *txz; + int txz_num; + const struct qmp_phy_init_tbl *rxz; + int rxz_num; const struct qmp_phy_init_tbl *pcs; int pcs_num; const struct qmp_phy_init_tbl *pcs_misc; @@ -2659,6 +2815,8 @@ struct qmp_pcie { void __iomem *rx; void __iomem *tx2; void __iomem *rx2; + void __iomem *txz; + void __iomem *rxz; void __iomem *ln_shrd; void __iomem *port_b; @@ -2826,6 +2984,17 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = { .ln_shrd = 0x0e00, }; +static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_30 = { + .serdes = 0x8800, + .pcs = 0x9000, + .pcs_misc = 0x9800, + .tx = 0x0000, + .rx = 0x0200, + .txz = 0xe000, + .rxz = 0xe200, + .ln_shrd = 0x8000, +}; + static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .lanes = 1, @@ -3704,6 +3873,38 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = { .has_nocsr_reset = true, }; +static const struct qmp_phy_cfg x1e80100_qmp_gen4x8_pciephy_cfg = { + .lanes = 8, + + .offsets = &qmp_pcie_offsets_v6_30, + .tbls = { + .serdes = x1e80100_qmp_gen4x8_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_serdes_tbl), + .rx = x1e80100_qmp_gen4x8_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_rx_tbl), + .txz = x1e80100_qmp_gen4x8_pcie_txz_tbl, + .txz_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_txz_tbl), + .rxz = x1e80100_qmp_gen4x8_pcie_rxz_tbl, + .rxz_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_rxz_tbl), + .pcs = x1e80100_qmp_gen4x8_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_pcs_tbl), + .pcs_misc = x1e80100_qmp_gen4x8_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_pcs_misc_tbl), + .ln_shrd = x1e80100_qmp_gen4x8_pcie_ln_shrd_tbl, + .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_ln_shrd_tbl), + }, + + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = sm8550_qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l), + .regs = pciephy_v6_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS_4_20, + .has_nocsr_reset = true, +}; + static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls) { const struct qmp_phy_cfg *cfg = qmp->cfg; @@ -3751,6 +3952,13 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); + /* + * Tx/Rx registers that require different settings than + * txz/rxz must be programmed after txz/rxz. 
+ */ + qmp_configure(qmp->dev, qmp->txz, tbls->txz, tbls->txz_num); + qmp_configure(qmp->dev, qmp->rxz, tbls->rxz, tbls->rxz_num); + qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1); qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1); @@ -4293,6 +4501,9 @@ static int qmp_pcie_parse_dt(struct qmp_pcie *qmp) return PTR_ERR(qmp->port_b); } + qmp->txz = base + offs->txz; + qmp->rxz = base + offs->rxz; + if (cfg->tbls.ln_shrd) qmp->ln_shrd = base + offs->ln_shrd; @@ -4478,6 +4689,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = { }, { .compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy", .data = &x1e80100_qmp_gen4x4_pciephy_cfg, + }, { + .compatible = "qcom,x1e80100-qmp-gen4x8-pcie-phy", + .data = &x1e80100_qmp_gen4x8_pciephy_cfg, }, { }, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h new file mode 100644 index 000000000000..5a58ff197e6e --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_PCS_PCIE_V6_30_H_ +#define QCOM_PHY_QMP_PCS_PCIE_V6_30_H_ + +/* Only for QMP V6_30 PHY - PCIE have different offsets than V6 */ +#define QPHY_PCIE_V6_30_PCS_POWER_STATE_CONFIG2 0x014 +#define QPHY_PCIE_V6_30_PCS_TX_RX_CONFIG 0x020 +#define QPHY_PCIE_V6_30_PCS_ENDPOINT_REFCLK_DRIVE 0x024 +#define QPHY_PCIE_V6_30_PCS_OSC_DTCT_ACTIONS 0x098 +#define QPHY_PCIE_V6_30_PCS_EQ_CONFIG1 0x0a8 +#define QPHY_PCIE_V6_30_PCS_G3_RXEQEVAL_TIME 0x0f8 +#define QPHY_PCIE_V6_30_PCS_G4_RXEQEVAL_TIME 0x0fc +#define QPHY_PCIE_V6_30_PCS_G4_EQ_CONFIG5 0x110 +#define QPHY_PCIE_V6_30_PCS_G4_PRE_GAIN 0x164 +#define QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG1 0x184 +#define QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG3 0x18c +#define QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG5 0x194 +#define QPHY_PCIE_V6_30_PCS_G3_FOM_EQ_CONFIG5 0x1b4 +#define QPHY_PCIE_V6_30_PCS_G4_FOM_EQ_CONFIG5 0x1c8 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h new file mode 100644 index 000000000000..369120d88bc2 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_PCS_V6_30_H_ +#define QCOM_PHY_QMP_PCS_V6_30_H_ + +/* Only for QMP V6_30 PHY - PCIe PCS registers */ +#define QPHY_V6_30_PCS_LOCK_DETECT_CONFIG2 0x0cc +#define QPHY_V6_30_PCS_G3S2_PRE_GAIN 0x17c +#define QPHY_V6_30_PCS_RX_SIGDET_LVL 0x194 +#define QPHY_V6_30_PCS_ALIGN_DETECT_CONFIG7 0x1dc +#define QPHY_V6_30_PCS_TX_RX_CONFIG 0x1e0 +#define QPHY_V6_30_PCS_TX_RX_CONFIG2 0x1e4 +#define QPHY_V6_30_PCS_EQ_CONFIG4 0x1fc +#define QPHY_V6_30_PCS_EQ_CONFIG5 0x200 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 1246d3bc8b92..acd6075bf6d9 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -871,6 +871,16 @@ static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_pcs_usb_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00), }; +static const struct qmp_phy_init_tbl qcs8300_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0xf2), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e), +}; + static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5), QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82), @@ -989,6 +999,40 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e), }; +static const struct qmp_phy_init_tbl qcs8300_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xec), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x19), + 
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00), +}; + static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc), QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd), @@ -1462,6 +1506,24 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = { .regs = qmp_v5_usb3phy_regs_layout, }; +static const struct qmp_phy_cfg qcs8300_usb3_uniphy_cfg = { + .offsets = &qmp_usb_offsets_v5, + + .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl), + .tx_tbl = qcs8300_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qcs8300_usb3_uniphy_tx_tbl), + .rx_tbl = qcs8300_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qcs8300_usb3_uniphy_rx_tbl), + .pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl), + .pcs_usb_tbl = sa8775p_usb3_uniphy_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v5_usb3phy_regs_layout, +}; + static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = { .offsets = &qmp_usb_offsets_v5, @@ -2248,6 +2310,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = { .compatible = "qcom,msm8996-qmp-usb3-phy", .data = &msm8996_usb3phy_cfg, }, { + .compatible = "qcom,qcs8300-qmp-usb3-uni-phy", + .data = &qcs8300_usb3_uniphy_cfg, + }, { .compatible = "qcom,qdu1000-qmp-usb3-uni-phy", .data = &qdu1000_usb3_uniphy_cfg, }, { diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c index e3ad7cea5109..c3e131a64131 100644 --- a/drivers/phy/realtek/phy-rtk-usb2.c +++ b/drivers/phy/realtek/phy-rtk-usb2.c @@ -1298,7 +1298,7 @@ MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); static struct platform_driver rtk_usb2phy_driver = { .probe = rtk_usb2phy_probe, - .remove_new = rtk_usb2phy_remove, + .remove = rtk_usb2phy_remove, .driver = { .name = "rtk-usb2phy", .of_match_table = usbphy_rtk_dt_match, diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c index dfcf4b921bba..0cef29a7ddd9 100644 --- a/drivers/phy/realtek/phy-rtk-usb3.c +++ b/drivers/phy/realtek/phy-rtk-usb3.c @@ -734,7 +734,7 @@ MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); static struct platform_driver rtk_usb3phy_driver = { .probe = rtk_usb3phy_probe, - .remove_new = rtk_usb3phy_remove, + .remove = rtk_usb3phy_remove, .driver = { .name = "rtk-usb3phy", .of_match_table = usbphy_rtk_dt_match, diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c index 0ce7e9c94444..feca4cb2ff4d 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-pcie.c +++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c @@ -132,11 +132,11 @@ static void rcar_gen3_phy_pcie_remove(struct platform_device *pdev) static struct platform_driver rcar_gen3_phy_driver = { .driver = { - .name = "phy_rcar_gen3_pcie", - .of_match_table = rcar_gen3_phy_pcie_match_table, + .name = "phy_rcar_gen3_pcie", + .of_match_table = rcar_gen3_phy_pcie_match_table, }, - .probe = rcar_gen3_phy_pcie_probe, - .remove_new = rcar_gen3_phy_pcie_remove, + .probe = rcar_gen3_phy_pcie_probe, + .remove = rcar_gen3_phy_pcie_remove, }; module_platform_driver(rcar_gen3_phy_driver); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 58e123305152..775f4f973a6c 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -825,7 +825,7 @@ static 
struct platform_driver rcar_gen3_phy_usb2_driver = { .of_match_table = rcar_gen3_phy_usb2_match_table, }, .probe = rcar_gen3_phy_usb2_probe, - .remove_new = rcar_gen3_phy_usb2_remove, + .remove = rcar_gen3_phy_usb2_remove, }; module_platform_driver(rcar_gen3_phy_usb2_driver); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c index e2d630edd992..5c267d148c90 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c @@ -206,11 +206,11 @@ static void rcar_gen3_phy_usb3_remove(struct platform_device *pdev) static struct platform_driver rcar_gen3_phy_usb3_driver = { .driver = { - .name = "phy_rcar_gen3_usb3", - .of_match_table = rcar_gen3_phy_usb3_match_table, + .name = "phy_rcar_gen3_usb3", + .of_match_table = rcar_gen3_phy_usb3_match_table, }, - .probe = rcar_gen3_phy_usb3_probe, - .remove_new = rcar_gen3_phy_usb3_remove, + .probe = rcar_gen3_phy_usb3_probe, + .remove = rcar_gen3_phy_usb3_remove, }; module_platform_driver(rcar_gen3_phy_usb3_driver); diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c index f1f1da4a0b1f..3b2d8cef75e5 100644 --- a/drivers/phy/renesas/r8a779f0-ether-serdes.c +++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c @@ -404,7 +404,7 @@ static void r8a779f0_eth_serdes_remove(struct platform_device *pdev) static struct platform_driver r8a779f0_eth_serdes_driver_platform = { .probe = r8a779f0_eth_serdes_probe, - .remove_new = r8a779f0_eth_serdes_remove, + .remove = r8a779f0_eth_serdes_remove, .driver = { .name = "r8a779f0_eth_serdes", .of_match_table = r8a779f0_eth_serdes_of_table, diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c index 98c92d6c482f..2ab99e1d47eb 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c @@ -472,7 +472,7 @@ static struct platform_driver rockchip_inno_csidphy_driver = { .of_match_table = rockchip_inno_csidphy_match_id, }, .probe = rockchip_inno_csidphy_probe, - .remove_new = rockchip_inno_csidphy_remove, + .remove = rockchip_inno_csidphy_remove, }; module_platform_driver(rockchip_inno_csidphy_driver); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index 6405943a2676..d5b1a4e2f7d3 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -784,7 +784,7 @@ static struct platform_driver inno_dsidphy_driver = { .of_match_table = of_match_ptr(inno_dsidphy_of_match), }, .probe = inno_dsidphy_probe, - .remove_new = inno_dsidphy_remove, + .remove = inno_dsidphy_remove, }; module_platform_driver(inno_dsidphy_driver); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 053bd62e31ba..8dcc2bb777b5 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -1424,8 +1424,8 @@ static const struct of_device_id inno_hdmi_phy_of_match[] = { MODULE_DEVICE_TABLE(of, inno_hdmi_phy_of_match); static struct platform_driver inno_hdmi_phy_driver = { - .probe = inno_hdmi_phy_probe, - .remove_new = inno_hdmi_phy_remove, + .probe = inno_hdmi_phy_probe, + .remove = inno_hdmi_phy_remove, .driver = { .name = "inno-hdmi-phy", .of_match_table = inno_hdmi_phy_of_match, diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 
4f71373ae6e1..96f3d868a526 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -229,9 +229,10 @@ struct rockchip_usb2phy_port { * @dev: pointer to device. * @grf: General Register Files regmap. * @usbgrf: USB General Register Files regmap. - * @clk: clock struct of phy input clk. + * @clks: array of phy input clocks. * @clk480m: clock struct of phy output clk. * @clk480m_hw: clock struct of phy output clk management. + * @num_clks: number of phy input clocks. * @phy_reset: phy reset control. * @chg_state: states involved in USB charger detection. * @chg_type: USB charger types. @@ -246,9 +247,10 @@ struct rockchip_usb2phy { struct device *dev; struct regmap *grf; struct regmap *usbgrf; - struct clk *clk; + struct clk_bulk_data *clks; struct clk *clk480m; struct clk_hw clk480m_hw; + int num_clks; struct reset_control *phy_reset; enum usb_chg_state chg_state; enum power_supply_type chg_type; @@ -310,6 +312,13 @@ static int rockchip_usb2phy_reset(struct rockchip_usb2phy *rphy) return 0; } +static void rockchip_usb2phy_clk_bulk_disable(void *data) +{ + struct rockchip_usb2phy *rphy = data; + + clk_bulk_disable_unprepare(rphy->num_clks, rphy->clks); +} + static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw) { struct rockchip_usb2phy *rphy = @@ -376,7 +385,9 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy) { struct device_node *node = rphy->dev->of_node; struct clk_init_data init; + struct clk *refclk = NULL; const char *clk_name; + int i; int ret = 0; init.flags = 0; @@ -386,8 +397,15 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy) /* optional override of the clockname */ of_property_read_string(node, "clock-output-names", &init.name); - if (rphy->clk) { - clk_name = __clk_get_name(rphy->clk); + for (i = 0; i < rphy->num_clks; i++) { + if (!strncmp(rphy->clks[i].id, "phyclk", 6)) { + refclk = rphy->clks[i].clk; + break; + } + } + + if (!IS_ERR(refclk)) { + clk_name = __clk_get_name(refclk); init.parent_names = &clk_name; init.num_parents = 1; } else { @@ -418,30 +436,28 @@ err_ret: static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy) { - int ret; struct device_node *node = rphy->dev->of_node; struct extcon_dev *edev; + int ret; if (of_property_read_bool(node, "extcon")) { edev = extcon_get_edev_by_phandle(rphy->dev, 0); - if (IS_ERR(edev)) { - if (PTR_ERR(edev) != -EPROBE_DEFER) - dev_err(rphy->dev, "Invalid or missing extcon\n"); - return PTR_ERR(edev); - } + if (IS_ERR(edev)) + return dev_err_probe(rphy->dev, PTR_ERR(edev), + "invalid or missing extcon\n"); } else { /* Initialize extcon device */ edev = devm_extcon_dev_allocate(rphy->dev, rockchip_usb2phy_extcon_cable); if (IS_ERR(edev)) - return -ENOMEM; + return dev_err_probe(rphy->dev, PTR_ERR(edev), + "failed to allocate extcon device\n"); ret = devm_extcon_dev_register(rphy->dev, edev); - if (ret) { - dev_err(rphy->dev, "failed to register extcon device\n"); - return ret; - } + if (ret) + return dev_err_probe(rphy->dev, ret, + "failed to register extcon device\n"); } rphy->edev = edev; @@ -1327,7 +1343,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) struct rockchip_usb2phy *rphy; const struct rockchip_usb2phy_cfg *phy_cfgs; unsigned int reg; - int index, ret; + int index = 0, ret; rphy = devm_kzalloc(dev, sizeof(*rphy), GFP_KERNEL); if (!rphy) @@ -1339,9 +1355,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) dev_err(dev, "failed to locate usbgrf\n"); return 
PTR_ERR(rphy->grf); } - } - - else { + } else { rphy->grf = syscon_node_to_regmap(dev->parent->of_node); if (IS_ERR(rphy->grf)) return PTR_ERR(rphy->grf); @@ -1358,16 +1372,14 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) } if (of_property_read_u32_index(np, "reg", 0, ®)) { - dev_err(dev, "the reg property is not assigned in %pOFn node\n", - np); + dev_err(dev, "the reg property is not assigned in %pOFn node\n", np); return -EINVAL; } /* support address_cells=2 */ if (of_property_count_u32_elems(np, "reg") > 2 && reg == 0) { if (of_property_read_u32_index(np, "reg", 1, ®)) { - dev_err(dev, "the reg property is not assigned in %pOFn node\n", - np); + dev_err(dev, "the reg property is not assigned in %pOFn node\n", np); return -EINVAL; } } @@ -1386,8 +1398,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) if (ret) return ret; - /* find out a proper config which can be matched with dt. */ - index = 0; + /* find a proper config that can be matched with the DT */ do { if (phy_cfgs[index].reg == reg) { rphy->phy_cfg = &phy_cfgs[index]; @@ -1406,17 +1417,25 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) if (IS_ERR(rphy->phy_reset)) return PTR_ERR(rphy->phy_reset); - rphy->clk = devm_clk_get_optional_enabled(dev, "phyclk"); - if (IS_ERR(rphy->clk)) { - return dev_err_probe(&pdev->dev, PTR_ERR(rphy->clk), - "failed to get phyclk\n"); - } + ret = devm_clk_bulk_get_all(dev, &rphy->clks); + if (ret == -EPROBE_DEFER) + return dev_err_probe(&pdev->dev, -EPROBE_DEFER, + "failed to get phy clock\n"); + + /* Clocks are optional */ + rphy->num_clks = ret < 0 ? 0 : ret; ret = rockchip_usb2phy_clk480m_register(rphy); - if (ret) { - dev_err(dev, "failed to register 480m output clock\n"); + if (ret) + return dev_err_probe(dev, ret, "failed to register 480m output clock\n"); + + ret = clk_bulk_prepare_enable(rphy->num_clks, rphy->clks); + if (ret) + return dev_err_probe(dev, ret, "failed to enable phy clock\n"); + + ret = devm_add_action_or_reset(dev, rockchip_usb2phy_clk_bulk_disable, rphy); + if (ret) return ret; - } if (rphy->phy_cfg->phy_tuning) { ret = rphy->phy_cfg->phy_tuning(rphy); @@ -1436,8 +1455,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops); if (IS_ERR(phy)) { - dev_err_probe(dev, PTR_ERR(phy), "failed to create phy\n"); - ret = PTR_ERR(phy); + ret = dev_err_probe(dev, PTR_ERR(phy), "failed to create phy\n"); goto put_child; } @@ -1446,13 +1464,11 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) /* initialize otg/host port separately */ if (of_node_name_eq(child_np, "host-port")) { - ret = rockchip_usb2phy_host_port_init(rphy, rport, - child_np); + ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np); if (ret) goto put_child; } else { - ret = rockchip_usb2phy_otg_port_init(rphy, rport, - child_np); + ret = rockchip_usb2phy_otg_port_init(rphy, rport, child_np); if (ret) goto put_child; } @@ -1474,8 +1490,7 @@ next_child: "rockchip_usb2phy", rphy); if (ret) { - dev_err(rphy->dev, - "failed to request usb2phy irq handle\n"); + dev_err_probe(rphy->dev, ret, "failed to request usb2phy irq handle\n"); goto put_child; } } @@ -1495,6 +1510,30 @@ static int rk3128_usb2phy_tuning(struct rockchip_usb2phy *rphy) BIT(2) << BIT_WRITEABLE_SHIFT | 0); } +static int rk3576_usb2phy_tuning(struct rockchip_usb2phy *rphy) +{ + int ret; + u32 reg = rphy->phy_cfg->reg; + + /* Deassert SIDDQ to power on analog block */ + ret = regmap_write(rphy->grf, reg + 
0x0010, GENMASK(29, 29) | 0x0000); + if (ret) + return ret; + + /* Do reset after exit IDDQ mode */ + ret = rockchip_usb2phy_reset(rphy); + if (ret) + return ret; + + /* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */ + ret |= regmap_write(rphy->grf, reg + 0x000c, GENMASK(27, 24) | 0x0900); + + /* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */ + ret |= regmap_write(rphy->grf, reg + 0x0010, GENMASK(20, 19) | 0x0010); + + return ret; +} + static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy) { int ret; @@ -1923,6 +1962,84 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = { { /* sentinel */ } }; +static const struct rockchip_usb2phy_cfg rk3576_phy_cfgs[] = { + { + .reg = 0x0, + .num_ports = 1, + .phy_tuning = rk3576_usb2phy_tuning, + .clkout_ctl = { 0x0008, 0, 0, 1, 0 }, + .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0x0000, 8, 0, 0, 0x1d1 }, + .bvalid_det_en = { 0x00c0, 1, 1, 0, 1 }, + .bvalid_det_st = { 0x00c4, 1, 1, 0, 1 }, + .bvalid_det_clr = { 0x00c8, 1, 1, 0, 1 }, + .ls_det_en = { 0x00c0, 0, 0, 0, 1 }, + .ls_det_st = { 0x00c4, 0, 0, 0, 1 }, + .ls_det_clr = { 0x00c8, 0, 0, 0, 1 }, + .disfall_en = { 0x00c0, 6, 6, 0, 1 }, + .disfall_st = { 0x00c4, 6, 6, 0, 1 }, + .disfall_clr = { 0x00c8, 6, 6, 0, 1 }, + .disrise_en = { 0x00c0, 5, 5, 0, 1 }, + .disrise_st = { 0x00c4, 5, 5, 0, 1 }, + .disrise_clr = { 0x00c8, 5, 5, 0, 1 }, + .utmi_avalid = { 0x0080, 1, 1, 0, 1 }, + .utmi_bvalid = { 0x0080, 0, 0, 0, 1 }, + .utmi_ls = { 0x0080, 5, 4, 0, 1 }, + } + }, + .chg_det = { + .cp_det = { 0x0080, 8, 8, 0, 1 }, + .dcp_det = { 0x0080, 8, 8, 0, 1 }, + .dp_det = { 0x0080, 9, 9, 1, 0 }, + .idm_sink_en = { 0x0010, 5, 5, 1, 0 }, + .idp_sink_en = { 0x0010, 5, 5, 0, 1 }, + .idp_src_en = { 0x0010, 14, 14, 0, 1 }, + .rdm_pdwn_en = { 0x0010, 14, 14, 0, 1 }, + .vdm_src_en = { 0x0010, 7, 6, 0, 3 }, + .vdp_src_en = { 0x0010, 7, 6, 0, 3 }, + }, + }, + { + .reg = 0x2000, + .num_ports = 1, + .phy_tuning = rk3576_usb2phy_tuning, + .clkout_ctl = { 0x2008, 0, 0, 1, 0 }, + .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0x2000, 8, 0, 0, 0x1d1 }, + .bvalid_det_en = { 0x20c0, 1, 1, 0, 1 }, + .bvalid_det_st = { 0x20c4, 1, 1, 0, 1 }, + .bvalid_det_clr = { 0x20c8, 1, 1, 0, 1 }, + .ls_det_en = { 0x20c0, 0, 0, 0, 1 }, + .ls_det_st = { 0x20c4, 0, 0, 0, 1 }, + .ls_det_clr = { 0x20c8, 0, 0, 0, 1 }, + .disfall_en = { 0x20c0, 6, 6, 0, 1 }, + .disfall_st = { 0x20c4, 6, 6, 0, 1 }, + .disfall_clr = { 0x20c8, 6, 6, 0, 1 }, + .disrise_en = { 0x20c0, 5, 5, 0, 1 }, + .disrise_st = { 0x20c4, 5, 5, 0, 1 }, + .disrise_clr = { 0x20c8, 5, 5, 0, 1 }, + .utmi_avalid = { 0x2080, 1, 1, 0, 1 }, + .utmi_bvalid = { 0x2080, 0, 0, 0, 1 }, + .utmi_ls = { 0x2080, 5, 4, 0, 1 }, + } + }, + .chg_det = { + .cp_det = { 0x2080, 8, 8, 0, 1 }, + .dcp_det = { 0x2080, 8, 8, 0, 1 }, + .dp_det = { 0x2080, 9, 9, 1, 0 }, + .idm_sink_en = { 0x2010, 5, 5, 1, 0 }, + .idp_sink_en = { 0x2010, 5, 5, 0, 1 }, + .idp_src_en = { 0x2010, 14, 14, 0, 1 }, + .rdm_pdwn_en = { 0x2010, 14, 14, 0, 1 }, + .vdm_src_en = { 0x2010, 7, 6, 0, 3 }, + .vdp_src_en = { 0x2010, 7, 6, 0, 3 }, + }, + }, + { /* sentinel */ } +}; + static const struct rockchip_usb2phy_cfg rk3588_phy_cfgs[] = { { .reg = 0x0000, @@ -2094,6 +2211,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = { { .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs }, { .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs }, { .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs }, + { .compatible = "rockchip,rk3576-usb2phy", 
.data = &rk3576_phy_cfgs }, { .compatible = "rockchip,rk3588-usb2phy", .data = &rk3588_phy_cfgs }, { .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs }, {} diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 9f084697dd05..ceab9c71d3b5 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -256,13 +256,10 @@ struct ropll_config { }; enum rk_hdptx_reset { - RST_PHY = 0, - RST_APB, + RST_APB = 0, RST_INIT, RST_CMN, RST_LANE, - RST_ROPLL, - RST_LCPLL, RST_MAX }; @@ -665,11 +662,6 @@ static void rk_hdptx_phy_disable(struct rk_hdptx_phy *hdptx) { u32 val; - /* reset phy and apb, or phy locked flag may keep 1 */ - reset_control_assert(hdptx->rsts[RST_PHY].rstc); - usleep_range(20, 30); - reset_control_deassert(hdptx->rsts[RST_PHY].rstc); - reset_control_assert(hdptx->rsts[RST_APB].rstc); usleep_range(20, 30); reset_control_deassert(hdptx->rsts[RST_APB].rstc); @@ -792,10 +784,6 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx, rk_hdptx_pre_power_up(hdptx); - reset_control_assert(hdptx->rsts[RST_ROPLL].rstc); - usleep_range(20, 30); - reset_control_deassert(hdptx->rsts[RST_ROPLL].rstc); - rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_cmn_init_seq); rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_cmn_init_seq); @@ -1098,13 +1086,10 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(hdptx->regmap), "Failed to init regmap\n"); - hdptx->rsts[RST_PHY].id = "phy"; hdptx->rsts[RST_APB].id = "apb"; hdptx->rsts[RST_INIT].id = "init"; hdptx->rsts[RST_CMN].id = "cmn"; hdptx->rsts[RST_LANE].id = "lane"; - hdptx->rsts[RST_ROPLL].id = "ropll"; - hdptx->rsts[RST_LCPLL].id = "lcpll"; ret = devm_reset_control_bulk_get_exclusive(dev, RST_MAX, hdptx->rsts); if (ret) diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index 4efcb78b0ab1..122ae0fdc785 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c @@ -1210,7 +1210,7 @@ MODULE_DEVICE_TABLE(of, rockchip_typec_phy_dt_ids); static struct platform_driver rockchip_typec_phy_driver = { .probe = rockchip_typec_phy_probe, - .remove_new = rockchip_typec_phy_remove, + .remove = rockchip_typec_phy_remove, .driver = { .name = "rockchip-typec-phy", .of_match_table = rockchip_typec_phy_dt_ids, diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c index 2c51e5c62d3e..5b1e8a3806ed 100644 --- a/drivers/phy/rockchip/phy-rockchip-usbdp.c +++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c @@ -1538,6 +1538,43 @@ static const char * const rk_udphy_rst_list[] = { "init", "cmn", "lane", "pcs_apb", "pma_apb" }; +static const struct rk_udphy_cfg rk3576_udphy_cfgs = { + .num_phys = 1, + .phy_ids = { 0x2b010000 }, + .num_rsts = ARRAY_SIZE(rk_udphy_rst_list), + .rst_list = rk_udphy_rst_list, + .grfcfg = { + /* u2phy-grf */ + .bvalid_phy_con = RK_UDPHY_GEN_GRF_REG(0x0010, 1, 0, 0x2, 0x3), + .bvalid_grf_con = RK_UDPHY_GEN_GRF_REG(0x0000, 15, 14, 0x1, 0x3), + + /* usb-grf */ + .usb3otg0_cfg = RK_UDPHY_GEN_GRF_REG(0x0030, 15, 0, 0x1100, 0x0188), + + /* usbdpphy-grf */ + .low_pwrn = RK_UDPHY_GEN_GRF_REG(0x0004, 13, 13, 0, 1), + .rx_lfps = RK_UDPHY_GEN_GRF_REG(0x0004, 14, 14, 0, 1), + }, + .vogrfcfg = { + { + .hpd_trigger = RK_UDPHY_GEN_GRF_REG(0x0000, 11, 10, 1, 3), + .dp_lane_reg = 0x0000, + }, + }, + .dp_tx_ctrl_cfg = { + 
rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_hbr2, + rk3588_dp_tx_drv_ctrl_hbr3, + }, + .dp_tx_ctrl_cfg_typec = { + rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_hbr2, + rk3588_dp_tx_drv_ctrl_hbr3, + }, +}; + static const struct rk_udphy_cfg rk3588_udphy_cfgs = { .num_phys = 2, .phy_ids = { @@ -1585,6 +1622,10 @@ static const struct rk_udphy_cfg rk3588_udphy_cfgs = { static const struct of_device_id rk_udphy_dt_match[] = { { + .compatible = "rockchip,rk3576-usbdp-phy", + .data = &rk3576_udphy_cfgs + }, + { .compatible = "rockchip,rk3588-usbdp-phy", .data = &rk3588_udphy_cfgs }, diff --git a/drivers/phy/st/Kconfig b/drivers/phy/st/Kconfig index 3fc3d0781fb8..304614b6dabf 100644 --- a/drivers/phy/st/Kconfig +++ b/drivers/phy/st/Kconfig @@ -33,6 +33,17 @@ config PHY_STIH407_USB Enable this support to enable the picoPHY device used by USB2 and USB3 controllers on STMicroelectronics STiH407 SoC families. +config PHY_STM32_COMBOPHY + tristate "STMicroelectronics COMBOPHY driver for STM32MP25" + depends on ARCH_STM32 || COMPILE_TEST + select GENERIC_PHY + help + Enable this to support the COMBOPHY device used by USB3 or PCIe + controllers on STMicroelectronics STM32MP25 SoC. + This driver controls the COMBOPHY block to generate the PCIe 100Mhz + reference clock from either the external clock generator or HSE + internal SoC clock source. + config PHY_STM32_USBPHYC tristate "STMicroelectronics STM32 USB HS PHY Controller driver" depends on ARCH_STM32 || COMPILE_TEST diff --git a/drivers/phy/st/Makefile b/drivers/phy/st/Makefile index c862dd937b64..cb80e954ea9f 100644 --- a/drivers/phy/st/Makefile +++ b/drivers/phy/st/Makefile @@ -3,4 +3,5 @@ obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o +obj-$(CONFIG_PHY_STM32_COMBOPHY) += phy-stm32-combophy.o obj-$(CONFIG_PHY_STM32_USBPHYC) += phy-stm32-usbphyc.o diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c new file mode 100644 index 000000000000..765bb34fe358 --- /dev/null +++ b/drivers/phy/st/phy-stm32-combophy.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * STMicroelectronics COMBOPHY STM32MP25 Controller driver. 
+ * + * Copyright (C) 2024 STMicroelectronics + * Author: Christian Bruel <christian.bruel@foss.st.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <dt-bindings/phy/phy.h> + +#define SYSCFG_COMBOPHY_CR1 0x4c00 +#define SYSCFG_COMBOPHY_CR2 0x4c04 +#define SYSCFG_COMBOPHY_CR4 0x4c0c +#define SYSCFG_COMBOPHY_CR5 0x4c10 +#define SYSCFG_COMBOPHY_SR 0x4c14 +#define SYSCFG_PCIEPRGCR 0x6080 + +/* SYSCFG PCIEPRGCR */ +#define STM32MP25_PCIEPRGCR_EN BIT(0) +#define STM32MP25_PCIEPRG_IMPCTRL_OHM GENMASK(3, 1) +#define STM32MP25_PCIEPRG_IMPCTRL_VSWING GENMASK(5, 4) + +/* SYSCFG SYSCFG_COMBOPHY_SR */ +#define STM32MP25_PIPE0_PHYSTATUS BIT(1) + +/* SYSCFG CR1 */ +#define SYSCFG_COMBOPHY_CR1_REFUSEPAD BIT(0) +#define SYSCFG_COMBOPHY_CR1_MPLLMULT GENMASK(7, 1) +#define SYSCFG_COMBOPHY_CR1_REFCLKSEL GENMASK(16, 8) +#define SYSCFG_COMBOPHY_CR1_REFCLKDIV2 BIT(17) +#define SYSCFG_COMBOPHY_CR1_REFSSPEN BIT(18) +#define SYSCFG_COMBOPHY_CR1_SSCEN BIT(19) + +/* SYSCFG CR4 */ +#define SYSCFG_COMBOPHY_CR4_RX0_EQ GENMASK(2, 0) + +#define MPLLMULT_19_2 (0x02u << 1) +#define MPLLMULT_20 (0x7du << 1) +#define MPLLMULT_24 (0x68u << 1) +#define MPLLMULT_25 (0x64u << 1) +#define MPLLMULT_26 (0x60u << 1) +#define MPLLMULT_38_4 (0x41u << 1) +#define MPLLMULT_48 (0x6cu << 1) +#define MPLLMULT_50 (0x32u << 1) +#define MPLLMULT_52 (0x30u << 1) +#define MPLLMULT_100 (0x19u << 1) + +#define REFCLKSEL_0 0 +#define REFCLKSEL_1 (0x108u << 8) + +#define REFCLDIV_0 0 + +/* SYSCFG CR2 */ +#define SYSCFG_COMBOPHY_CR2_MODESEL GENMASK(1, 0) +#define SYSCFG_COMBOPHY_CR2_ISO_DIS BIT(15) + +#define COMBOPHY_MODESEL_PCIE 0 +#define COMBOPHY_MODESEL_USB 3 + +/* SYSCFG CR5 */ +#define SYSCFG_COMBOPHY_CR5_COMMON_CLOCKS BIT(12) + +#define COMBOPHY_SUP_ANA_MPLL_LOOP_CTL 0xc0 +#define COMBOPHY_PROP_CNTRL GENMASK(7, 4) + +/* Required apb/ker clocks first, optional pad last. */ +static const char * const combophy_clks[] = {"apb", "ker", "pad"}; +#define APB_CLK 0 +#define KER_CLK 1 +#define PAD_CLK 2 + +struct stm32_combophy { + struct phy *phy; + struct regmap *regmap; + struct device *dev; + void __iomem *base; + struct reset_control *phy_reset; + struct clk_bulk_data clks[ARRAY_SIZE(combophy_clks)]; + int num_clks; + bool have_pad_clk; + unsigned int type; + bool is_init; + int irq_wakeup; +}; + +struct clk_impedance { + u32 microohm; + u32 vswing[4]; +}; + +/* + * lookup table to hold the settings needed for a ref clock frequency + * impedance, the offset is used to set the IMP_CTL and DE_EMP bit of the + * PRG_IMP_CTRL register. 
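+ * The tuning code walks this table to the first row whose microohm value
+ * does not exceed the requested "st,output-micro-ohms", then picks the
+ * first vswing entry in that row at or above the requested microvolt.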
Use ordered discrete values in the table + */ +static const struct clk_impedance imp_lookup[] = { + { 6090000, { 442000, 564000, 684000, 802000 } }, + { 5662000, { 528000, 621000, 712000, 803000 } }, + { 5292000, { 491000, 596000, 700000, 802000 } }, + { 4968000, { 558000, 640000, 722000, 803000 } }, + { 4684000, { 468000, 581000, 692000, 802000 } }, + { 4429000, { 554000, 613000, 717000, 803000 } }, + { 4204000, { 511000, 609000, 706000, 802000 } }, + { 3999000, { 571000, 648000, 726000, 803000 } } +}; + +static int stm32_impedance_tune(struct stm32_combophy *combophy) +{ + u8 imp_size = ARRAY_SIZE(imp_lookup); + u8 vswing_size = ARRAY_SIZE(imp_lookup[0].vswing); + u8 imp_of, vswing_of; + u32 max_imp = imp_lookup[0].microohm; + u32 min_imp = imp_lookup[imp_size - 1].microohm; + u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1]; + u32 min_vswing = imp_lookup[0].vswing[0]; + u32 val; + + if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) { + if (val < min_imp || val > max_imp) { + dev_err(combophy->dev, "Invalid value %u for output ohm\n", val); + return -EINVAL; + } + + for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) + if (imp_lookup[imp_of].microohm <= val) + break; + + dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n", + imp_lookup[imp_of].microohm); + + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRG_IMPCTRL_OHM, + FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of)); + } else { + regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val); + imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val); + } + + if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) { + if (val < min_vswing || val > max_vswing) { + dev_err(combophy->dev, "Invalid value %u for output vswing\n", val); + return -EINVAL; + } + + for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) + if (imp_lookup[imp_of].vswing[vswing_of] >= val) + break; + + dev_dbg(combophy->dev, "Set %u microvolt swing\n", + imp_lookup[imp_of].vswing[vswing_of]); + + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRG_IMPCTRL_VSWING, + FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of)); + } + + return 0; +} + +static int stm32_combophy_pll_init(struct stm32_combophy *combophy) +{ + int ret; + u32 refclksel, pllmult, propcntrl, val; + u32 clk_rate; + struct clk *clk; + u32 cr1_val = 0, cr1_mask = 0; + + if (combophy->have_pad_clk) + clk = combophy->clks[PAD_CLK].clk; + else + clk = combophy->clks[KER_CLK].clk; + + clk_rate = clk_get_rate(clk); + + dev_dbg(combophy->dev, "%s pll init rate %d\n", + combophy->have_pad_clk ? 
"External" : "Ker", clk_rate); + + if (combophy->type != PHY_TYPE_PCIE) { + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFSSPEN; + cr1_val |= SYSCFG_COMBOPHY_CR1_REFSSPEN; + } + + if (of_property_present(combophy->dev->of_node, "st,ssc-on")) { + dev_dbg(combophy->dev, "Enabling clock with SSC\n"); + cr1_mask |= SYSCFG_COMBOPHY_CR1_SSCEN; + cr1_val |= SYSCFG_COMBOPHY_CR1_SSCEN; + } + + switch (clk_rate) { + case 100000000: + pllmult = MPLLMULT_100; + refclksel = REFCLKSEL_0; + propcntrl = 0x8u << 4; + break; + case 19200000: + pllmult = MPLLMULT_19_2; + refclksel = REFCLKSEL_1; + propcntrl = 0x8u << 4; + break; + case 25000000: + pllmult = MPLLMULT_25; + refclksel = REFCLKSEL_0; + propcntrl = 0xeu << 4; + break; + case 24000000: + pllmult = MPLLMULT_24; + refclksel = REFCLKSEL_1; + propcntrl = 0xeu << 4; + break; + case 20000000: + pllmult = MPLLMULT_20; + refclksel = REFCLKSEL_0; + propcntrl = 0xeu << 4; + break; + default: + dev_err(combophy->dev, "Invalid rate 0x%x\n", clk_rate); + return -EINVAL; + } + + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFCLKDIV2; + cr1_val |= REFCLDIV_0; + + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFCLKSEL; + cr1_val |= refclksel; + + cr1_mask |= SYSCFG_COMBOPHY_CR1_MPLLMULT; + cr1_val |= pllmult; + + /* + * vddcombophy is interconnected with vddcore. Isolation bit should be unset + * before using the ComboPHY. + */ + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, + SYSCFG_COMBOPHY_CR2_ISO_DIS, SYSCFG_COMBOPHY_CR2_ISO_DIS); + + reset_control_assert(combophy->phy_reset); + + if (combophy->type == PHY_TYPE_PCIE) { + ret = stm32_impedance_tune(combophy); + if (ret) + goto out_iso; + + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFUSEPAD; + cr1_val |= combophy->have_pad_clk ? SYSCFG_COMBOPHY_CR1_REFUSEPAD : 0; + } + + if (!of_property_read_u32(combophy->dev->of_node, "st,rx-equalizer", &val)) { + dev_dbg(combophy->dev, "Set RX equalizer %u\n", val); + if (val > SYSCFG_COMBOPHY_CR4_RX0_EQ) { + dev_err(combophy->dev, "Invalid value %u for rx0 equalizer\n", val); + ret = -EINVAL; + goto out_iso; + } + + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR4, + SYSCFG_COMBOPHY_CR4_RX0_EQ, val); + } + + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR1, cr1_mask, cr1_val); + + /* + * Force elasticity buffer to be tuned for the reference clock as + * the separated clock model is not supported + */ + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR5, + SYSCFG_COMBOPHY_CR5_COMMON_CLOCKS, SYSCFG_COMBOPHY_CR5_COMMON_CLOCKS); + + reset_control_deassert(combophy->phy_reset); + + ret = regmap_read_poll_timeout(combophy->regmap, SYSCFG_COMBOPHY_SR, val, + !(val & STM32MP25_PIPE0_PHYSTATUS), + 10, 1000); + if (ret) { + dev_err(combophy->dev, "timeout, cannot lock PLL\n"); + if (combophy->type == PHY_TYPE_PCIE && !combophy->have_pad_clk) + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRGCR_EN, 0); + + if (combophy->type != PHY_TYPE_PCIE) + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR1, + SYSCFG_COMBOPHY_CR1_REFSSPEN, 0); + + goto out; + } + + + if (combophy->type == PHY_TYPE_PCIE) { + if (!combophy->have_pad_clk) + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRGCR_EN, STM32MP25_PCIEPRGCR_EN); + + val = readl_relaxed(combophy->base + COMBOPHY_SUP_ANA_MPLL_LOOP_CTL); + val &= ~COMBOPHY_PROP_CNTRL; + val |= propcntrl; + writel_relaxed(val, combophy->base + COMBOPHY_SUP_ANA_MPLL_LOOP_CTL); + } + + return 0; + +out_iso: + reset_control_deassert(combophy->phy_reset); + +out: + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, 
+ SYSCFG_COMBOPHY_CR2_ISO_DIS, 0); + + return ret; +} + +static struct phy *stm32_combophy_xlate(struct device *dev, + const struct of_phandle_args *args) +{ + struct stm32_combophy *combophy = dev_get_drvdata(dev); + unsigned int type; + + if (args->args_count != 1) { + dev_err(dev, "invalid number of cells in 'phy' property\n"); + return ERR_PTR(-EINVAL); + } + + type = args->args[0]; + if (type != PHY_TYPE_USB3 && type != PHY_TYPE_PCIE) { + dev_err(dev, "unsupported device type: %d\n", type); + return ERR_PTR(-EINVAL); + } + + if (combophy->have_pad_clk && type != PHY_TYPE_PCIE) { + dev_err(dev, "Invalid use of clk_pad for USB3 mode\n"); + return ERR_PTR(-EINVAL); + } + + combophy->type = type; + + return combophy->phy; +} + +static int stm32_combophy_set_mode(struct stm32_combophy *combophy) +{ + int type = combophy->type; + u32 val; + + switch (type) { + case PHY_TYPE_PCIE: + dev_dbg(combophy->dev, "setting PCIe ComboPHY\n"); + val = COMBOPHY_MODESEL_PCIE; + break; + case PHY_TYPE_USB3: + dev_dbg(combophy->dev, "setting USB3 ComboPHY\n"); + val = COMBOPHY_MODESEL_USB; + break; + default: + dev_err(combophy->dev, "Invalid PHY mode %d\n", type); + return -EINVAL; + } + + return regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, + SYSCFG_COMBOPHY_CR2_MODESEL, val); +} + +static int stm32_combophy_suspend_noirq(struct device *dev) +{ + struct stm32_combophy *combophy = dev_get_drvdata(dev); + + /* + * Clocks should be turned off since it is not needed for + * wakeup capability. In case usb-remote wakeup is not enabled, + * combo-phy is already turned off by HCD driver using exit callback + */ + if (combophy->is_init) { + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + + /* since wakeup is enabled for ctrl */ + enable_irq_wake(combophy->irq_wakeup); + } + + return 0; +} + +static int stm32_combophy_resume_noirq(struct device *dev) +{ + struct stm32_combophy *combophy = dev_get_drvdata(dev); + int ret; + + /* + * If clocks was turned off by suspend call for wakeup then needs + * to be turned back ON in resume. 
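+ * The disable_irq_wake() below balances the enable_irq_wake() done in
+ * the suspend callback.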
In case usb-remote wakeup is not + * enabled, clocks already turned ON by HCD driver using init callback + */ + if (combophy->is_init) { + /* since wakeup was enabled for ctrl */ + disable_irq_wake(combophy->irq_wakeup); + + ret = clk_bulk_prepare_enable(combophy->num_clks, combophy->clks); + if (ret) { + dev_err(dev, "can't enable clocks (%d)\n", ret); + return ret; + } + } + + return 0; +} + +static int stm32_combophy_exit(struct phy *phy) +{ + struct stm32_combophy *combophy = phy_get_drvdata(phy); + struct device *dev = combophy->dev; + + combophy->is_init = false; + + if (combophy->type == PHY_TYPE_PCIE && !combophy->have_pad_clk) + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRGCR_EN, 0); + + if (combophy->type != PHY_TYPE_PCIE) + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR1, + SYSCFG_COMBOPHY_CR1_REFSSPEN, 0); + + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, + SYSCFG_COMBOPHY_CR2_ISO_DIS, 0); + + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + + pm_runtime_put_noidle(dev); + + return 0; +} + +static int stm32_combophy_init(struct phy *phy) +{ + struct stm32_combophy *combophy = phy_get_drvdata(phy); + struct device *dev = combophy->dev; + int ret; + + pm_runtime_get_noresume(dev); + + ret = clk_bulk_prepare_enable(combophy->num_clks, combophy->clks); + if (ret) { + dev_err(dev, "can't enable clocks (%d)\n", ret); + pm_runtime_put_noidle(dev); + return ret; + } + + ret = stm32_combophy_set_mode(combophy); + if (ret) { + dev_err(dev, "combophy mode not set\n"); + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + pm_runtime_put_noidle(dev); + return ret; + } + + ret = stm32_combophy_pll_init(combophy); + if (ret) { + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + pm_runtime_put_noidle(dev); + return ret; + } + + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + combophy->is_init = true; + + return ret; +} + +static const struct phy_ops stm32_combophy_phy_data = { + .init = stm32_combophy_init, + .exit = stm32_combophy_exit, + .owner = THIS_MODULE +}; + +static irqreturn_t stm32_combophy_irq_wakeup_handler(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static int stm32_combophy_get_clocks(struct stm32_combophy *combophy) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(combophy_clks); i++) + combophy->clks[i].id = combophy_clks[i]; + + combophy->num_clks = ARRAY_SIZE(combophy_clks) - 1; + + ret = devm_clk_bulk_get(combophy->dev, combophy->num_clks, combophy->clks); + if (ret) + return ret; + + ret = devm_clk_bulk_get_optional(combophy->dev, 1, combophy->clks + combophy->num_clks); + if (ret) + return ret; + + if (combophy->clks[combophy->num_clks].clk != NULL) { + combophy->have_pad_clk = true; + combophy->num_clks++; + } + + return 0; +} + +static int stm32_combophy_probe(struct platform_device *pdev) +{ + struct stm32_combophy *combophy; + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + int ret, irq; + + combophy = devm_kzalloc(dev, sizeof(*combophy), GFP_KERNEL); + if (!combophy) + return -ENOMEM; + + combophy->dev = dev; + + dev_set_drvdata(dev, combophy); + + combophy->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(combophy->base)) + return PTR_ERR(combophy->base); + + ret = stm32_combophy_get_clocks(combophy); + if (ret) + return ret; + + combophy->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(combophy->phy_reset)) + return dev_err_probe(dev, PTR_ERR(combophy->phy_reset), + 
"Failed to get PHY reset\n"); + + combophy->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg"); + if (IS_ERR(combophy->regmap)) + return dev_err_probe(dev, PTR_ERR(combophy->regmap), + "No syscfg specified\n"); + + combophy->phy = devm_phy_create(dev, NULL, &stm32_combophy_phy_data); + if (IS_ERR(combophy->phy)) + return dev_err_probe(dev, PTR_ERR(combophy->phy), + "failed to create PCIe/USB3 ComboPHY\n"); + + if (device_property_read_bool(dev, "wakeup-source")) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "failed to get IRQ\n"); + combophy->irq_wakeup = irq; + + ret = devm_request_threaded_irq(dev, combophy->irq_wakeup, NULL, + stm32_combophy_irq_wakeup_handler, IRQF_ONESHOT, + NULL, NULL); + if (ret) + return dev_err_probe(dev, ret, "unable to request wake IRQ %d\n", + combophy->irq_wakeup); + } + + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable pm runtime\n"); + + phy_set_drvdata(combophy->phy, combophy); + + phy_provider = devm_of_phy_provider_register(dev, stm32_combophy_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct dev_pm_ops stm32_combophy_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_combophy_suspend_noirq, + stm32_combophy_resume_noirq) +}; + +static const struct of_device_id stm32_combophy_of_match[] = { + { .compatible = "st,stm32mp25-combophy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, stm32_combophy_of_match); + +static struct platform_driver stm32_combophy_driver = { + .probe = stm32_combophy_probe, + .driver = { + .name = "stm32-combophy", + .of_match_table = stm32_combophy_of_match, + .pm = pm_sleep_ptr(&stm32_combophy_pm_ops) + } +}; + +module_platform_driver(stm32_combophy_driver); + +MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>"); +MODULE_DESCRIPTION("STM32MP25 Combophy USB3/PCIe controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 9dbe60dcf319..b917cd413de7 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -812,7 +812,7 @@ MODULE_DEVICE_TABLE(of, stm32_usbphyc_of_match); static struct platform_driver stm32_usbphyc_driver = { .probe = stm32_usbphyc_probe, - .remove_new = stm32_usbphyc_remove, + .remove = stm32_usbphyc_remove, .driver = { .of_match_table = stm32_usbphyc_of_match, .name = "stm32-usbphyc", diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 342f5ccf611d..f9b862c1c438 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1327,7 +1327,7 @@ static struct platform_driver tegra_xusb_padctl_driver = { .pm = &tegra_xusb_padctl_pm_ops, }, .probe = tegra_xusb_padctl_probe, - .remove_new = tegra_xusb_padctl_remove, + .remove = tegra_xusb_padctl_remove, }; module_platform_driver(tegra_xusb_padctl_driver); diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 3bf3aff4b1c7..431b223996e0 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -837,7 +837,7 @@ static void serdes_am654_remove(struct platform_device *pdev) static struct platform_driver serdes_am654_driver = { .probe = serdes_am654_probe, - .remove_new = serdes_am654_remove, + .remove = serdes_am654_remove, .driver = { .name = "phy-am654", .of_match_table = serdes_am654_id_table, diff --git a/drivers/phy/ti/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c index 68aa595b6ad8..1d81a1e6ec6b 100644 --- a/drivers/phy/ti/phy-da8xx-usb.c +++ 
b/drivers/phy/ti/phy-da8xx-usb.c @@ -277,11 +277,11 @@ MODULE_DEVICE_TABLE(of, da8xx_usb_phy_ids); static struct platform_driver da8xx_usb_phy_driver = { .probe = da8xx_usb_phy_probe, - .remove_new = da8xx_usb_phy_remove, + .remove = da8xx_usb_phy_remove, .driver = { .name = "da8xx-usb-phy", .pm = &da8xx_usb_phy_pm_ops, - .of_match_table = da8xx_usb_phy_ids, + .of_match_table = da8xx_usb_phy_ids, }, }; diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c index d5ae972a31fb..e8f842d4e841 100644 --- a/drivers/phy/ti/phy-dm816x-usb.c +++ b/drivers/phy/ti/phy-dm816x-usb.c @@ -259,7 +259,7 @@ static void dm816x_usb_phy_remove(struct platform_device *pdev) static struct platform_driver dm816x_usb_phy_driver = { .probe = dm816x_usb_phy_probe, - .remove_new = dm816x_usb_phy_remove, + .remove = dm816x_usb_phy_remove, .driver = { .name = "dm816x-usb-phy", .pm = &dm816x_usb_phy_pm_ops, diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 103b266fec77..e0ca59ae3153 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -230,7 +230,8 @@ static const struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = { .use_of_data = true, .regfields = phy_gmii_sel_fields_am654, - .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), + .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | + BIT(PHY_INTERFACE_MODE_USXGMII), .num_ports = 4, .num_qsgmii_main_ports = 1, }; diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index c6e846d385d2..ab2a4f2c0a5b 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -1685,7 +1685,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(wiz_pm_ops, NULL, wiz_resume_noirq); static struct platform_driver wiz_driver = { .probe = wiz_probe, - .remove_new = wiz_remove, + .remove = wiz_remove, .driver = { .name = "wiz", .of_match_table = wiz_id_table, diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index 78e19b128962..c1a0ef979142 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -511,7 +511,7 @@ static void omap_usb2_remove(struct platform_device *pdev) static struct platform_driver omap_usb2_driver = { .probe = omap_usb2_probe, - .remove_new = omap_usb2_remove, + .remove = omap_usb2_remove, .driver = { .name = "omap-usb2", .of_match_table = omap_usb2_id_table, diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c index 874c1a25ce36..da2cbacb982c 100644 --- a/drivers/phy/ti/phy-ti-pipe3.c +++ b/drivers/phy/ti/phy-ti-pipe3.c @@ -920,7 +920,7 @@ MODULE_DEVICE_TABLE(of, ti_pipe3_id_table); static struct platform_driver ti_pipe3_driver = { .probe = ti_pipe3_probe, - .remove_new = ti_pipe3_remove, + .remove = ti_pipe3_remove, .driver = { .name = "ti-pipe3", .of_match_table = ti_pipe3_id_table, diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index 6b265992d988..6f12b38cd894 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -834,7 +834,7 @@ MODULE_DEVICE_TABLE(of, twl4030_usb_id_table); static struct platform_driver twl4030_usb_driver = { .probe = twl4030_usb_probe, - .remove_new = twl4030_usb_remove, + .remove = twl4030_usb_remove, .driver = { .name = "twl4030_usb", .pm = &twl4030_usb_pm_ops, diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index e6579002f114..05a4a59f7c40 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ 
b/drivers/phy/xilinx/phy-zynqmp.c @@ -1071,7 +1071,7 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match); static struct platform_driver xpsgtr_driver = { .probe = xpsgtr_probe, - .remove_new = xpsgtr_remove, + .remove = xpsgtr_remove, .driver = { .name = "xilinx-psgtr", .of_match_table = xpsgtr_of_match, diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 354536de564b..95a8e2b9a614 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -268,6 +268,17 @@ config PINCTRL_K210 Add support for the Canaan Kendryte K210 RISC-V SOC Field Programmable IO Array (FPIOA) controller. +config PINCTRL_K230 + bool "Pinctrl driver for the Canaan Kendryte K230 SoC" + depends on OF + depends on ARCH_CANAAN || COMPILE_TEST + select GENERIC_PINCTRL_GROUPS + select GENERIC_PINMUX_FUNCTIONS + select GENERIC_PINCONF + select REGMAP_MMIO + help + Add support for the Canaan Kendryte K230 RISC-V SOC pin controller. + config PINCTRL_KEEMBAY tristate "Pinctrl driver for Intel Keem Bay SoC" depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST) @@ -551,6 +562,20 @@ config PINCTRL_TPS6594 This driver can also be built as a module called tps6594-pinctrl. +config PINCTRL_TH1520 + tristate "Pinctrl driver for the T-Head TH1520 SoC" + depends on ARCH_THEAD || COMPILE_TEST + depends on OF + select GENERIC_PINMUX_FUNCTIONS + select GENERIC_PINCONF + select PINMUX + help + This is the driver for the pin controller blocks on the + T-Head TH1520 SoC. + + This driver is needed for RISC-V development boards like + the BeagleV Ahead and the LicheePi 4A. + config PINCTRL_ZYNQ bool "Pinctrl driver for Xilinx Zynq" depends on ARCH_ZYNQ @@ -606,6 +631,7 @@ source "drivers/pinctrl/realtek/Kconfig" source "drivers/pinctrl/renesas/Kconfig" source "drivers/pinctrl/samsung/Kconfig" source "drivers/pinctrl/sophgo/Kconfig" +source "drivers/pinctrl/spacemit/Kconfig" source "drivers/pinctrl/spear/Kconfig" source "drivers/pinctrl/sprd/Kconfig" source "drivers/pinctrl/starfive/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 97823f52b972..fba1c56624c0 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_EYEQ5) += pinctrl-eyeq5.o obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o +obj-$(CONFIG_PINCTRL_K230) += pinctrl-k230.o obj-$(CONFIG_PINCTRL_KEEMBAY) += pinctrl-keembay.o obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o @@ -54,6 +55,7 @@ obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_TPS6594) += pinctrl-tps6594.o +obj-$(CONFIG_PINCTRL_TH1520) += pinctrl-th1520.o obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o @@ -76,6 +78,7 @@ obj-$(CONFIG_ARCH_REALTEK) += realtek/ obj-$(CONFIG_PINCTRL_RENESAS) += renesas/ obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/ obj-y += sophgo/ +obj-y += spacemit/ obj-$(CONFIG_PINCTRL_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_SOC_STARFIVE) += starfive/ diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index 6ecc656abc44..5a7cd0a88687 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -2607,6 +2607,10 @@ static struct aspeed_pin_config aspeed_g6_configs[] = { { PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, 
GENMASK(27, 26)}, /* LAD0 */ { PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)}, + /* GPIOF */ + { PIN_CONFIG_DRIVE_STRENGTH, { D22, A23 }, SCU458, GENMASK(9, 8)}, + /* GPIOG */ + { PIN_CONFIG_DRIVE_STRENGTH, { E21, B21 }, SCU458, GENMASK(11, 10)}, /* MAC3 */ { PIN_CONFIG_POWER_SOURCE, { H24, E26 }, SCU458, BIT_MASK(4)}, diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index 2932d7aba725..73ec5b9beb49 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -1091,7 +1091,7 @@ static void madera_pin_remove(struct platform_device *pdev) static struct platform_driver madera_pin_driver = { .probe = madera_pin_probe, - .remove_new = madera_pin_remove, + .remove = madera_pin_remove, .driver = { .name = "madera-pinctrl", }, diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 4061890a1748..b3eec63c00ba 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -220,6 +220,9 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev, /* Set owner */ pindesc->pctldev = pctldev; +#ifdef CONFIG_PINMUX + mutex_init(&pindesc->mux_lock); +#endif /* Copy basic pin info */ if (pin->name) { diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 4e07707d2435..d6c24978e708 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -177,6 +177,7 @@ struct pin_desc { const char *mux_owner; const struct pinctrl_setting_mux *mux_setting; const char *gpio_owner; + struct mutex mux_lock; #endif }; diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig index 3b59d7189004..4c420b21b804 100644 --- a/drivers/pinctrl/freescale/Kconfig +++ b/drivers/pinctrl/freescale/Kconfig @@ -9,7 +9,7 @@ config PINCTRL_IMX config PINCTRL_IMX_SCMI tristate "i.MX95 pinctrl driver using SCMI protocol interface" - depends on ARM_SCMI_PROTOCOL && OF || COMPILE_TEST + depends on ARM_SCMI_PROTOCOL && OF select PINMUX select GENERIC_PINCONF select GENERIC_PINCTRL_GROUPS @@ -20,7 +20,7 @@ config PINCTRL_IMX_SCMI config PINCTRL_IMX_SCU tristate - depends on IMX_SCU + depends on IMX_SCU || COMPILE_TEST select PINCTRL_IMX config PINCTRL_IMX1_CORE @@ -30,14 +30,18 @@ config PINCTRL_IMX1_CORE config PINCTRL_IMX1 bool "IMX1 pinctrl driver" - depends on SOC_IMX1 + depends on OF + depends on SOC_IMX1 || COMPILE_TEST + default SOC_IMX1 select PINCTRL_IMX1_CORE help Say Y here to enable the imx1 pinctrl driver config PINCTRL_IMX27 bool "IMX27 pinctrl driver" - depends on SOC_IMX27 + depends on OF + depends on SOC_IMX27 || COMPILE_TEST + default SOC_IMX27 select PINCTRL_IMX1_CORE help Say Y here to enable the imx27 pinctrl driver @@ -46,84 +50,107 @@ config PINCTRL_IMX27 config PINCTRL_IMX25 bool "IMX25 pinctrl driver" depends on OF - depends on SOC_IMX25 + depends on SOC_IMX25 || COMPILE_TEST + default SOC_IMX25 select PINCTRL_IMX help Say Y here to enable the imx25 pinctrl driver config PINCTRL_IMX35 bool "IMX35 pinctrl driver" - depends on SOC_IMX35 + depends on OF + depends on SOC_IMX35 || COMPILE_TEST + default SOC_IMX35 select PINCTRL_IMX help Say Y here to enable the imx35 pinctrl driver config PINCTRL_IMX50 bool "IMX50 pinctrl driver" - depends on SOC_IMX50 + depends on OF + depends on SOC_IMX50 || COMPILE_TEST + default SOC_IMX50 select PINCTRL_IMX help Say Y here to enable the imx50 pinctrl driver config PINCTRL_IMX51 bool "IMX51 pinctrl driver" - depends on SOC_IMX51 + depends on OF + depends on SOC_IMX51 || COMPILE_TEST + default 
SOC_IMX51 select PINCTRL_IMX help Say Y here to enable the imx51 pinctrl driver config PINCTRL_IMX53 bool "IMX53 pinctrl driver" - depends on SOC_IMX53 + depends on OF + depends on SOC_IMX53 || COMPILE_TEST + default SOC_IMX53 select PINCTRL_IMX help Say Y here to enable the imx53 pinctrl driver config PINCTRL_IMX6Q bool "IMX6Q/DL pinctrl driver" - depends on SOC_IMX6Q + depends on OF + depends on SOC_IMX6Q || COMPILE_TEST + default SOC_IMX6Q select PINCTRL_IMX help Say Y here to enable the imx6q/dl pinctrl driver config PINCTRL_IMX6SL bool "IMX6SL pinctrl driver" - depends on SOC_IMX6SL + depends on OF + depends on SOC_IMX6SL || COMPILE_TEST + default SOC_IMX6SL select PINCTRL_IMX help Say Y here to enable the imx6sl pinctrl driver config PINCTRL_IMX6SLL bool "IMX6SLL pinctrl driver" - depends on SOC_IMX6SLL + depends on OF + depends on SOC_IMX6SLL || COMPILE_TEST + default SOC_IMX6SLL select PINCTRL_IMX help Say Y here to enable the imx6sll pinctrl driver config PINCTRL_IMX6SX bool "IMX6SX pinctrl driver" - depends on SOC_IMX6SX + depends on OF + depends on SOC_IMX6SX || COMPILE_TEST + default SOC_IMX6SX select PINCTRL_IMX help Say Y here to enable the imx6sx pinctrl driver config PINCTRL_IMX6UL bool "IMX6UL pinctrl driver" - depends on SOC_IMX6UL + depends on OF + depends on SOC_IMX6UL || COMPILE_TEST + default SOC_IMX6UL select PINCTRL_IMX help Say Y here to enable the imx6ul pinctrl driver config PINCTRL_IMX7D bool "IMX7D pinctrl driver" - depends on SOC_IMX7D + depends on OF + depends on SOC_IMX7D || COMPILE_TEST + default SOC_IMX7D select PINCTRL_IMX help Say Y here to enable the imx7d pinctrl driver config PINCTRL_IMX7ULP bool "IMX7ULP pinctrl driver" - depends on SOC_IMX7ULP + depends on OF + depends on SOC_IMX7ULP || COMPILE_TEST + default SOC_IMX7ULP select PINCTRL_IMX help Say Y here to enable the imx7ulp pinctrl driver @@ -131,7 +158,7 @@ config PINCTRL_IMX7ULP config PINCTRL_IMX8MM tristate "IMX8MM pinctrl driver" depends on OF - depends on SOC_IMX8M + depends on SOC_IMX8M || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imx8mm pinctrl driver @@ -139,7 +166,7 @@ config PINCTRL_IMX8MM config PINCTRL_IMX8MN tristate "IMX8MN pinctrl driver" depends on OF - depends on SOC_IMX8M + depends on SOC_IMX8M || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imx8mn pinctrl driver @@ -147,7 +174,7 @@ config PINCTRL_IMX8MN config PINCTRL_IMX8MP tristate "IMX8MP pinctrl driver" depends on OF - depends on SOC_IMX8M + depends on SOC_IMX8M || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imx8mp pinctrl driver @@ -155,42 +182,48 @@ config PINCTRL_IMX8MP config PINCTRL_IMX8MQ tristate "IMX8MQ pinctrl driver" depends on OF - depends on SOC_IMX8M + depends on SOC_IMX8M || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imx8mq pinctrl driver config PINCTRL_IMX8QM tristate "IMX8QM pinctrl driver" - depends on IMX_SCU && ARCH_MXC && ARM64 + depends on OF + depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST select PINCTRL_IMX_SCU help Say Y here to enable the imx8qm pinctrl driver config PINCTRL_IMX8QXP tristate "IMX8QXP pinctrl driver" - depends on IMX_SCU && ARCH_MXC && ARM64 + depends on OF + depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST select PINCTRL_IMX_SCU help Say Y here to enable the imx8qxp pinctrl driver config PINCTRL_IMX8DXL tristate "IMX8DXL pinctrl driver" - depends on IMX_SCU && ARCH_MXC && ARM64 + depends on OF + depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST select PINCTRL_IMX_SCU help Say Y 
here to enable the imx8dxl pinctrl driver config PINCTRL_IMX8ULP tristate "IMX8ULP pinctrl driver" - depends on ARCH_MXC + depends on OF + depends on ARCH_MXC || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imx8ulp pinctrl driver config PINCTRL_IMXRT1050 bool "IMXRT1050 pinctrl driver" - depends on ARCH_MXC + depends on OF + depends on SOC_IMXRT || COMPILE_TEST + default SOC_IMXRT select PINCTRL_IMX help Say Y here to enable the imxrt1050 pinctrl driver @@ -204,14 +237,17 @@ config PINCTRL_IMX91 config PINCTRL_IMX93 tristate "IMX93 pinctrl driver" - depends on ARCH_MXC + depends on OF + depends on ARCH_MXC || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imx93 pinctrl driver config PINCTRL_VF610 bool "Freescale Vybrid VF610 pinctrl driver" - depends on SOC_VF610 + depends on OF + depends on SOC_VF610 || COMPILE_TEST + default SOC_VF610 select PINCTRL_IMX help Say Y here to enable the Freescale Vybrid VF610 pinctrl driver @@ -231,7 +267,8 @@ config PINCTRL_IMX28 config PINCTRL_IMXRT1170 bool "IMXRT1170 pinctrl driver" - depends on ARCH_MXC + depends on OF + depends on SOC_IMXRT || COMPILE_TEST select PINCTRL_IMX help Say Y here to enable the imxrt1170 pinctrl driver diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index d05c2c478e79..842a1e6cbfc4 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -633,11 +633,11 @@ static int imx_pinctrl_parse_functions(struct device_node *np, static bool imx_pinctrl_dt_is_flat_functions(struct device_node *np) { for_each_child_of_node_scoped(np, function_np) { - if (of_property_read_bool(function_np, "fsl,pins")) + if (of_property_present(function_np, "fsl,pins")) return true; for_each_child_of_node_scoped(function_np, pinctrl_np) { - if (of_property_read_bool(pinctrl_np, "fsl,pins")) + if (of_property_present(pinctrl_np, "fsl,pins")) return false; } } @@ -746,7 +746,7 @@ int imx_pinctrl_probe(struct platform_device *pdev, if (IS_ERR(ipctl->base)) return PTR_ERR(ipctl->base); - if (of_property_read_bool(dev_np, "fsl,input-sel")) { + if (of_property_present(dev_np, "fsl,input-sel")) { np = of_parse_phandle(dev_np, "fsl,input-sel", 0); if (!np) { dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c index 1e2b0fe9ffd6..bd39cadf1f34 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1.c @@ -12,122 +12,122 @@ #include "pinctrl-imx1.h" #define PAD_ID(port, pin) ((port) * 32 + (pin)) -#define PA 0 -#define PB 1 -#define PC 2 -#define PD 3 +#define IMX1_PA 0 +#define IMX1_PB 1 +#define IMX1_PC 2 +#define IMX1_PD 3 enum imx1_pads { - MX1_PAD_A24 = PAD_ID(PA, 0), - MX1_PAD_TIN = PAD_ID(PA, 1), - MX1_PAD_PWMO = PAD_ID(PA, 2), - MX1_PAD_CSI_MCLK = PAD_ID(PA, 3), - MX1_PAD_CSI_D0 = PAD_ID(PA, 4), - MX1_PAD_CSI_D1 = PAD_ID(PA, 5), - MX1_PAD_CSI_D2 = PAD_ID(PA, 6), - MX1_PAD_CSI_D3 = PAD_ID(PA, 7), - MX1_PAD_CSI_D4 = PAD_ID(PA, 8), - MX1_PAD_CSI_D5 = PAD_ID(PA, 9), - MX1_PAD_CSI_D6 = PAD_ID(PA, 10), - MX1_PAD_CSI_D7 = PAD_ID(PA, 11), - MX1_PAD_CSI_VSYNC = PAD_ID(PA, 12), - MX1_PAD_CSI_HSYNC = PAD_ID(PA, 13), - MX1_PAD_CSI_PIXCLK = PAD_ID(PA, 14), - MX1_PAD_I2C_SDA = PAD_ID(PA, 15), - MX1_PAD_I2C_SCL = PAD_ID(PA, 16), - MX1_PAD_DTACK = PAD_ID(PA, 17), - MX1_PAD_BCLK = PAD_ID(PA, 18), - MX1_PAD_LBA = PAD_ID(PA, 19), - MX1_PAD_ECB = PAD_ID(PA, 20), - MX1_PAD_A0 = PAD_ID(PA, 21), - MX1_PAD_CS4 
= PAD_ID(PA, 22), - MX1_PAD_CS5 = PAD_ID(PA, 23), - MX1_PAD_A16 = PAD_ID(PA, 24), - MX1_PAD_A17 = PAD_ID(PA, 25), - MX1_PAD_A18 = PAD_ID(PA, 26), - MX1_PAD_A19 = PAD_ID(PA, 27), - MX1_PAD_A20 = PAD_ID(PA, 28), - MX1_PAD_A21 = PAD_ID(PA, 29), - MX1_PAD_A22 = PAD_ID(PA, 30), - MX1_PAD_A23 = PAD_ID(PA, 31), - MX1_PAD_SD_DAT0 = PAD_ID(PB, 8), - MX1_PAD_SD_DAT1 = PAD_ID(PB, 9), - MX1_PAD_SD_DAT2 = PAD_ID(PB, 10), - MX1_PAD_SD_DAT3 = PAD_ID(PB, 11), - MX1_PAD_SD_SCLK = PAD_ID(PB, 12), - MX1_PAD_SD_CMD = PAD_ID(PB, 13), - MX1_PAD_SIM_SVEN = PAD_ID(PB, 14), - MX1_PAD_SIM_PD = PAD_ID(PB, 15), - MX1_PAD_SIM_TX = PAD_ID(PB, 16), - MX1_PAD_SIM_RX = PAD_ID(PB, 17), - MX1_PAD_SIM_RST = PAD_ID(PB, 18), - MX1_PAD_SIM_CLK = PAD_ID(PB, 19), - MX1_PAD_USBD_AFE = PAD_ID(PB, 20), - MX1_PAD_USBD_OE = PAD_ID(PB, 21), - MX1_PAD_USBD_RCV = PAD_ID(PB, 22), - MX1_PAD_USBD_SUSPND = PAD_ID(PB, 23), - MX1_PAD_USBD_VP = PAD_ID(PB, 24), - MX1_PAD_USBD_VM = PAD_ID(PB, 25), - MX1_PAD_USBD_VPO = PAD_ID(PB, 26), - MX1_PAD_USBD_VMO = PAD_ID(PB, 27), - MX1_PAD_UART2_CTS = PAD_ID(PB, 28), - MX1_PAD_UART2_RTS = PAD_ID(PB, 29), - MX1_PAD_UART2_TXD = PAD_ID(PB, 30), - MX1_PAD_UART2_RXD = PAD_ID(PB, 31), - MX1_PAD_SSI_RXFS = PAD_ID(PC, 3), - MX1_PAD_SSI_RXCLK = PAD_ID(PC, 4), - MX1_PAD_SSI_RXDAT = PAD_ID(PC, 5), - MX1_PAD_SSI_TXDAT = PAD_ID(PC, 6), - MX1_PAD_SSI_TXFS = PAD_ID(PC, 7), - MX1_PAD_SSI_TXCLK = PAD_ID(PC, 8), - MX1_PAD_UART1_CTS = PAD_ID(PC, 9), - MX1_PAD_UART1_RTS = PAD_ID(PC, 10), - MX1_PAD_UART1_TXD = PAD_ID(PC, 11), - MX1_PAD_UART1_RXD = PAD_ID(PC, 12), - MX1_PAD_SPI1_RDY = PAD_ID(PC, 13), - MX1_PAD_SPI1_SCLK = PAD_ID(PC, 14), - MX1_PAD_SPI1_SS = PAD_ID(PC, 15), - MX1_PAD_SPI1_MISO = PAD_ID(PC, 16), - MX1_PAD_SPI1_MOSI = PAD_ID(PC, 17), - MX1_PAD_BT13 = PAD_ID(PC, 19), - MX1_PAD_BT12 = PAD_ID(PC, 20), - MX1_PAD_BT11 = PAD_ID(PC, 21), - MX1_PAD_BT10 = PAD_ID(PC, 22), - MX1_PAD_BT9 = PAD_ID(PC, 23), - MX1_PAD_BT8 = PAD_ID(PC, 24), - MX1_PAD_BT7 = PAD_ID(PC, 25), - MX1_PAD_BT6 = PAD_ID(PC, 26), - MX1_PAD_BT5 = PAD_ID(PC, 27), - MX1_PAD_BT4 = PAD_ID(PC, 28), - MX1_PAD_BT3 = PAD_ID(PC, 29), - MX1_PAD_BT2 = PAD_ID(PC, 30), - MX1_PAD_BT1 = PAD_ID(PC, 31), - MX1_PAD_LSCLK = PAD_ID(PD, 6), - MX1_PAD_REV = PAD_ID(PD, 7), - MX1_PAD_CLS = PAD_ID(PD, 8), - MX1_PAD_PS = PAD_ID(PD, 9), - MX1_PAD_SPL_SPR = PAD_ID(PD, 10), - MX1_PAD_CONTRAST = PAD_ID(PD, 11), - MX1_PAD_ACD_OE = PAD_ID(PD, 12), - MX1_PAD_LP_HSYNC = PAD_ID(PD, 13), - MX1_PAD_FLM_VSYNC = PAD_ID(PD, 14), - MX1_PAD_LD0 = PAD_ID(PD, 15), - MX1_PAD_LD1 = PAD_ID(PD, 16), - MX1_PAD_LD2 = PAD_ID(PD, 17), - MX1_PAD_LD3 = PAD_ID(PD, 18), - MX1_PAD_LD4 = PAD_ID(PD, 19), - MX1_PAD_LD5 = PAD_ID(PD, 20), - MX1_PAD_LD6 = PAD_ID(PD, 21), - MX1_PAD_LD7 = PAD_ID(PD, 22), - MX1_PAD_LD8 = PAD_ID(PD, 23), - MX1_PAD_LD9 = PAD_ID(PD, 24), - MX1_PAD_LD10 = PAD_ID(PD, 25), - MX1_PAD_LD11 = PAD_ID(PD, 26), - MX1_PAD_LD12 = PAD_ID(PD, 27), - MX1_PAD_LD13 = PAD_ID(PD, 28), - MX1_PAD_LD14 = PAD_ID(PD, 29), - MX1_PAD_LD15 = PAD_ID(PD, 30), - MX1_PAD_TMR2OUT = PAD_ID(PD, 31), + MX1_PAD_A24 = PAD_ID(IMX1_PA, 0), + MX1_PAD_TIN = PAD_ID(IMX1_PA, 1), + MX1_PAD_PWMO = PAD_ID(IMX1_PA, 2), + MX1_PAD_CSI_MCLK = PAD_ID(IMX1_PA, 3), + MX1_PAD_CSI_D0 = PAD_ID(IMX1_PA, 4), + MX1_PAD_CSI_D1 = PAD_ID(IMX1_PA, 5), + MX1_PAD_CSI_D2 = PAD_ID(IMX1_PA, 6), + MX1_PAD_CSI_D3 = PAD_ID(IMX1_PA, 7), + MX1_PAD_CSI_D4 = PAD_ID(IMX1_PA, 8), + MX1_PAD_CSI_D5 = PAD_ID(IMX1_PA, 9), + MX1_PAD_CSI_D6 = PAD_ID(IMX1_PA, 10), + MX1_PAD_CSI_D7 = PAD_ID(IMX1_PA, 11), + MX1_PAD_CSI_VSYNC = PAD_ID(IMX1_PA, 12), + MX1_PAD_CSI_HSYNC = 
PAD_ID(IMX1_PA, 13), + MX1_PAD_CSI_PIXCLK = PAD_ID(IMX1_PA, 14), + MX1_PAD_I2C_SDA = PAD_ID(IMX1_PA, 15), + MX1_PAD_I2C_SCL = PAD_ID(IMX1_PA, 16), + MX1_PAD_DTACK = PAD_ID(IMX1_PA, 17), + MX1_PAD_BCLK = PAD_ID(IMX1_PA, 18), + MX1_PAD_LBA = PAD_ID(IMX1_PA, 19), + MX1_PAD_ECB = PAD_ID(IMX1_PA, 20), + MX1_PAD_A0 = PAD_ID(IMX1_PA, 21), + MX1_PAD_CS4 = PAD_ID(IMX1_PA, 22), + MX1_PAD_CS5 = PAD_ID(IMX1_PA, 23), + MX1_PAD_A16 = PAD_ID(IMX1_PA, 24), + MX1_PAD_A17 = PAD_ID(IMX1_PA, 25), + MX1_PAD_A18 = PAD_ID(IMX1_PA, 26), + MX1_PAD_A19 = PAD_ID(IMX1_PA, 27), + MX1_PAD_A20 = PAD_ID(IMX1_PA, 28), + MX1_PAD_A21 = PAD_ID(IMX1_PA, 29), + MX1_PAD_A22 = PAD_ID(IMX1_PA, 30), + MX1_PAD_A23 = PAD_ID(IMX1_PA, 31), + MX1_PAD_SD_DAT0 = PAD_ID(IMX1_PB, 8), + MX1_PAD_SD_DAT1 = PAD_ID(IMX1_PB, 9), + MX1_PAD_SD_DAT2 = PAD_ID(IMX1_PB, 10), + MX1_PAD_SD_DAT3 = PAD_ID(IMX1_PB, 11), + MX1_PAD_SD_SCLK = PAD_ID(IMX1_PB, 12), + MX1_PAD_SD_CMD = PAD_ID(IMX1_PB, 13), + MX1_PAD_SIM_SVEN = PAD_ID(IMX1_PB, 14), + MX1_PAD_SIM_PD = PAD_ID(IMX1_PB, 15), + MX1_PAD_SIM_TX = PAD_ID(IMX1_PB, 16), + MX1_PAD_SIM_RX = PAD_ID(IMX1_PB, 17), + MX1_PAD_SIM_RST = PAD_ID(IMX1_PB, 18), + MX1_PAD_SIM_CLK = PAD_ID(IMX1_PB, 19), + MX1_PAD_USBD_AFE = PAD_ID(IMX1_PB, 20), + MX1_PAD_USBD_OE = PAD_ID(IMX1_PB, 21), + MX1_PAD_USBD_RCV = PAD_ID(IMX1_PB, 22), + MX1_PAD_USBD_SUSPND = PAD_ID(IMX1_PB, 23), + MX1_PAD_USBD_VP = PAD_ID(IMX1_PB, 24), + MX1_PAD_USBD_VM = PAD_ID(IMX1_PB, 25), + MX1_PAD_USBD_VPO = PAD_ID(IMX1_PB, 26), + MX1_PAD_USBD_VMO = PAD_ID(IMX1_PB, 27), + MX1_PAD_UART2_CTS = PAD_ID(IMX1_PB, 28), + MX1_PAD_UART2_RTS = PAD_ID(IMX1_PB, 29), + MX1_PAD_UART2_TXD = PAD_ID(IMX1_PB, 30), + MX1_PAD_UART2_RXD = PAD_ID(IMX1_PB, 31), + MX1_PAD_SSI_RXFS = PAD_ID(IMX1_PC, 3), + MX1_PAD_SSI_RXCLK = PAD_ID(IMX1_PC, 4), + MX1_PAD_SSI_RXDAT = PAD_ID(IMX1_PC, 5), + MX1_PAD_SSI_TXDAT = PAD_ID(IMX1_PC, 6), + MX1_PAD_SSI_TXFS = PAD_ID(IMX1_PC, 7), + MX1_PAD_SSI_TXCLK = PAD_ID(IMX1_PC, 8), + MX1_PAD_UART1_CTS = PAD_ID(IMX1_PC, 9), + MX1_PAD_UART1_RTS = PAD_ID(IMX1_PC, 10), + MX1_PAD_UART1_TXD = PAD_ID(IMX1_PC, 11), + MX1_PAD_UART1_RXD = PAD_ID(IMX1_PC, 12), + MX1_PAD_SPI1_RDY = PAD_ID(IMX1_PC, 13), + MX1_PAD_SPI1_SCLK = PAD_ID(IMX1_PC, 14), + MX1_PAD_SPI1_SS = PAD_ID(IMX1_PC, 15), + MX1_PAD_SPI1_MISO = PAD_ID(IMX1_PC, 16), + MX1_PAD_SPI1_MOSI = PAD_ID(IMX1_PC, 17), + MX1_PAD_BT13 = PAD_ID(IMX1_PC, 19), + MX1_PAD_BT12 = PAD_ID(IMX1_PC, 20), + MX1_PAD_BT11 = PAD_ID(IMX1_PC, 21), + MX1_PAD_BT10 = PAD_ID(IMX1_PC, 22), + MX1_PAD_BT9 = PAD_ID(IMX1_PC, 23), + MX1_PAD_BT8 = PAD_ID(IMX1_PC, 24), + MX1_PAD_BT7 = PAD_ID(IMX1_PC, 25), + MX1_PAD_BT6 = PAD_ID(IMX1_PC, 26), + MX1_PAD_BT5 = PAD_ID(IMX1_PC, 27), + MX1_PAD_BT4 = PAD_ID(IMX1_PC, 28), + MX1_PAD_BT3 = PAD_ID(IMX1_PC, 29), + MX1_PAD_BT2 = PAD_ID(IMX1_PC, 30), + MX1_PAD_BT1 = PAD_ID(IMX1_PC, 31), + MX1_PAD_LSCLK = PAD_ID(IMX1_PD, 6), + MX1_PAD_REV = PAD_ID(IMX1_PD, 7), + MX1_PAD_CLS = PAD_ID(IMX1_PD, 8), + MX1_PAD_PS = PAD_ID(IMX1_PD, 9), + MX1_PAD_SPL_SPR = PAD_ID(IMX1_PD, 10), + MX1_PAD_CONTRAST = PAD_ID(IMX1_PD, 11), + MX1_PAD_ACD_OE = PAD_ID(IMX1_PD, 12), + MX1_PAD_LP_HSYNC = PAD_ID(IMX1_PD, 13), + MX1_PAD_FLM_VSYNC = PAD_ID(IMX1_PD, 14), + MX1_PAD_LD0 = PAD_ID(IMX1_PD, 15), + MX1_PAD_LD1 = PAD_ID(IMX1_PD, 16), + MX1_PAD_LD2 = PAD_ID(IMX1_PD, 17), + MX1_PAD_LD3 = PAD_ID(IMX1_PD, 18), + MX1_PAD_LD4 = PAD_ID(IMX1_PD, 19), + MX1_PAD_LD5 = PAD_ID(IMX1_PD, 20), + MX1_PAD_LD6 = PAD_ID(IMX1_PD, 21), + MX1_PAD_LD7 = PAD_ID(IMX1_PD, 22), + MX1_PAD_LD8 = PAD_ID(IMX1_PD, 23), + MX1_PAD_LD9 = PAD_ID(IMX1_PD, 24), + MX1_PAD_LD10 
= PAD_ID(IMX1_PD, 25), + MX1_PAD_LD11 = PAD_ID(IMX1_PD, 26), + MX1_PAD_LD12 = PAD_ID(IMX1_PD, 27), + MX1_PAD_LD13 = PAD_ID(IMX1_PD, 28), + MX1_PAD_LD14 = PAD_ID(IMX1_PD, 29), + MX1_PAD_LD15 = PAD_ID(IMX1_PD, 30), + MX1_PAD_TMR2OUT = PAD_ID(IMX1_PD, 31), }; /* Pad names for the pinmux subsystem */ diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c index 1738df461235..afeb39957203 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx27.c +++ b/drivers/pinctrl/freescale/pinctrl-imx27.c @@ -16,188 +16,188 @@ #include "pinctrl-imx1.h" #define PAD_ID(port, pin) (port*32 + pin) -#define PA 0 -#define PB 1 -#define PC 2 -#define PD 3 -#define PE 4 -#define PF 5 +#define MX27_PA 0 +#define MX27_PB 1 +#define MX27_PC 2 +#define MX27_PD 3 +#define MX27_PE 4 +#define MX27_PF 5 enum imx27_pads { - MX27_PAD_USBH2_CLK = PAD_ID(PA, 0), - MX27_PAD_USBH2_DIR = PAD_ID(PA, 1), - MX27_PAD_USBH2_DATA7 = PAD_ID(PA, 2), - MX27_PAD_USBH2_NXT = PAD_ID(PA, 3), - MX27_PAD_USBH2_STP = PAD_ID(PA, 4), - MX27_PAD_LSCLK = PAD_ID(PA, 5), - MX27_PAD_LD0 = PAD_ID(PA, 6), - MX27_PAD_LD1 = PAD_ID(PA, 7), - MX27_PAD_LD2 = PAD_ID(PA, 8), - MX27_PAD_LD3 = PAD_ID(PA, 9), - MX27_PAD_LD4 = PAD_ID(PA, 10), - MX27_PAD_LD5 = PAD_ID(PA, 11), - MX27_PAD_LD6 = PAD_ID(PA, 12), - MX27_PAD_LD7 = PAD_ID(PA, 13), - MX27_PAD_LD8 = PAD_ID(PA, 14), - MX27_PAD_LD9 = PAD_ID(PA, 15), - MX27_PAD_LD10 = PAD_ID(PA, 16), - MX27_PAD_LD11 = PAD_ID(PA, 17), - MX27_PAD_LD12 = PAD_ID(PA, 18), - MX27_PAD_LD13 = PAD_ID(PA, 19), - MX27_PAD_LD14 = PAD_ID(PA, 20), - MX27_PAD_LD15 = PAD_ID(PA, 21), - MX27_PAD_LD16 = PAD_ID(PA, 22), - MX27_PAD_LD17 = PAD_ID(PA, 23), - MX27_PAD_REV = PAD_ID(PA, 24), - MX27_PAD_CLS = PAD_ID(PA, 25), - MX27_PAD_PS = PAD_ID(PA, 26), - MX27_PAD_SPL_SPR = PAD_ID(PA, 27), - MX27_PAD_HSYNC = PAD_ID(PA, 28), - MX27_PAD_VSYNC = PAD_ID(PA, 29), - MX27_PAD_CONTRAST = PAD_ID(PA, 30), - MX27_PAD_OE_ACD = PAD_ID(PA, 31), + MX27_PAD_USBH2_CLK = PAD_ID(MX27_PA, 0), + MX27_PAD_USBH2_DIR = PAD_ID(MX27_PA, 1), + MX27_PAD_USBH2_DATA7 = PAD_ID(MX27_PA, 2), + MX27_PAD_USBH2_NXT = PAD_ID(MX27_PA, 3), + MX27_PAD_USBH2_STP = PAD_ID(MX27_PA, 4), + MX27_PAD_LSCLK = PAD_ID(MX27_PA, 5), + MX27_PAD_LD0 = PAD_ID(MX27_PA, 6), + MX27_PAD_LD1 = PAD_ID(MX27_PA, 7), + MX27_PAD_LD2 = PAD_ID(MX27_PA, 8), + MX27_PAD_LD3 = PAD_ID(MX27_PA, 9), + MX27_PAD_LD4 = PAD_ID(MX27_PA, 10), + MX27_PAD_LD5 = PAD_ID(MX27_PA, 11), + MX27_PAD_LD6 = PAD_ID(MX27_PA, 12), + MX27_PAD_LD7 = PAD_ID(MX27_PA, 13), + MX27_PAD_LD8 = PAD_ID(MX27_PA, 14), + MX27_PAD_LD9 = PAD_ID(MX27_PA, 15), + MX27_PAD_LD10 = PAD_ID(MX27_PA, 16), + MX27_PAD_LD11 = PAD_ID(MX27_PA, 17), + MX27_PAD_LD12 = PAD_ID(MX27_PA, 18), + MX27_PAD_LD13 = PAD_ID(MX27_PA, 19), + MX27_PAD_LD14 = PAD_ID(MX27_PA, 20), + MX27_PAD_LD15 = PAD_ID(MX27_PA, 21), + MX27_PAD_LD16 = PAD_ID(MX27_PA, 22), + MX27_PAD_LD17 = PAD_ID(MX27_PA, 23), + MX27_PAD_REV = PAD_ID(MX27_PA, 24), + MX27_PAD_CLS = PAD_ID(MX27_PA, 25), + MX27_PAD_PS = PAD_ID(MX27_PA, 26), + MX27_PAD_SPL_SPR = PAD_ID(MX27_PA, 27), + MX27_PAD_HSYNC = PAD_ID(MX27_PA, 28), + MX27_PAD_VSYNC = PAD_ID(MX27_PA, 29), + MX27_PAD_CONTRAST = PAD_ID(MX27_PA, 30), + MX27_PAD_OE_ACD = PAD_ID(MX27_PA, 31), - MX27_PAD_SD2_D0 = PAD_ID(PB, 4), - MX27_PAD_SD2_D1 = PAD_ID(PB, 5), - MX27_PAD_SD2_D2 = PAD_ID(PB, 6), - MX27_PAD_SD2_D3 = PAD_ID(PB, 7), - MX27_PAD_SD2_CMD = PAD_ID(PB, 8), - MX27_PAD_SD2_CLK = PAD_ID(PB, 9), - MX27_PAD_CSI_D0 = PAD_ID(PB, 10), - MX27_PAD_CSI_D1 = PAD_ID(PB, 11), - MX27_PAD_CSI_D2 = PAD_ID(PB, 12), - MX27_PAD_CSI_D3 = 
PAD_ID(PB, 13), - MX27_PAD_CSI_D4 = PAD_ID(PB, 14), - MX27_PAD_CSI_MCLK = PAD_ID(PB, 15), - MX27_PAD_CSI_PIXCLK = PAD_ID(PB, 16), - MX27_PAD_CSI_D5 = PAD_ID(PB, 17), - MX27_PAD_CSI_D6 = PAD_ID(PB, 18), - MX27_PAD_CSI_D7 = PAD_ID(PB, 19), - MX27_PAD_CSI_VSYNC = PAD_ID(PB, 20), - MX27_PAD_CSI_HSYNC = PAD_ID(PB, 21), - MX27_PAD_USBH1_SUSP = PAD_ID(PB, 22), - MX27_PAD_USB_PWR = PAD_ID(PB, 23), - MX27_PAD_USB_OC_B = PAD_ID(PB, 24), - MX27_PAD_USBH1_RCV = PAD_ID(PB, 25), - MX27_PAD_USBH1_FS = PAD_ID(PB, 26), - MX27_PAD_USBH1_OE_B = PAD_ID(PB, 27), - MX27_PAD_USBH1_TXDM = PAD_ID(PB, 28), - MX27_PAD_USBH1_TXDP = PAD_ID(PB, 29), - MX27_PAD_USBH1_RXDM = PAD_ID(PB, 30), - MX27_PAD_USBH1_RXDP = PAD_ID(PB, 31), + MX27_PAD_SD2_D0 = PAD_ID(MX27_PB, 4), + MX27_PAD_SD2_D1 = PAD_ID(MX27_PB, 5), + MX27_PAD_SD2_D2 = PAD_ID(MX27_PB, 6), + MX27_PAD_SD2_D3 = PAD_ID(MX27_PB, 7), + MX27_PAD_SD2_CMD = PAD_ID(MX27_PB, 8), + MX27_PAD_SD2_CLK = PAD_ID(MX27_PB, 9), + MX27_PAD_CSI_D0 = PAD_ID(MX27_PB, 10), + MX27_PAD_CSI_D1 = PAD_ID(MX27_PB, 11), + MX27_PAD_CSI_D2 = PAD_ID(MX27_PB, 12), + MX27_PAD_CSI_D3 = PAD_ID(MX27_PB, 13), + MX27_PAD_CSI_D4 = PAD_ID(MX27_PB, 14), + MX27_PAD_CSI_MCLK = PAD_ID(MX27_PB, 15), + MX27_PAD_CSI_PIXCLK = PAD_ID(MX27_PB, 16), + MX27_PAD_CSI_D5 = PAD_ID(MX27_PB, 17), + MX27_PAD_CSI_D6 = PAD_ID(MX27_PB, 18), + MX27_PAD_CSI_D7 = PAD_ID(MX27_PB, 19), + MX27_PAD_CSI_VSYNC = PAD_ID(MX27_PB, 20), + MX27_PAD_CSI_HSYNC = PAD_ID(MX27_PB, 21), + MX27_PAD_USBH1_SUSP = PAD_ID(MX27_PB, 22), + MX27_PAD_USB_PWR = PAD_ID(MX27_PB, 23), + MX27_PAD_USB_OC_B = PAD_ID(MX27_PB, 24), + MX27_PAD_USBH1_RCV = PAD_ID(MX27_PB, 25), + MX27_PAD_USBH1_FS = PAD_ID(MX27_PB, 26), + MX27_PAD_USBH1_OE_B = PAD_ID(MX27_PB, 27), + MX27_PAD_USBH1_TXDM = PAD_ID(MX27_PB, 28), + MX27_PAD_USBH1_TXDP = PAD_ID(MX27_PB, 29), + MX27_PAD_USBH1_RXDM = PAD_ID(MX27_PB, 30), + MX27_PAD_USBH1_RXDP = PAD_ID(MX27_PB, 31), - MX27_PAD_I2C2_SDA = PAD_ID(PC, 5), - MX27_PAD_I2C2_SCL = PAD_ID(PC, 6), - MX27_PAD_USBOTG_DATA5 = PAD_ID(PC, 7), - MX27_PAD_USBOTG_DATA6 = PAD_ID(PC, 8), - MX27_PAD_USBOTG_DATA0 = PAD_ID(PC, 9), - MX27_PAD_USBOTG_DATA2 = PAD_ID(PC, 10), - MX27_PAD_USBOTG_DATA1 = PAD_ID(PC, 11), - MX27_PAD_USBOTG_DATA4 = PAD_ID(PC, 12), - MX27_PAD_USBOTG_DATA3 = PAD_ID(PC, 13), - MX27_PAD_TOUT = PAD_ID(PC, 14), - MX27_PAD_TIN = PAD_ID(PC, 15), - MX27_PAD_SSI4_FS = PAD_ID(PC, 16), - MX27_PAD_SSI4_RXDAT = PAD_ID(PC, 17), - MX27_PAD_SSI4_TXDAT = PAD_ID(PC, 18), - MX27_PAD_SSI4_CLK = PAD_ID(PC, 19), - MX27_PAD_SSI1_FS = PAD_ID(PC, 20), - MX27_PAD_SSI1_RXDAT = PAD_ID(PC, 21), - MX27_PAD_SSI1_TXDAT = PAD_ID(PC, 22), - MX27_PAD_SSI1_CLK = PAD_ID(PC, 23), - MX27_PAD_SSI2_FS = PAD_ID(PC, 24), - MX27_PAD_SSI2_RXDAT = PAD_ID(PC, 25), - MX27_PAD_SSI2_TXDAT = PAD_ID(PC, 26), - MX27_PAD_SSI2_CLK = PAD_ID(PC, 27), - MX27_PAD_SSI3_FS = PAD_ID(PC, 28), - MX27_PAD_SSI3_RXDAT = PAD_ID(PC, 29), - MX27_PAD_SSI3_TXDAT = PAD_ID(PC, 30), - MX27_PAD_SSI3_CLK = PAD_ID(PC, 31), + MX27_PAD_I2C2_SDA = PAD_ID(MX27_PC, 5), + MX27_PAD_I2C2_SCL = PAD_ID(MX27_PC, 6), + MX27_PAD_USBOTG_DATA5 = PAD_ID(MX27_PC, 7), + MX27_PAD_USBOTG_DATA6 = PAD_ID(MX27_PC, 8), + MX27_PAD_USBOTG_DATA0 = PAD_ID(MX27_PC, 9), + MX27_PAD_USBOTG_DATA2 = PAD_ID(MX27_PC, 10), + MX27_PAD_USBOTG_DATA1 = PAD_ID(MX27_PC, 11), + MX27_PAD_USBOTG_DATA4 = PAD_ID(MX27_PC, 12), + MX27_PAD_USBOTG_DATA3 = PAD_ID(MX27_PC, 13), + MX27_PAD_TOUT = PAD_ID(MX27_PC, 14), + MX27_PAD_TIN = PAD_ID(MX27_PC, 15), + MX27_PAD_SSI4_FS = PAD_ID(MX27_PC, 16), + MX27_PAD_SSI4_RXDAT = PAD_ID(MX27_PC, 17), + MX27_PAD_SSI4_TXDAT = 
PAD_ID(MX27_PC, 18), + MX27_PAD_SSI4_CLK = PAD_ID(MX27_PC, 19), + MX27_PAD_SSI1_FS = PAD_ID(MX27_PC, 20), + MX27_PAD_SSI1_RXDAT = PAD_ID(MX27_PC, 21), + MX27_PAD_SSI1_TXDAT = PAD_ID(MX27_PC, 22), + MX27_PAD_SSI1_CLK = PAD_ID(MX27_PC, 23), + MX27_PAD_SSI2_FS = PAD_ID(MX27_PC, 24), + MX27_PAD_SSI2_RXDAT = PAD_ID(MX27_PC, 25), + MX27_PAD_SSI2_TXDAT = PAD_ID(MX27_PC, 26), + MX27_PAD_SSI2_CLK = PAD_ID(MX27_PC, 27), + MX27_PAD_SSI3_FS = PAD_ID(MX27_PC, 28), + MX27_PAD_SSI3_RXDAT = PAD_ID(MX27_PC, 29), + MX27_PAD_SSI3_TXDAT = PAD_ID(MX27_PC, 30), + MX27_PAD_SSI3_CLK = PAD_ID(MX27_PC, 31), - MX27_PAD_SD3_CMD = PAD_ID(PD, 0), - MX27_PAD_SD3_CLK = PAD_ID(PD, 1), - MX27_PAD_ATA_DATA0 = PAD_ID(PD, 2), - MX27_PAD_ATA_DATA1 = PAD_ID(PD, 3), - MX27_PAD_ATA_DATA2 = PAD_ID(PD, 4), - MX27_PAD_ATA_DATA3 = PAD_ID(PD, 5), - MX27_PAD_ATA_DATA4 = PAD_ID(PD, 6), - MX27_PAD_ATA_DATA5 = PAD_ID(PD, 7), - MX27_PAD_ATA_DATA6 = PAD_ID(PD, 8), - MX27_PAD_ATA_DATA7 = PAD_ID(PD, 9), - MX27_PAD_ATA_DATA8 = PAD_ID(PD, 10), - MX27_PAD_ATA_DATA9 = PAD_ID(PD, 11), - MX27_PAD_ATA_DATA10 = PAD_ID(PD, 12), - MX27_PAD_ATA_DATA11 = PAD_ID(PD, 13), - MX27_PAD_ATA_DATA12 = PAD_ID(PD, 14), - MX27_PAD_ATA_DATA13 = PAD_ID(PD, 15), - MX27_PAD_ATA_DATA14 = PAD_ID(PD, 16), - MX27_PAD_I2C_DATA = PAD_ID(PD, 17), - MX27_PAD_I2C_CLK = PAD_ID(PD, 18), - MX27_PAD_CSPI2_SS2 = PAD_ID(PD, 19), - MX27_PAD_CSPI2_SS1 = PAD_ID(PD, 20), - MX27_PAD_CSPI2_SS0 = PAD_ID(PD, 21), - MX27_PAD_CSPI2_SCLK = PAD_ID(PD, 22), - MX27_PAD_CSPI2_MISO = PAD_ID(PD, 23), - MX27_PAD_CSPI2_MOSI = PAD_ID(PD, 24), - MX27_PAD_CSPI1_RDY = PAD_ID(PD, 25), - MX27_PAD_CSPI1_SS2 = PAD_ID(PD, 26), - MX27_PAD_CSPI1_SS1 = PAD_ID(PD, 27), - MX27_PAD_CSPI1_SS0 = PAD_ID(PD, 28), - MX27_PAD_CSPI1_SCLK = PAD_ID(PD, 29), - MX27_PAD_CSPI1_MISO = PAD_ID(PD, 30), - MX27_PAD_CSPI1_MOSI = PAD_ID(PD, 31), + MX27_PAD_SD3_CMD = PAD_ID(MX27_PD, 0), + MX27_PAD_SD3_CLK = PAD_ID(MX27_PD, 1), + MX27_PAD_ATA_DATA0 = PAD_ID(MX27_PD, 2), + MX27_PAD_ATA_DATA1 = PAD_ID(MX27_PD, 3), + MX27_PAD_ATA_DATA2 = PAD_ID(MX27_PD, 4), + MX27_PAD_ATA_DATA3 = PAD_ID(MX27_PD, 5), + MX27_PAD_ATA_DATA4 = PAD_ID(MX27_PD, 6), + MX27_PAD_ATA_DATA5 = PAD_ID(MX27_PD, 7), + MX27_PAD_ATA_DATA6 = PAD_ID(MX27_PD, 8), + MX27_PAD_ATA_DATA7 = PAD_ID(MX27_PD, 9), + MX27_PAD_ATA_DATA8 = PAD_ID(MX27_PD, 10), + MX27_PAD_ATA_DATA9 = PAD_ID(MX27_PD, 11), + MX27_PAD_ATA_DATA10 = PAD_ID(MX27_PD, 12), + MX27_PAD_ATA_DATA11 = PAD_ID(MX27_PD, 13), + MX27_PAD_ATA_DATA12 = PAD_ID(MX27_PD, 14), + MX27_PAD_ATA_DATA13 = PAD_ID(MX27_PD, 15), + MX27_PAD_ATA_DATA14 = PAD_ID(MX27_PD, 16), + MX27_PAD_I2C_DATA = PAD_ID(MX27_PD, 17), + MX27_PAD_I2C_CLK = PAD_ID(MX27_PD, 18), + MX27_PAD_CSPI2_SS2 = PAD_ID(MX27_PD, 19), + MX27_PAD_CSPI2_SS1 = PAD_ID(MX27_PD, 20), + MX27_PAD_CSPI2_SS0 = PAD_ID(MX27_PD, 21), + MX27_PAD_CSPI2_SCLK = PAD_ID(MX27_PD, 22), + MX27_PAD_CSPI2_MISO = PAD_ID(MX27_PD, 23), + MX27_PAD_CSPI2_MOSI = PAD_ID(MX27_PD, 24), + MX27_PAD_CSPI1_RDY = PAD_ID(MX27_PD, 25), + MX27_PAD_CSPI1_SS2 = PAD_ID(MX27_PD, 26), + MX27_PAD_CSPI1_SS1 = PAD_ID(MX27_PD, 27), + MX27_PAD_CSPI1_SS0 = PAD_ID(MX27_PD, 28), + MX27_PAD_CSPI1_SCLK = PAD_ID(MX27_PD, 29), + MX27_PAD_CSPI1_MISO = PAD_ID(MX27_PD, 30), + MX27_PAD_CSPI1_MOSI = PAD_ID(MX27_PD, 31), - MX27_PAD_USBOTG_NXT = PAD_ID(PE, 0), - MX27_PAD_USBOTG_STP = PAD_ID(PE, 1), - MX27_PAD_USBOTG_DIR = PAD_ID(PE, 2), - MX27_PAD_UART2_CTS = PAD_ID(PE, 3), - MX27_PAD_UART2_RTS = PAD_ID(PE, 4), - MX27_PAD_PWMO = PAD_ID(PE, 5), - MX27_PAD_UART2_TXD = PAD_ID(PE, 6), - MX27_PAD_UART2_RXD = PAD_ID(PE, 7), - 
MX27_PAD_UART3_TXD = PAD_ID(PE, 8), - MX27_PAD_UART3_RXD = PAD_ID(PE, 9), - MX27_PAD_UART3_CTS = PAD_ID(PE, 10), - MX27_PAD_UART3_RTS = PAD_ID(PE, 11), - MX27_PAD_UART1_TXD = PAD_ID(PE, 12), - MX27_PAD_UART1_RXD = PAD_ID(PE, 13), - MX27_PAD_UART1_CTS = PAD_ID(PE, 14), - MX27_PAD_UART1_RTS = PAD_ID(PE, 15), - MX27_PAD_RTCK = PAD_ID(PE, 16), - MX27_PAD_RESET_OUT_B = PAD_ID(PE, 17), - MX27_PAD_SD1_D0 = PAD_ID(PE, 18), - MX27_PAD_SD1_D1 = PAD_ID(PE, 19), - MX27_PAD_SD1_D2 = PAD_ID(PE, 20), - MX27_PAD_SD1_D3 = PAD_ID(PE, 21), - MX27_PAD_SD1_CMD = PAD_ID(PE, 22), - MX27_PAD_SD1_CLK = PAD_ID(PE, 23), - MX27_PAD_USBOTG_CLK = PAD_ID(PE, 24), - MX27_PAD_USBOTG_DATA7 = PAD_ID(PE, 25), + MX27_PAD_USBOTG_NXT = PAD_ID(MX27_PE, 0), + MX27_PAD_USBOTG_STP = PAD_ID(MX27_PE, 1), + MX27_PAD_USBOTG_DIR = PAD_ID(MX27_PE, 2), + MX27_PAD_UART2_CTS = PAD_ID(MX27_PE, 3), + MX27_PAD_UART2_RTS = PAD_ID(MX27_PE, 4), + MX27_PAD_PWMO = PAD_ID(MX27_PE, 5), + MX27_PAD_UART2_TXD = PAD_ID(MX27_PE, 6), + MX27_PAD_UART2_RXD = PAD_ID(MX27_PE, 7), + MX27_PAD_UART3_TXD = PAD_ID(MX27_PE, 8), + MX27_PAD_UART3_RXD = PAD_ID(MX27_PE, 9), + MX27_PAD_UART3_CTS = PAD_ID(MX27_PE, 10), + MX27_PAD_UART3_RTS = PAD_ID(MX27_PE, 11), + MX27_PAD_UART1_TXD = PAD_ID(MX27_PE, 12), + MX27_PAD_UART1_RXD = PAD_ID(MX27_PE, 13), + MX27_PAD_UART1_CTS = PAD_ID(MX27_PE, 14), + MX27_PAD_UART1_RTS = PAD_ID(MX27_PE, 15), + MX27_PAD_RTCK = PAD_ID(MX27_PE, 16), + MX27_PAD_RESET_OUT_B = PAD_ID(MX27_PE, 17), + MX27_PAD_SD1_D0 = PAD_ID(MX27_PE, 18), + MX27_PAD_SD1_D1 = PAD_ID(MX27_PE, 19), + MX27_PAD_SD1_D2 = PAD_ID(MX27_PE, 20), + MX27_PAD_SD1_D3 = PAD_ID(MX27_PE, 21), + MX27_PAD_SD1_CMD = PAD_ID(MX27_PE, 22), + MX27_PAD_SD1_CLK = PAD_ID(MX27_PE, 23), + MX27_PAD_USBOTG_CLK = PAD_ID(MX27_PE, 24), + MX27_PAD_USBOTG_DATA7 = PAD_ID(MX27_PE, 25), - MX27_PAD_NFRB = PAD_ID(PF, 0), - MX27_PAD_NFCLE = PAD_ID(PF, 1), - MX27_PAD_NFWP_B = PAD_ID(PF, 2), - MX27_PAD_NFCE_B = PAD_ID(PF, 3), - MX27_PAD_NFALE = PAD_ID(PF, 4), - MX27_PAD_NFRE_B = PAD_ID(PF, 5), - MX27_PAD_NFWE_B = PAD_ID(PF, 6), - MX27_PAD_PC_POE = PAD_ID(PF, 7), - MX27_PAD_PC_RW_B = PAD_ID(PF, 8), - MX27_PAD_IOIS16 = PAD_ID(PF, 9), - MX27_PAD_PC_RST = PAD_ID(PF, 10), - MX27_PAD_PC_BVD2 = PAD_ID(PF, 11), - MX27_PAD_PC_BVD1 = PAD_ID(PF, 12), - MX27_PAD_PC_VS2 = PAD_ID(PF, 13), - MX27_PAD_PC_VS1 = PAD_ID(PF, 14), - MX27_PAD_CLKO = PAD_ID(PF, 15), - MX27_PAD_PC_PWRON = PAD_ID(PF, 16), - MX27_PAD_PC_READY = PAD_ID(PF, 17), - MX27_PAD_PC_WAIT_B = PAD_ID(PF, 18), - MX27_PAD_PC_CD2_B = PAD_ID(PF, 19), - MX27_PAD_PC_CD1_B = PAD_ID(PF, 20), - MX27_PAD_CS4_B = PAD_ID(PF, 21), - MX27_PAD_CS5_B = PAD_ID(PF, 22), - MX27_PAD_ATA_DATA15 = PAD_ID(PF, 23), + MX27_PAD_NFRB = PAD_ID(MX27_PF, 0), + MX27_PAD_NFCLE = PAD_ID(MX27_PF, 1), + MX27_PAD_NFWP_B = PAD_ID(MX27_PF, 2), + MX27_PAD_NFCE_B = PAD_ID(MX27_PF, 3), + MX27_PAD_NFALE = PAD_ID(MX27_PF, 4), + MX27_PAD_NFRE_B = PAD_ID(MX27_PF, 5), + MX27_PAD_NFWE_B = PAD_ID(MX27_PF, 6), + MX27_PAD_PC_POE = PAD_ID(MX27_PF, 7), + MX27_PAD_PC_RW_B = PAD_ID(MX27_PF, 8), + MX27_PAD_IOIS16 = PAD_ID(MX27_PF, 9), + MX27_PAD_PC_RST = PAD_ID(MX27_PF, 10), + MX27_PAD_PC_BVD2 = PAD_ID(MX27_PF, 11), + MX27_PAD_PC_BVD1 = PAD_ID(MX27_PF, 12), + MX27_PAD_PC_VS2 = PAD_ID(MX27_PF, 13), + MX27_PAD_PC_VS1 = PAD_ID(MX27_PF, 14), + MX27_PAD_CLKO = PAD_ID(MX27_PF, 15), + MX27_PAD_PC_PWRON = PAD_ID(MX27_PF, 16), + MX27_PAD_PC_READY = PAD_ID(MX27_PF, 17), + MX27_PAD_PC_WAIT_B = PAD_ID(MX27_PF, 18), + MX27_PAD_PC_CD2_B = PAD_ID(MX27_PF, 19), + MX27_PAD_PC_CD1_B = PAD_ID(MX27_PF, 20), + MX27_PAD_CS4_B = 
PAD_ID(MX27_PF, 21), + MX27_PAD_CS5_B = PAD_ID(MX27_PF, 22), + MX27_PAD_ATA_DATA15 = PAD_ID(MX27_PF, 23), }; /* Pad names for the pinmux subsystem */ diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 067b0d344f0e..9f938718927b 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1792,7 +1792,7 @@ MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match); static struct platform_driver chv_pinctrl_driver = { .probe = chv_pinctrl_probe, - .remove_new = chv_pinctrl_remove, + .remove = chv_pinctrl_remove, .driver = { .name = "cherryview-pinctrl", .pm = pm_sleep_ptr(&chv_pinctrl_pm_ops), diff --git a/drivers/pinctrl/intel/pinctrl-elkhartlake.c b/drivers/pinctrl/intel/pinctrl-elkhartlake.c index 1678634ebc06..3e45d7fb003a 100644 --- a/drivers/pinctrl/intel/pinctrl-elkhartlake.c +++ b/drivers/pinctrl/intel/pinctrl-elkhartlake.c @@ -264,6 +264,43 @@ static const struct intel_pinctrl_soc_data ehl_community1_soc_data = { .ncommunities = ARRAY_SIZE(ehl_community1), }; +static const struct pinctrl_pin_desc ehl_community2_pins[] = { + /* DSW */ + PINCTRL_PIN(0, "BATLOWB"), + PINCTRL_PIN(1, "ACPRESENT"), + PINCTRL_PIN(2, "LAN_WAKEB"), + PINCTRL_PIN(3, "PWRBTNB"), + PINCTRL_PIN(4, "SLP_S3B"), + PINCTRL_PIN(5, "SLP_S4B"), + PINCTRL_PIN(6, "SLP_AB"), + PINCTRL_PIN(7, "GPD_7"), + PINCTRL_PIN(8, "SUSCLK"), + PINCTRL_PIN(9, "SLP_WLANB"), + PINCTRL_PIN(10, "SLP_S5B"), + PINCTRL_PIN(11, "LANPHYPC"), + PINCTRL_PIN(12, "INPUT3VSEL"), + PINCTRL_PIN(13, "SLP_LANB"), + PINCTRL_PIN(14, "SLP_SUSB"), + PINCTRL_PIN(15, "WAKEB"), + PINCTRL_PIN(16, "DRAM_RESETB"), +}; + +static const struct intel_padgroup ehl_community2_gpps[] = { + EHL_GPP(0, 0, 16), /* DSW */ +}; + +static const struct intel_community ehl_community2[] = { + EHL_COMMUNITY(0, 0, 16, ehl_community2_gpps), +}; + +static const struct intel_pinctrl_soc_data ehl_community2_soc_data = { + .uid = "2", + .pins = ehl_community2_pins, + .npins = ARRAY_SIZE(ehl_community2_pins), + .communities = ehl_community2, + .ncommunities = ARRAY_SIZE(ehl_community2), +}; + static const struct pinctrl_pin_desc ehl_community3_pins[] = { /* CPU */ PINCTRL_PIN(0, "HDACPU_SDI"), @@ -474,6 +511,7 @@ static const struct intel_pinctrl_soc_data ehl_community5_soc_data = { static const struct intel_pinctrl_soc_data *ehl_soc_data_array[] = { &ehl_community0_soc_data, &ehl_community1_soc_data, + &ehl_community2_soc_data, &ehl_community3_soc_data, &ehl_community4_soc_data, &ehl_community5_soc_data, diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 928607a21d36..04b438f63ccb 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -85,6 +85,18 @@ #define PADCFG1_TERM_UP BIT(13) #define PADCFG1_TERM_SHIFT 10 #define PADCFG1_TERM_MASK GENMASK(12, 10) +/* + * Bit 0 Bit 1 Bit 2 Value, Ohms + * + * 0 0 0 - + * 0 0 1 20000 + * 0 1 0 5000 + * 0 1 1 ~4000 + * 1 0 0 1000 (if supported) + * 1 0 1 ~952 (if supported) + * 1 1 0 ~833 (if supported) + * 1 1 1 ~800 (if supported) + */ #define PADCFG1_TERM_20K BIT(2) #define PADCFG1_TERM_5K BIT(1) #define PADCFG1_TERM_4K (BIT(2) | BIT(1)) diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 7af287252834..a417a031659c 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menu "MediaTek pinctrl drivers" - depends on ARCH_MEDIATEK || RALINK || 
COMPILE_TEST + depends on ARCH_MEDIATEK || ARCH_AIROHA || RALINK || COMPILE_TEST config EINT_MTK tristate "MediaTek External Interrupt Support" @@ -126,6 +126,21 @@ config PINCTRL_MT8127 select PINCTRL_MTK # For ARMv8 SoCs +config PINCTRL_AIROHA + tristate "Airoha EN7581 pin control" + depends on OF + depends on ARM64 || COMPILE_TEST + select PINMUX + select GENERIC_PINCONF + select GENERIC_PINCTRL_GROUPS + select GENERIC_PINMUX_FUNCTIONS + select GPIOLIB + select GPIOLIB_IRQCHIP + select REGMAP_MMIO + help + Say yes here to support pin controller and gpio driver + on Airoha EN7581 SoC. + config PINCTRL_MT2712 bool "MediaTek MT2712 pin control" depends on OF diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile index 680f7e8526e0..1405d434218e 100644 --- a/drivers/pinctrl/mediatek/Makefile +++ b/drivers/pinctrl/mediatek/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o # SoC Drivers +obj-$(CONFIG_PINCTRL_AIROHA) += pinctrl-airoha.o obj-$(CONFIG_PINCTRL_MT7620) += pinctrl-mt7620.o obj-$(CONFIG_PINCTRL_MT7621) += pinctrl-mt7621.o obj-$(CONFIG_PINCTRL_MT76X8) += pinctrl-mt76x8.o diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c new file mode 100644 index 000000000000..547a798b71c8 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c @@ -0,0 +1,2971 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + * Author: Benjamin Larsson <benjamin.larsson@genexis.eu> + * Author: Markus Gothe <markus.gothe@genexis.eu> + */ + +#include <dt-bindings/pinctrl/mt65xx.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/gpio/driver.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "../core.h" +#include "../pinconf.h" +#include "../pinmux.h" + +#define PINCTRL_PIN_GROUP(id) \ + PINCTRL_PINGROUP(#id, id##_pins, ARRAY_SIZE(id##_pins)) + +#define PINCTRL_FUNC_DESC(id) \ + { \ + .desc = { \ + .func = { \ + .name = #id, \ + .groups = id##_groups, \ + .ngroups = ARRAY_SIZE(id##_groups), \ + } \ + }, \ + .groups = id##_func_group, \ + .group_size = ARRAY_SIZE(id##_func_group), \ + } + +#define PINCTRL_CONF_DESC(p, offset, mask) \ + { \ + .pin = p, \ + .reg = { offset, mask }, \ + } + +/* MUX */ +#define REG_GPIO_2ND_I2C_MODE 0x0214 +#define GPIO_MDC_IO_MASTER_MODE_MODE BIT(14) +#define GPIO_I2C_MASTER_MODE_MODE BIT(13) +#define GPIO_I2S_MODE_MASK BIT(12) +#define GPIO_I2C_SLAVE_MODE_MODE BIT(11) +#define GPIO_LAN3_LED1_MODE_MASK BIT(10) +#define GPIO_LAN3_LED0_MODE_MASK BIT(9) +#define GPIO_LAN2_LED1_MODE_MASK BIT(8) +#define GPIO_LAN2_LED0_MODE_MASK BIT(7) +#define GPIO_LAN1_LED1_MODE_MASK BIT(6) +#define GPIO_LAN1_LED0_MODE_MASK BIT(5) +#define GPIO_LAN0_LED1_MODE_MASK BIT(4) +#define GPIO_LAN0_LED0_MODE_MASK BIT(3) +#define PON_TOD_1PPS_MODE_MASK BIT(2) +#define GSW_TOD_1PPS_MODE_MASK BIT(1) +#define GPIO_2ND_I2C_MODE_MASK BIT(0) + +#define REG_GPIO_SPI_CS1_MODE 0x0218 +#define GPIO_PCM_SPI_CS4_MODE_MASK BIT(21) +#define GPIO_PCM_SPI_CS3_MODE_MASK BIT(20) 
+#define GPIO_PCM_SPI_CS2_MODE_P156_MASK BIT(19) +#define GPIO_PCM_SPI_CS2_MODE_P128_MASK BIT(18) +#define GPIO_PCM_SPI_CS1_MODE_MASK BIT(17) +#define GPIO_PCM_SPI_MODE_MASK BIT(16) +#define GPIO_PCM2_MODE_MASK BIT(13) +#define GPIO_PCM1_MODE_MASK BIT(12) +#define GPIO_PCM_INT_MODE_MASK BIT(9) +#define GPIO_PCM_RESET_MODE_MASK BIT(8) +#define GPIO_SPI_QUAD_MODE_MASK BIT(4) +#define GPIO_SPI_CS4_MODE_MASK BIT(3) +#define GPIO_SPI_CS3_MODE_MASK BIT(2) +#define GPIO_SPI_CS2_MODE_MASK BIT(1) +#define GPIO_SPI_CS1_MODE_MASK BIT(0) + +#define REG_GPIO_PON_MODE 0x021c +#define GPIO_PARALLEL_NAND_MODE_MASK BIT(14) +#define GPIO_SGMII_MDIO_MODE_MASK BIT(13) +#define GPIO_PCIE_RESET2_MASK BIT(12) +#define SIPO_RCLK_MODE_MASK BIT(11) +#define GPIO_PCIE_RESET1_MASK BIT(10) +#define GPIO_PCIE_RESET0_MASK BIT(9) +#define GPIO_UART5_MODE_MASK BIT(8) +#define GPIO_UART4_MODE_MASK BIT(7) +#define GPIO_HSUART_CTS_RTS_MODE_MASK BIT(6) +#define GPIO_HSUART_MODE_MASK BIT(5) +#define GPIO_UART2_CTS_RTS_MODE_MASK BIT(4) +#define GPIO_UART2_MODE_MASK BIT(3) +#define GPIO_SIPO_MODE_MASK BIT(2) +#define GPIO_EMMC_MODE_MASK BIT(1) +#define GPIO_PON_MODE_MASK BIT(0) + +#define REG_NPU_UART_EN 0x0224 +#define JTAG_UDI_EN_MASK BIT(4) +#define JTAG_DFD_EN_MASK BIT(3) + +/* LED MAP */ +#define REG_LAN_LED0_MAPPING 0x027c +#define REG_LAN_LED1_MAPPING 0x0280 + +#define LAN4_LED_MAPPING_MASK GENMASK(18, 16) +#define LAN4_PHY4_LED_MAP BIT(18) +#define LAN4_PHY2_LED_MAP BIT(17) +#define LAN4_PHY1_LED_MAP BIT(16) +#define LAN4_PHY0_LED_MAP 0 +#define LAN4_PHY3_LED_MAP GENMASK(17, 16) + +#define LAN3_LED_MAPPING_MASK GENMASK(14, 12) +#define LAN3_PHY4_LED_MAP BIT(14) +#define LAN3_PHY2_LED_MAP BIT(13) +#define LAN3_PHY1_LED_MAP BIT(12) +#define LAN3_PHY0_LED_MAP 0 +#define LAN3_PHY3_LED_MAP GENMASK(13, 12) + +#define LAN2_LED_MAPPING_MASK GENMASK(10, 8) +#define LAN2_PHY4_LED_MAP BIT(12) +#define LAN2_PHY2_LED_MAP BIT(11) +#define LAN2_PHY1_LED_MAP BIT(10) +#define LAN2_PHY0_LED_MAP 0 +#define LAN2_PHY3_LED_MAP GENMASK(11, 10) + +#define LAN1_LED_MAPPING_MASK GENMASK(6, 4) +#define LAN1_PHY4_LED_MAP BIT(6) +#define LAN1_PHY2_LED_MAP BIT(5) +#define LAN1_PHY1_LED_MAP BIT(4) +#define LAN1_PHY0_LED_MAP 0 +#define LAN1_PHY3_LED_MAP GENMASK(5, 4) + +#define LAN0_LED_MAPPING_MASK GENMASK(2, 0) +#define LAN0_PHY4_LED_MAP BIT(3) +#define LAN0_PHY2_LED_MAP BIT(2) +#define LAN0_PHY1_LED_MAP BIT(1) +#define LAN0_PHY0_LED_MAP 0 +#define LAN0_PHY3_LED_MAP GENMASK(2, 1) + +/* CONF */ +#define REG_I2C_SDA_E2 0x001c +#define SPI_MISO_E2_MASK BIT(14) +#define SPI_MOSI_E2_MASK BIT(13) +#define SPI_CLK_E2_MASK BIT(12) +#define SPI_CS0_E2_MASK BIT(11) +#define PCIE2_RESET_E2_MASK BIT(10) +#define PCIE1_RESET_E2_MASK BIT(9) +#define PCIE0_RESET_E2_MASK BIT(8) +#define UART1_RXD_E2_MASK BIT(3) +#define UART1_TXD_E2_MASK BIT(2) +#define I2C_SCL_E2_MASK BIT(1) +#define I2C_SDA_E2_MASK BIT(0) + +#define REG_I2C_SDA_E4 0x0020 +#define SPI_MISO_E4_MASK BIT(14) +#define SPI_MOSI_E4_MASK BIT(13) +#define SPI_CLK_E4_MASK BIT(12) +#define SPI_CS0_E4_MASK BIT(11) +#define PCIE2_RESET_E4_MASK BIT(10) +#define PCIE1_RESET_E4_MASK BIT(9) +#define PCIE0_RESET_E4_MASK BIT(8) +#define UART1_RXD_E4_MASK BIT(3) +#define UART1_TXD_E4_MASK BIT(2) +#define I2C_SCL_E4_MASK BIT(1) +#define I2C_SDA_E4_MASK BIT(0) + +#define REG_GPIO_L_E2 0x0024 +#define REG_GPIO_L_E4 0x0028 +#define REG_GPIO_H_E2 0x002c +#define REG_GPIO_H_E4 0x0030 + +#define REG_I2C_SDA_PU 0x0044 +#define SPI_MISO_PU_MASK BIT(14) +#define SPI_MOSI_PU_MASK BIT(13) +#define SPI_CLK_PU_MASK BIT(12) 
+#define SPI_CS0_PU_MASK BIT(11) +#define PCIE2_RESET_PU_MASK BIT(10) +#define PCIE1_RESET_PU_MASK BIT(9) +#define PCIE0_RESET_PU_MASK BIT(8) +#define UART1_RXD_PU_MASK BIT(3) +#define UART1_TXD_PU_MASK BIT(2) +#define I2C_SCL_PU_MASK BIT(1) +#define I2C_SDA_PU_MASK BIT(0) + +#define REG_I2C_SDA_PD 0x0048 +#define SPI_MISO_PD_MASK BIT(14) +#define SPI_MOSI_PD_MASK BIT(13) +#define SPI_CLK_PD_MASK BIT(12) +#define SPI_CS0_PD_MASK BIT(11) +#define PCIE2_RESET_PD_MASK BIT(10) +#define PCIE1_RESET_PD_MASK BIT(9) +#define PCIE0_RESET_PD_MASK BIT(8) +#define UART1_RXD_PD_MASK BIT(3) +#define UART1_TXD_PD_MASK BIT(2) +#define I2C_SCL_PD_MASK BIT(1) +#define I2C_SDA_PD_MASK BIT(0) + +#define REG_GPIO_L_PU 0x004c +#define REG_GPIO_L_PD 0x0050 +#define REG_GPIO_H_PU 0x0054 +#define REG_GPIO_H_PD 0x0058 + +#define REG_PCIE_RESET_OD 0x018c +#define PCIE2_RESET_OD_MASK BIT(2) +#define PCIE1_RESET_OD_MASK BIT(1) +#define PCIE0_RESET_OD_MASK BIT(0) + +/* GPIOs */ +#define REG_GPIO_CTRL 0x0000 +#define REG_GPIO_DATA 0x0004 +#define REG_GPIO_INT 0x0008 +#define REG_GPIO_INT_EDGE 0x000c +#define REG_GPIO_INT_LEVEL 0x0010 +#define REG_GPIO_OE 0x0014 +#define REG_GPIO_CTRL1 0x0020 + +/* PWM MODE CONF */ +#define REG_GPIO_FLASH_MODE_CFG 0x0034 +#define GPIO15_FLASH_MODE_CFG BIT(15) +#define GPIO14_FLASH_MODE_CFG BIT(14) +#define GPIO13_FLASH_MODE_CFG BIT(13) +#define GPIO12_FLASH_MODE_CFG BIT(12) +#define GPIO11_FLASH_MODE_CFG BIT(11) +#define GPIO10_FLASH_MODE_CFG BIT(10) +#define GPIO9_FLASH_MODE_CFG BIT(9) +#define GPIO8_FLASH_MODE_CFG BIT(8) +#define GPIO7_FLASH_MODE_CFG BIT(7) +#define GPIO6_FLASH_MODE_CFG BIT(6) +#define GPIO5_FLASH_MODE_CFG BIT(5) +#define GPIO4_FLASH_MODE_CFG BIT(4) +#define GPIO3_FLASH_MODE_CFG BIT(3) +#define GPIO2_FLASH_MODE_CFG BIT(2) +#define GPIO1_FLASH_MODE_CFG BIT(1) +#define GPIO0_FLASH_MODE_CFG BIT(0) + +#define REG_GPIO_CTRL2 0x0060 +#define REG_GPIO_CTRL3 0x0064 + +/* PWM MODE CONF EXT */ +#define REG_GPIO_FLASH_MODE_CFG_EXT 0x0068 +#define GPIO51_FLASH_MODE_CFG BIT(31) +#define GPIO50_FLASH_MODE_CFG BIT(30) +#define GPIO49_FLASH_MODE_CFG BIT(29) +#define GPIO48_FLASH_MODE_CFG BIT(28) +#define GPIO47_FLASH_MODE_CFG BIT(27) +#define GPIO46_FLASH_MODE_CFG BIT(26) +#define GPIO45_FLASH_MODE_CFG BIT(25) +#define GPIO44_FLASH_MODE_CFG BIT(24) +#define GPIO43_FLASH_MODE_CFG BIT(23) +#define GPIO42_FLASH_MODE_CFG BIT(22) +#define GPIO41_FLASH_MODE_CFG BIT(21) +#define GPIO40_FLASH_MODE_CFG BIT(20) +#define GPIO39_FLASH_MODE_CFG BIT(19) +#define GPIO38_FLASH_MODE_CFG BIT(18) +#define GPIO37_FLASH_MODE_CFG BIT(17) +#define GPIO36_FLASH_MODE_CFG BIT(16) +#define GPIO31_FLASH_MODE_CFG BIT(15) +#define GPIO30_FLASH_MODE_CFG BIT(14) +#define GPIO29_FLASH_MODE_CFG BIT(13) +#define GPIO28_FLASH_MODE_CFG BIT(12) +#define GPIO27_FLASH_MODE_CFG BIT(11) +#define GPIO26_FLASH_MODE_CFG BIT(10) +#define GPIO25_FLASH_MODE_CFG BIT(9) +#define GPIO24_FLASH_MODE_CFG BIT(8) +#define GPIO23_FLASH_MODE_CFG BIT(7) +#define GPIO22_FLASH_MODE_CFG BIT(6) +#define GPIO21_FLASH_MODE_CFG BIT(5) +#define GPIO20_FLASH_MODE_CFG BIT(4) +#define GPIO19_FLASH_MODE_CFG BIT(3) +#define GPIO18_FLASH_MODE_CFG BIT(2) +#define GPIO17_FLASH_MODE_CFG BIT(1) +#define GPIO16_FLASH_MODE_CFG BIT(0) + +#define REG_GPIO_DATA1 0x0070 +#define REG_GPIO_OE1 0x0078 +#define REG_GPIO_INT1 0x007c +#define REG_GPIO_INT_EDGE1 0x0080 +#define REG_GPIO_INT_EDGE2 0x0084 +#define REG_GPIO_INT_EDGE3 0x0088 +#define REG_GPIO_INT_LEVEL1 0x008c +#define REG_GPIO_INT_LEVEL2 0x0090 +#define REG_GPIO_INT_LEVEL3 0x0094 + +#define AIROHA_NUM_PINS 64 
+#define AIROHA_PIN_BANK_SIZE (AIROHA_NUM_PINS / 2) +#define AIROHA_REG_GPIOCTRL_NUM_PIN (AIROHA_NUM_PINS / 4) + +static const u32 gpio_data_regs[] = { + REG_GPIO_DATA, + REG_GPIO_DATA1 +}; + +static const u32 gpio_out_regs[] = { + REG_GPIO_OE, + REG_GPIO_OE1 +}; + +static const u32 gpio_dir_regs[] = { + REG_GPIO_CTRL, + REG_GPIO_CTRL1, + REG_GPIO_CTRL2, + REG_GPIO_CTRL3 +}; + +static const u32 irq_status_regs[] = { + REG_GPIO_INT, + REG_GPIO_INT1 +}; + +static const u32 irq_level_regs[] = { + REG_GPIO_INT_LEVEL, + REG_GPIO_INT_LEVEL1, + REG_GPIO_INT_LEVEL2, + REG_GPIO_INT_LEVEL3 +}; + +static const u32 irq_edge_regs[] = { + REG_GPIO_INT_EDGE, + REG_GPIO_INT_EDGE1, + REG_GPIO_INT_EDGE2, + REG_GPIO_INT_EDGE3 +}; + +struct airoha_pinctrl_reg { + u32 offset; + u32 mask; +}; + +enum airoha_pinctrl_mux_func { + AIROHA_FUNC_MUX, + AIROHA_FUNC_PWM_MUX, + AIROHA_FUNC_PWM_EXT_MUX, +}; + +struct airoha_pinctrl_func_group { + const char *name; + struct { + enum airoha_pinctrl_mux_func mux; + u32 offset; + u32 mask; + u32 val; + } regmap[2]; + int regmap_size; +}; + +struct airoha_pinctrl_func { + const struct function_desc desc; + const struct airoha_pinctrl_func_group *groups; + u8 group_size; +}; + +struct airoha_pinctrl_conf { + u32 pin; + struct airoha_pinctrl_reg reg; +}; + +struct airoha_pinctrl_gpiochip { + struct gpio_chip chip; + + /* gpio */ + const u32 *data; + const u32 *dir; + const u32 *out; + /* irq */ + const u32 *status; + const u32 *level; + const u32 *edge; + + u32 irq_type[AIROHA_NUM_PINS]; +}; + +struct airoha_pinctrl { + struct pinctrl_dev *ctrl; + + struct regmap *chip_scu; + struct regmap *regmap; + + struct airoha_pinctrl_gpiochip gpiochip; +}; + +static struct pinctrl_pin_desc airoha_pinctrl_pins[] = { + PINCTRL_PIN(0, "uart1_txd"), + PINCTRL_PIN(1, "uart1_rxd"), + PINCTRL_PIN(2, "i2c_scl"), + PINCTRL_PIN(3, "i2c_sda"), + PINCTRL_PIN(4, "spi_cs0"), + PINCTRL_PIN(5, "spi_clk"), + PINCTRL_PIN(6, "spi_mosi"), + PINCTRL_PIN(7, "spi_miso"), + PINCTRL_PIN(13, "gpio0"), + PINCTRL_PIN(14, "gpio1"), + PINCTRL_PIN(15, "gpio2"), + PINCTRL_PIN(16, "gpio3"), + PINCTRL_PIN(17, "gpio4"), + PINCTRL_PIN(18, "gpio5"), + PINCTRL_PIN(19, "gpio6"), + PINCTRL_PIN(20, "gpio7"), + PINCTRL_PIN(21, "gpio8"), + PINCTRL_PIN(22, "gpio9"), + PINCTRL_PIN(23, "gpio10"), + PINCTRL_PIN(24, "gpio11"), + PINCTRL_PIN(25, "gpio12"), + PINCTRL_PIN(26, "gpio13"), + PINCTRL_PIN(27, "gpio14"), + PINCTRL_PIN(28, "gpio15"), + PINCTRL_PIN(29, "gpio16"), + PINCTRL_PIN(30, "gpio17"), + PINCTRL_PIN(31, "gpio18"), + PINCTRL_PIN(32, "gpio19"), + PINCTRL_PIN(33, "gpio20"), + PINCTRL_PIN(34, "gpio21"), + PINCTRL_PIN(35, "gpio22"), + PINCTRL_PIN(36, "gpio23"), + PINCTRL_PIN(37, "gpio24"), + PINCTRL_PIN(38, "gpio25"), + PINCTRL_PIN(39, "gpio26"), + PINCTRL_PIN(40, "gpio27"), + PINCTRL_PIN(41, "gpio28"), + PINCTRL_PIN(42, "gpio29"), + PINCTRL_PIN(43, "gpio30"), + PINCTRL_PIN(44, "gpio31"), + PINCTRL_PIN(45, "gpio32"), + PINCTRL_PIN(46, "gpio33"), + PINCTRL_PIN(47, "gpio34"), + PINCTRL_PIN(48, "gpio35"), + PINCTRL_PIN(49, "gpio36"), + PINCTRL_PIN(50, "gpio37"), + PINCTRL_PIN(51, "gpio38"), + PINCTRL_PIN(52, "gpio39"), + PINCTRL_PIN(53, "gpio40"), + PINCTRL_PIN(54, "gpio41"), + PINCTRL_PIN(55, "gpio42"), + PINCTRL_PIN(56, "gpio43"), + PINCTRL_PIN(57, "gpio44"), + PINCTRL_PIN(58, "gpio45"), + PINCTRL_PIN(59, "gpio46"), + PINCTRL_PIN(61, "pcie_reset0"), + PINCTRL_PIN(62, "pcie_reset1"), + PINCTRL_PIN(63, "pcie_reset2"), +}; + +static const int pon_pins[] = { 49, 50, 51, 52, 53, 54 }; +static const int pon_tod_1pps_pins[] = { 46 }; 
+static const int gsw_tod_1pps_pins[] = { 46 }; +static const int sipo_pins[] = { 16, 17 }; +static const int sipo_rclk_pins[] = { 16, 17, 43 }; +static const int mdio_pins[] = { 14, 15 }; +static const int uart2_pins[] = { 48, 55 }; +static const int uart2_cts_rts_pins[] = { 46, 47 }; +static const int hsuart_pins[] = { 28, 29 }; +static const int hsuart_cts_rts_pins[] = { 26, 27 }; +static const int uart4_pins[] = { 38, 39 }; +static const int uart5_pins[] = { 18, 19 }; +static const int i2c0_pins[] = { 2, 3 }; +static const int i2c1_pins[] = { 14, 15 }; +static const int jtag_udi_pins[] = { 16, 17, 18, 19, 20 }; +static const int jtag_dfd_pins[] = { 16, 17, 18, 19, 20 }; +static const int i2s_pins[] = { 26, 27, 28, 29 }; +static const int pcm1_pins[] = { 22, 23, 24, 25 }; +static const int pcm2_pins[] = { 18, 19, 20, 21 }; +static const int spi_quad_pins[] = { 32, 33 }; +static const int spi_pins[] = { 4, 5, 6, 7 }; +static const int spi_cs1_pins[] = { 34 }; +static const int pcm_spi_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25 }; +static const int pcm_spi_int_pins[] = { 14 }; +static const int pcm_spi_rst_pins[] = { 15 }; +static const int pcm_spi_cs1_pins[] = { 43 }; +static const int pcm_spi_cs2_pins[] = { 40 }; +static const int pcm_spi_cs2_p128_pins[] = { 40 }; +static const int pcm_spi_cs2_p156_pins[] = { 40 }; +static const int pcm_spi_cs3_pins[] = { 41 }; +static const int pcm_spi_cs4_pins[] = { 42 }; +static const int emmc_pins[] = { 4, 5, 6, 30, 31, 32, 33, 34, 35, 36, 37 }; +static const int pnand_pins[] = { 4, 5, 6, 7, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42 }; +static const int gpio0_pins[] = { 13 }; +static const int gpio1_pins[] = { 14 }; +static const int gpio2_pins[] = { 15 }; +static const int gpio3_pins[] = { 16 }; +static const int gpio4_pins[] = { 17 }; +static const int gpio5_pins[] = { 18 }; +static const int gpio6_pins[] = { 19 }; +static const int gpio7_pins[] = { 20 }; +static const int gpio8_pins[] = { 21 }; +static const int gpio9_pins[] = { 22 }; +static const int gpio10_pins[] = { 23 }; +static const int gpio11_pins[] = { 24 }; +static const int gpio12_pins[] = { 25 }; +static const int gpio13_pins[] = { 26 }; +static const int gpio14_pins[] = { 27 }; +static const int gpio15_pins[] = { 28 }; +static const int gpio16_pins[] = { 29 }; +static const int gpio17_pins[] = { 30 }; +static const int gpio18_pins[] = { 31 }; +static const int gpio19_pins[] = { 32 }; +static const int gpio20_pins[] = { 33 }; +static const int gpio21_pins[] = { 34 }; +static const int gpio22_pins[] = { 35 }; +static const int gpio23_pins[] = { 36 }; +static const int gpio24_pins[] = { 37 }; +static const int gpio25_pins[] = { 38 }; +static const int gpio26_pins[] = { 39 }; +static const int gpio27_pins[] = { 40 }; +static const int gpio28_pins[] = { 41 }; +static const int gpio29_pins[] = { 42 }; +static const int gpio30_pins[] = { 43 }; +static const int gpio31_pins[] = { 44 }; +static const int gpio33_pins[] = { 46 }; +static const int gpio34_pins[] = { 47 }; +static const int gpio35_pins[] = { 48 }; +static const int gpio36_pins[] = { 49 }; +static const int gpio37_pins[] = { 50 }; +static const int gpio38_pins[] = { 51 }; +static const int gpio39_pins[] = { 52 }; +static const int gpio40_pins[] = { 53 }; +static const int gpio41_pins[] = { 54 }; +static const int gpio42_pins[] = { 55 }; +static const int gpio43_pins[] = { 56 }; +static const int gpio44_pins[] = { 57 }; +static const int gpio45_pins[] = { 58 }; +static const int gpio46_pins[] = { 59 }; +static const int 
pcie_reset0_pins[] = { 61 }; +static const int pcie_reset1_pins[] = { 62 }; +static const int pcie_reset2_pins[] = { 63 }; + +static const struct pingroup airoha_pinctrl_groups[] = { + PINCTRL_PIN_GROUP(pon), + PINCTRL_PIN_GROUP(pon_tod_1pps), + PINCTRL_PIN_GROUP(gsw_tod_1pps), + PINCTRL_PIN_GROUP(sipo), + PINCTRL_PIN_GROUP(sipo_rclk), + PINCTRL_PIN_GROUP(mdio), + PINCTRL_PIN_GROUP(uart2), + PINCTRL_PIN_GROUP(uart2_cts_rts), + PINCTRL_PIN_GROUP(hsuart), + PINCTRL_PIN_GROUP(hsuart_cts_rts), + PINCTRL_PIN_GROUP(uart4), + PINCTRL_PIN_GROUP(uart5), + PINCTRL_PIN_GROUP(i2c0), + PINCTRL_PIN_GROUP(i2c1), + PINCTRL_PIN_GROUP(jtag_udi), + PINCTRL_PIN_GROUP(jtag_dfd), + PINCTRL_PIN_GROUP(i2s), + PINCTRL_PIN_GROUP(pcm1), + PINCTRL_PIN_GROUP(pcm2), + PINCTRL_PIN_GROUP(spi), + PINCTRL_PIN_GROUP(spi_quad), + PINCTRL_PIN_GROUP(spi_cs1), + PINCTRL_PIN_GROUP(pcm_spi), + PINCTRL_PIN_GROUP(pcm_spi_int), + PINCTRL_PIN_GROUP(pcm_spi_rst), + PINCTRL_PIN_GROUP(pcm_spi_cs1), + PINCTRL_PIN_GROUP(pcm_spi_cs2_p128), + PINCTRL_PIN_GROUP(pcm_spi_cs2_p156), + PINCTRL_PIN_GROUP(pcm_spi_cs2), + PINCTRL_PIN_GROUP(pcm_spi_cs3), + PINCTRL_PIN_GROUP(pcm_spi_cs4), + PINCTRL_PIN_GROUP(emmc), + PINCTRL_PIN_GROUP(pnand), + PINCTRL_PIN_GROUP(gpio0), + PINCTRL_PIN_GROUP(gpio1), + PINCTRL_PIN_GROUP(gpio2), + PINCTRL_PIN_GROUP(gpio3), + PINCTRL_PIN_GROUP(gpio4), + PINCTRL_PIN_GROUP(gpio5), + PINCTRL_PIN_GROUP(gpio6), + PINCTRL_PIN_GROUP(gpio7), + PINCTRL_PIN_GROUP(gpio8), + PINCTRL_PIN_GROUP(gpio9), + PINCTRL_PIN_GROUP(gpio10), + PINCTRL_PIN_GROUP(gpio11), + PINCTRL_PIN_GROUP(gpio12), + PINCTRL_PIN_GROUP(gpio13), + PINCTRL_PIN_GROUP(gpio14), + PINCTRL_PIN_GROUP(gpio15), + PINCTRL_PIN_GROUP(gpio16), + PINCTRL_PIN_GROUP(gpio17), + PINCTRL_PIN_GROUP(gpio18), + PINCTRL_PIN_GROUP(gpio19), + PINCTRL_PIN_GROUP(gpio20), + PINCTRL_PIN_GROUP(gpio21), + PINCTRL_PIN_GROUP(gpio22), + PINCTRL_PIN_GROUP(gpio23), + PINCTRL_PIN_GROUP(gpio24), + PINCTRL_PIN_GROUP(gpio25), + PINCTRL_PIN_GROUP(gpio26), + PINCTRL_PIN_GROUP(gpio27), + PINCTRL_PIN_GROUP(gpio28), + PINCTRL_PIN_GROUP(gpio29), + PINCTRL_PIN_GROUP(gpio30), + PINCTRL_PIN_GROUP(gpio31), + PINCTRL_PIN_GROUP(gpio33), + PINCTRL_PIN_GROUP(gpio34), + PINCTRL_PIN_GROUP(gpio35), + PINCTRL_PIN_GROUP(gpio36), + PINCTRL_PIN_GROUP(gpio37), + PINCTRL_PIN_GROUP(gpio38), + PINCTRL_PIN_GROUP(gpio39), + PINCTRL_PIN_GROUP(gpio40), + PINCTRL_PIN_GROUP(gpio41), + PINCTRL_PIN_GROUP(gpio42), + PINCTRL_PIN_GROUP(gpio43), + PINCTRL_PIN_GROUP(gpio44), + PINCTRL_PIN_GROUP(gpio45), + PINCTRL_PIN_GROUP(gpio46), + PINCTRL_PIN_GROUP(pcie_reset0), + PINCTRL_PIN_GROUP(pcie_reset1), + PINCTRL_PIN_GROUP(pcie_reset2), +}; + +static const char *const pon_groups[] = { "pon" }; +static const char *const tod_1pps_groups[] = { "pon_tod_1pps", "gsw_tod_1pps" }; +static const char *const sipo_groups[] = { "sipo", "sipo_rclk" }; +static const char *const mdio_groups[] = { "mdio" }; +static const char *const uart_groups[] = { "uart2", "uart2_cts_rts", "hsuart", + "hsuart_cts_rts", "uart4", + "uart5" }; +static const char *const i2c_groups[] = { "i2c1" }; +static const char *const jtag_groups[] = { "jtag_udi", "jtag_dfd" }; +static const char *const pcm_groups[] = { "pcm1", "pcm2" }; +static const char *const spi_groups[] = { "spi_quad", "spi_cs1" }; +static const char *const pcm_spi_groups[] = { "pcm_spi", "pcm_spi_int", + "pcm_spi_rst", "pcm_spi_cs1", + "pcm_spi_cs2_p156", + "pcm_spi_cs2_p128", + "pcm_spi_cs3", "pcm_spi_cs4" }; +static const char *const i2s_groups[] = { "i2s" }; +static const char *const emmc_groups[] = { "emmc" }; 
+static const char *const pnand_groups[] = { "pnand" }; +static const char *const pcie_reset_groups[] = { "pcie_reset0", "pcie_reset1", + "pcie_reset2" }; +static const char *const pwm_groups[] = { "gpio0", "gpio1", + "gpio2", "gpio3", + "gpio4", "gpio5", + "gpio6", "gpio7", + "gpio8", "gpio9", + "gpio10", "gpio11", + "gpio12", "gpio13", + "gpio14", "gpio15", + "gpio16", "gpio17", + "gpio18", "gpio19", + "gpio20", "gpio21", + "gpio22", "gpio23", + "gpio24", "gpio25", + "gpio26", "gpio27", + "gpio28", "gpio29", + "gpio30", "gpio31", + "gpio36", "gpio37", + "gpio38", "gpio39", + "gpio40", "gpio41", + "gpio42", "gpio43", + "gpio44", "gpio45", + "gpio46", "gpio47" }; +static const char *const phy1_led0_groups[] = { "gpio33", "gpio34", + "gpio35", "gpio42" }; +static const char *const phy2_led0_groups[] = { "gpio33", "gpio34", + "gpio35", "gpio42" }; +static const char *const phy3_led0_groups[] = { "gpio33", "gpio34", + "gpio35", "gpio42" }; +static const char *const phy4_led0_groups[] = { "gpio33", "gpio34", + "gpio35", "gpio42" }; +static const char *const phy1_led1_groups[] = { "gpio43", "gpio44", + "gpio45", "gpio46" }; +static const char *const phy2_led1_groups[] = { "gpio43", "gpio44", + "gpio45", "gpio46" }; +static const char *const phy3_led1_groups[] = { "gpio43", "gpio44", + "gpio45", "gpio46" }; +static const char *const phy4_led1_groups[] = { "gpio43", "gpio44", + "gpio45", "gpio46" }; + +static const struct airoha_pinctrl_func_group pon_func_group[] = { + { + .name = "pon", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_PON_MODE_MASK, + GPIO_PON_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group tod_1pps_func_group[] = { + { + .name = "pon_tod_1pps", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + PON_TOD_1PPS_MODE_MASK, + PON_TOD_1PPS_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "gsw_tod_1pps", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GSW_TOD_1PPS_MODE_MASK, + GSW_TOD_1PPS_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group sipo_func_group[] = { + { + .name = "sipo", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_SIPO_MODE_MASK | SIPO_RCLK_MODE_MASK, + GPIO_SIPO_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "sipo_rclk", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_SIPO_MODE_MASK | SIPO_RCLK_MODE_MASK, + GPIO_SIPO_MODE_MASK | SIPO_RCLK_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group mdio_func_group[] = { + { + .name = "mdio", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_SGMII_MDIO_MODE_MASK, + GPIO_SGMII_MDIO_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_MDC_IO_MASTER_MODE_MODE, + GPIO_MDC_IO_MASTER_MODE_MODE + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group uart_func_group[] = { + { + .name = "uart2", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_UART2_MODE_MASK, + GPIO_UART2_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "uart2_cts_rts", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_UART2_MODE_MASK | GPIO_UART2_CTS_RTS_MODE_MASK, + GPIO_UART2_MODE_MASK | GPIO_UART2_CTS_RTS_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "hsuart", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_HSUART_MODE_MASK | GPIO_HSUART_CTS_RTS_MODE_MASK, + GPIO_HSUART_MODE_MASK + }, + .regmap_size = 1, + }, + { + .name = 
"hsuart_cts_rts", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_HSUART_MODE_MASK | GPIO_HSUART_CTS_RTS_MODE_MASK, + GPIO_HSUART_MODE_MASK | GPIO_HSUART_CTS_RTS_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "uart4", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_UART4_MODE_MASK, + GPIO_UART4_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "uart5", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_UART5_MODE_MASK, + GPIO_UART5_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group i2c_func_group[] = { + { + .name = "i2c1", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_2ND_I2C_MODE_MASK, + GPIO_2ND_I2C_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group jtag_func_group[] = { + { + .name = "jtag_udi", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_NPU_UART_EN, + JTAG_UDI_EN_MASK, + JTAG_UDI_EN_MASK + }, + .regmap_size = 1, + }, { + .name = "jtag_dfd", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_NPU_UART_EN, + JTAG_DFD_EN_MASK, + JTAG_DFD_EN_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group pcm_func_group[] = { + { + .name = "pcm1", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM1_MODE_MASK, + GPIO_PCM1_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm2", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM2_MODE_MASK, + GPIO_PCM2_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group spi_func_group[] = { + { + .name = "spi_quad", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_SPI_QUAD_MODE_MASK, + GPIO_SPI_QUAD_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "spi_cs1", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_SPI_CS1_MODE_MASK, + GPIO_SPI_CS1_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "spi_cs2", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_SPI_CS2_MODE_MASK, + GPIO_SPI_CS2_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "spi_cs3", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_SPI_CS3_MODE_MASK, + GPIO_SPI_CS3_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "spi_cs4", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_SPI_CS4_MODE_MASK, + GPIO_SPI_CS4_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group pcm_spi_func_group[] = { + { + .name = "pcm_spi", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_SPI_MODE_MASK, + GPIO_PCM_SPI_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm_spi_int", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_INT_MODE_MASK, + GPIO_PCM_INT_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm_spi_rst", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_RESET_MODE_MASK, + GPIO_PCM_RESET_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm_spi_cs1", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_SPI_CS1_MODE_MASK, + GPIO_PCM_SPI_CS1_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm_spi_cs2_p128", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_SPI_CS2_MODE_P128_MASK, + GPIO_PCM_SPI_CS2_MODE_P128_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm_spi_cs2_p156", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_SPI_CS2_MODE_P156_MASK, + GPIO_PCM_SPI_CS2_MODE_P156_MASK + }, 
+ .regmap_size = 1, + }, { + .name = "pcm_spi_cs3", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_SPI_CS3_MODE_MASK, + GPIO_PCM_SPI_CS3_MODE_MASK + }, + .regmap_size = 1, + }, { + .name = "pcm_spi_cs4", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_SPI_CS1_MODE, + GPIO_PCM_SPI_CS4_MODE_MASK, + GPIO_PCM_SPI_CS4_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group i2s_func_group[] = { + { + .name = "i2s", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_I2S_MODE_MASK, + GPIO_I2S_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group emmc_func_group[] = { + { + .name = "emmc", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_EMMC_MODE_MASK, + GPIO_EMMC_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group pnand_func_group[] = { + { + .name = "pnand", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_PARALLEL_NAND_MODE_MASK, + GPIO_PARALLEL_NAND_MODE_MASK + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group pcie_reset_func_group[] = { + { + .name = "pcie_reset0", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_PCIE_RESET0_MASK, + GPIO_PCIE_RESET0_MASK + }, + .regmap_size = 1, + }, { + .name = "pcie_reset1", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_PCIE_RESET1_MASK, + GPIO_PCIE_RESET1_MASK + }, + .regmap_size = 1, + }, { + .name = "pcie_reset2", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_PON_MODE, + GPIO_PCIE_RESET2_MASK, + GPIO_PCIE_RESET2_MASK + }, + .regmap_size = 1, + }, +}; + +/* PWM */ +static const struct airoha_pinctrl_func_group pwm_func_group[] = { + { + .name = "gpio0", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO0_FLASH_MODE_CFG, + GPIO0_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio1", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO1_FLASH_MODE_CFG, + GPIO1_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio2", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO2_FLASH_MODE_CFG, + GPIO2_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio3", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO3_FLASH_MODE_CFG, + GPIO3_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio4", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO4_FLASH_MODE_CFG, + GPIO4_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio5", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO5_FLASH_MODE_CFG, + GPIO5_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio6", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO6_FLASH_MODE_CFG, + GPIO6_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio7", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO7_FLASH_MODE_CFG, + GPIO7_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio8", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO8_FLASH_MODE_CFG, + GPIO8_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio9", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO9_FLASH_MODE_CFG, + GPIO9_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio10", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO10_FLASH_MODE_CFG, + GPIO10_FLASH_MODE_CFG + }, + .regmap_size = 
1, + }, { + .name = "gpio11", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO11_FLASH_MODE_CFG, + GPIO11_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio12", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO12_FLASH_MODE_CFG, + GPIO12_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio13", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO13_FLASH_MODE_CFG, + GPIO13_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio14", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO14_FLASH_MODE_CFG, + GPIO14_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio15", + .regmap[0] = { + AIROHA_FUNC_PWM_MUX, + REG_GPIO_FLASH_MODE_CFG, + GPIO15_FLASH_MODE_CFG, + GPIO15_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio16", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO16_FLASH_MODE_CFG, + GPIO16_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio17", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO17_FLASH_MODE_CFG, + GPIO17_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio18", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO18_FLASH_MODE_CFG, + GPIO18_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio19", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO19_FLASH_MODE_CFG, + GPIO19_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio20", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO20_FLASH_MODE_CFG, + GPIO20_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio21", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO21_FLASH_MODE_CFG, + GPIO21_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio22", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO22_FLASH_MODE_CFG, + GPIO22_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio23", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO23_FLASH_MODE_CFG, + GPIO23_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio24", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO24_FLASH_MODE_CFG, + GPIO24_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio25", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO25_FLASH_MODE_CFG, + GPIO25_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio26", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO26_FLASH_MODE_CFG, + GPIO26_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio27", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO27_FLASH_MODE_CFG, + GPIO27_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio28", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO28_FLASH_MODE_CFG, + GPIO28_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio29", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO29_FLASH_MODE_CFG, + GPIO29_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio30", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO30_FLASH_MODE_CFG, + GPIO30_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio31", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, 
+ REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO31_FLASH_MODE_CFG, + GPIO31_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio36", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO36_FLASH_MODE_CFG, + GPIO36_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio37", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO37_FLASH_MODE_CFG, + GPIO37_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio38", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO38_FLASH_MODE_CFG, + GPIO38_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio39", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO39_FLASH_MODE_CFG, + GPIO39_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio40", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO40_FLASH_MODE_CFG, + GPIO40_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio41", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO41_FLASH_MODE_CFG, + GPIO41_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio42", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO42_FLASH_MODE_CFG, + GPIO42_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio43", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO43_FLASH_MODE_CFG, + GPIO43_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio44", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO44_FLASH_MODE_CFG, + GPIO44_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio45", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO45_FLASH_MODE_CFG, + GPIO45_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio46", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO46_FLASH_MODE_CFG, + GPIO46_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, { + .name = "gpio47", + .regmap[0] = { + AIROHA_FUNC_PWM_EXT_MUX, + REG_GPIO_FLASH_MODE_CFG_EXT, + GPIO47_FLASH_MODE_CFG, + GPIO47_FLASH_MODE_CFG + }, + .regmap_size = 1, + }, +}; + +static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { + { + .name = "gpio33", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED0_MODE_MASK, + GPIO_LAN0_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY1_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio34", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED0_MODE_MASK, + GPIO_LAN1_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY1_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio35", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED0_MODE_MASK, + GPIO_LAN2_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY1_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio42", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY1_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { + { + .name = "gpio33", + .regmap[0] = { + 
AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED0_MODE_MASK, + GPIO_LAN0_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY2_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio34", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED0_MODE_MASK, + GPIO_LAN1_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY2_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio35", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED0_MODE_MASK, + GPIO_LAN2_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY2_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio42", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY2_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { + { + .name = "gpio33", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED0_MODE_MASK, + GPIO_LAN0_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY3_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio34", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED0_MODE_MASK, + GPIO_LAN1_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY3_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio35", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED0_MODE_MASK, + GPIO_LAN2_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY3_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio42", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY3_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { + { + .name = "gpio33", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED0_MODE_MASK, + GPIO_LAN0_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY4_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio34", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED0_MODE_MASK, + GPIO_LAN1_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY4_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio35", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED0_MODE_MASK, + GPIO_LAN2_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY4_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio42", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED0_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY4_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { + { + .name = 
"gpio43", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED1_MODE_MASK, + GPIO_LAN0_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY1_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio44", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED1_MODE_MASK, + GPIO_LAN1_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY1_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio45", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED1_MODE_MASK, + GPIO_LAN2_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY1_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio46", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY1_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { + { + .name = "gpio43", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED1_MODE_MASK, + GPIO_LAN0_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY2_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio44", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED1_MODE_MASK, + GPIO_LAN1_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY2_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio45", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED1_MODE_MASK, + GPIO_LAN2_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY2_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio46", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY2_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { + { + .name = "gpio43", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED1_MODE_MASK, + GPIO_LAN0_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY3_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio44", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED1_MODE_MASK, + GPIO_LAN1_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY3_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio45", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED1_MODE_MASK, + GPIO_LAN2_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY3_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio46", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY3_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func_group 
phy4_led1_func_group[] = { + { + .name = "gpio43", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN0_LED1_MODE_MASK, + GPIO_LAN0_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN1_LED_MAPPING_MASK, + LAN1_PHY4_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio44", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN1_LED1_MODE_MASK, + GPIO_LAN1_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN2_LED_MAPPING_MASK, + LAN2_PHY4_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio45", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN2_LED1_MODE_MASK, + GPIO_LAN2_LED1_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN3_LED_MAPPING_MASK, + LAN3_PHY4_LED_MAP + }, + .regmap_size = 2, + }, { + .name = "gpio46", + .regmap[0] = { + AIROHA_FUNC_MUX, + REG_GPIO_2ND_I2C_MODE, + GPIO_LAN3_LED0_MODE_MASK, + GPIO_LAN3_LED0_MODE_MASK + }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_LAN_LED1_MAPPING, + LAN4_LED_MAPPING_MASK, + LAN4_PHY4_LED_MAP + }, + .regmap_size = 2, + }, +}; + +static const struct airoha_pinctrl_func airoha_pinctrl_funcs[] = { + PINCTRL_FUNC_DESC(pon), + PINCTRL_FUNC_DESC(tod_1pps), + PINCTRL_FUNC_DESC(sipo), + PINCTRL_FUNC_DESC(mdio), + PINCTRL_FUNC_DESC(uart), + PINCTRL_FUNC_DESC(i2c), + PINCTRL_FUNC_DESC(jtag), + PINCTRL_FUNC_DESC(pcm), + PINCTRL_FUNC_DESC(spi), + PINCTRL_FUNC_DESC(pcm_spi), + PINCTRL_FUNC_DESC(i2s), + PINCTRL_FUNC_DESC(emmc), + PINCTRL_FUNC_DESC(pnand), + PINCTRL_FUNC_DESC(pcie_reset), + PINCTRL_FUNC_DESC(pwm), + PINCTRL_FUNC_DESC(phy1_led0), + PINCTRL_FUNC_DESC(phy2_led0), + PINCTRL_FUNC_DESC(phy3_led0), + PINCTRL_FUNC_DESC(phy4_led0), + PINCTRL_FUNC_DESC(phy1_led1), + PINCTRL_FUNC_DESC(phy2_led1), + PINCTRL_FUNC_DESC(phy3_led1), + PINCTRL_FUNC_DESC(phy4_led1), +}; + +static const struct airoha_pinctrl_conf airoha_pinctrl_pullup_conf[] = { + PINCTRL_CONF_DESC(0, REG_I2C_SDA_PU, UART1_TXD_PU_MASK), + PINCTRL_CONF_DESC(1, REG_I2C_SDA_PU, UART1_RXD_PU_MASK), + PINCTRL_CONF_DESC(2, REG_I2C_SDA_PU, I2C_SDA_PU_MASK), + PINCTRL_CONF_DESC(3, REG_I2C_SDA_PU, I2C_SCL_PU_MASK), + PINCTRL_CONF_DESC(4, REG_I2C_SDA_PU, SPI_CS0_PU_MASK), + PINCTRL_CONF_DESC(5, REG_I2C_SDA_PU, SPI_CLK_PU_MASK), + PINCTRL_CONF_DESC(6, REG_I2C_SDA_PU, SPI_MOSI_PU_MASK), + PINCTRL_CONF_DESC(7, REG_I2C_SDA_PU, SPI_MISO_PU_MASK), + PINCTRL_CONF_DESC(13, REG_GPIO_L_PU, BIT(0)), + PINCTRL_CONF_DESC(14, REG_GPIO_L_PU, BIT(1)), + PINCTRL_CONF_DESC(15, REG_GPIO_L_PU, BIT(2)), + PINCTRL_CONF_DESC(16, REG_GPIO_L_PU, BIT(3)), + PINCTRL_CONF_DESC(17, REG_GPIO_L_PU, BIT(4)), + PINCTRL_CONF_DESC(18, REG_GPIO_L_PU, BIT(5)), + PINCTRL_CONF_DESC(19, REG_GPIO_L_PU, BIT(6)), + PINCTRL_CONF_DESC(20, REG_GPIO_L_PU, BIT(7)), + PINCTRL_CONF_DESC(21, REG_GPIO_L_PU, BIT(8)), + PINCTRL_CONF_DESC(22, REG_GPIO_L_PU, BIT(9)), + PINCTRL_CONF_DESC(23, REG_GPIO_L_PU, BIT(10)), + PINCTRL_CONF_DESC(24, REG_GPIO_L_PU, BIT(11)), + PINCTRL_CONF_DESC(25, REG_GPIO_L_PU, BIT(12)), + PINCTRL_CONF_DESC(26, REG_GPIO_L_PU, BIT(13)), + PINCTRL_CONF_DESC(27, REG_GPIO_L_PU, BIT(14)), + PINCTRL_CONF_DESC(28, REG_GPIO_L_PU, BIT(15)), + PINCTRL_CONF_DESC(29, REG_GPIO_L_PU, BIT(16)), + PINCTRL_CONF_DESC(30, REG_GPIO_L_PU, BIT(17)), + PINCTRL_CONF_DESC(31, REG_GPIO_L_PU, BIT(18)), + PINCTRL_CONF_DESC(32, REG_GPIO_L_PU, BIT(18)), + PINCTRL_CONF_DESC(33, REG_GPIO_L_PU, BIT(20)), + PINCTRL_CONF_DESC(34, REG_GPIO_L_PU, BIT(21)), + PINCTRL_CONF_DESC(35, REG_GPIO_L_PU, BIT(22)), + 
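/* Annotation (inferred from the tables themselves): the pull-up table and the matching pull-down, E2 and E4 tables below share one layout - pins 0-7 and 61-63 are covered by named masks in the REG_I2C_SDA_* SCU registers, while pins 13-59 get one bit each across the REG_GPIO_L_*/REG_GPIO_H_* register pair; pins 8-12 and 60 have no per-pin entry. */ +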
PINCTRL_CONF_DESC(36, REG_GPIO_L_PU, BIT(23)), + PINCTRL_CONF_DESC(37, REG_GPIO_L_PU, BIT(24)), + PINCTRL_CONF_DESC(38, REG_GPIO_L_PU, BIT(25)), + PINCTRL_CONF_DESC(39, REG_GPIO_L_PU, BIT(26)), + PINCTRL_CONF_DESC(40, REG_GPIO_L_PU, BIT(27)), + PINCTRL_CONF_DESC(41, REG_GPIO_L_PU, BIT(28)), + PINCTRL_CONF_DESC(42, REG_GPIO_L_PU, BIT(29)), + PINCTRL_CONF_DESC(43, REG_GPIO_L_PU, BIT(30)), + PINCTRL_CONF_DESC(44, REG_GPIO_L_PU, BIT(31)), + PINCTRL_CONF_DESC(45, REG_GPIO_H_PU, BIT(0)), + PINCTRL_CONF_DESC(46, REG_GPIO_H_PU, BIT(1)), + PINCTRL_CONF_DESC(47, REG_GPIO_H_PU, BIT(2)), + PINCTRL_CONF_DESC(48, REG_GPIO_H_PU, BIT(3)), + PINCTRL_CONF_DESC(49, REG_GPIO_H_PU, BIT(4)), + PINCTRL_CONF_DESC(50, REG_GPIO_H_PU, BIT(5)), + PINCTRL_CONF_DESC(51, REG_GPIO_H_PU, BIT(6)), + PINCTRL_CONF_DESC(52, REG_GPIO_H_PU, BIT(7)), + PINCTRL_CONF_DESC(53, REG_GPIO_H_PU, BIT(8)), + PINCTRL_CONF_DESC(54, REG_GPIO_H_PU, BIT(9)), + PINCTRL_CONF_DESC(55, REG_GPIO_H_PU, BIT(10)), + PINCTRL_CONF_DESC(56, REG_GPIO_H_PU, BIT(11)), + PINCTRL_CONF_DESC(57, REG_GPIO_H_PU, BIT(12)), + PINCTRL_CONF_DESC(58, REG_GPIO_H_PU, BIT(13)), + PINCTRL_CONF_DESC(59, REG_GPIO_H_PU, BIT(14)), + PINCTRL_CONF_DESC(61, REG_I2C_SDA_PU, PCIE0_RESET_PU_MASK), + PINCTRL_CONF_DESC(62, REG_I2C_SDA_PU, PCIE1_RESET_PU_MASK), + PINCTRL_CONF_DESC(63, REG_I2C_SDA_PU, PCIE2_RESET_PU_MASK), +}; + +static const struct airoha_pinctrl_conf airoha_pinctrl_pulldown_conf[] = { + PINCTRL_CONF_DESC(0, REG_I2C_SDA_PD, UART1_TXD_PD_MASK), + PINCTRL_CONF_DESC(1, REG_I2C_SDA_PD, UART1_RXD_PD_MASK), + PINCTRL_CONF_DESC(2, REG_I2C_SDA_PD, I2C_SDA_PD_MASK), + PINCTRL_CONF_DESC(3, REG_I2C_SDA_PD, I2C_SCL_PD_MASK), + PINCTRL_CONF_DESC(4, REG_I2C_SDA_PD, SPI_CS0_PD_MASK), + PINCTRL_CONF_DESC(5, REG_I2C_SDA_PD, SPI_CLK_PD_MASK), + PINCTRL_CONF_DESC(6, REG_I2C_SDA_PD, SPI_MOSI_PD_MASK), + PINCTRL_CONF_DESC(7, REG_I2C_SDA_PD, SPI_MISO_PD_MASK), + PINCTRL_CONF_DESC(13, REG_GPIO_L_PD, BIT(0)), + PINCTRL_CONF_DESC(14, REG_GPIO_L_PD, BIT(1)), + PINCTRL_CONF_DESC(15, REG_GPIO_L_PD, BIT(2)), + PINCTRL_CONF_DESC(16, REG_GPIO_L_PD, BIT(3)), + PINCTRL_CONF_DESC(17, REG_GPIO_L_PD, BIT(4)), + PINCTRL_CONF_DESC(18, REG_GPIO_L_PD, BIT(5)), + PINCTRL_CONF_DESC(19, REG_GPIO_L_PD, BIT(6)), + PINCTRL_CONF_DESC(20, REG_GPIO_L_PD, BIT(7)), + PINCTRL_CONF_DESC(21, REG_GPIO_L_PD, BIT(8)), + PINCTRL_CONF_DESC(22, REG_GPIO_L_PD, BIT(9)), + PINCTRL_CONF_DESC(23, REG_GPIO_L_PD, BIT(10)), + PINCTRL_CONF_DESC(24, REG_GPIO_L_PD, BIT(11)), + PINCTRL_CONF_DESC(25, REG_GPIO_L_PD, BIT(12)), + PINCTRL_CONF_DESC(26, REG_GPIO_L_PD, BIT(13)), + PINCTRL_CONF_DESC(27, REG_GPIO_L_PD, BIT(14)), + PINCTRL_CONF_DESC(28, REG_GPIO_L_PD, BIT(15)), + PINCTRL_CONF_DESC(29, REG_GPIO_L_PD, BIT(16)), + PINCTRL_CONF_DESC(30, REG_GPIO_L_PD, BIT(17)), + PINCTRL_CONF_DESC(31, REG_GPIO_L_PD, BIT(18)), + PINCTRL_CONF_DESC(32, REG_GPIO_L_PD, BIT(18)), + PINCTRL_CONF_DESC(33, REG_GPIO_L_PD, BIT(20)), + PINCTRL_CONF_DESC(34, REG_GPIO_L_PD, BIT(21)), + PINCTRL_CONF_DESC(35, REG_GPIO_L_PD, BIT(22)), + PINCTRL_CONF_DESC(36, REG_GPIO_L_PD, BIT(23)), + PINCTRL_CONF_DESC(37, REG_GPIO_L_PD, BIT(24)), + PINCTRL_CONF_DESC(38, REG_GPIO_L_PD, BIT(25)), + PINCTRL_CONF_DESC(39, REG_GPIO_L_PD, BIT(26)), + PINCTRL_CONF_DESC(40, REG_GPIO_L_PD, BIT(27)), + PINCTRL_CONF_DESC(41, REG_GPIO_L_PD, BIT(28)), + PINCTRL_CONF_DESC(42, REG_GPIO_L_PD, BIT(29)), + PINCTRL_CONF_DESC(43, REG_GPIO_L_PD, BIT(30)), + PINCTRL_CONF_DESC(44, REG_GPIO_L_PD, BIT(31)), + PINCTRL_CONF_DESC(45, REG_GPIO_H_PD, BIT(0)), + PINCTRL_CONF_DESC(46, REG_GPIO_H_PD, BIT(1)), + 
PINCTRL_CONF_DESC(47, REG_GPIO_H_PD, BIT(2)), + PINCTRL_CONF_DESC(48, REG_GPIO_H_PD, BIT(3)), + PINCTRL_CONF_DESC(49, REG_GPIO_H_PD, BIT(4)), + PINCTRL_CONF_DESC(50, REG_GPIO_H_PD, BIT(5)), + PINCTRL_CONF_DESC(51, REG_GPIO_H_PD, BIT(6)), + PINCTRL_CONF_DESC(52, REG_GPIO_H_PD, BIT(7)), + PINCTRL_CONF_DESC(53, REG_GPIO_H_PD, BIT(8)), + PINCTRL_CONF_DESC(54, REG_GPIO_H_PD, BIT(9)), + PINCTRL_CONF_DESC(55, REG_GPIO_H_PD, BIT(10)), + PINCTRL_CONF_DESC(56, REG_GPIO_H_PD, BIT(11)), + PINCTRL_CONF_DESC(57, REG_GPIO_H_PD, BIT(12)), + PINCTRL_CONF_DESC(58, REG_GPIO_H_PD, BIT(13)), + PINCTRL_CONF_DESC(59, REG_GPIO_H_PD, BIT(14)), + PINCTRL_CONF_DESC(61, REG_I2C_SDA_PD, PCIE0_RESET_PD_MASK), + PINCTRL_CONF_DESC(62, REG_I2C_SDA_PD, PCIE1_RESET_PD_MASK), + PINCTRL_CONF_DESC(63, REG_I2C_SDA_PD, PCIE2_RESET_PD_MASK), +}; + +static const struct airoha_pinctrl_conf airoha_pinctrl_drive_e2_conf[] = { + PINCTRL_CONF_DESC(0, REG_I2C_SDA_E2, UART1_TXD_E2_MASK), + PINCTRL_CONF_DESC(1, REG_I2C_SDA_E2, UART1_RXD_E2_MASK), + PINCTRL_CONF_DESC(2, REG_I2C_SDA_E2, I2C_SDA_E2_MASK), + PINCTRL_CONF_DESC(3, REG_I2C_SDA_E2, I2C_SCL_E2_MASK), + PINCTRL_CONF_DESC(4, REG_I2C_SDA_E2, SPI_CS0_E2_MASK), + PINCTRL_CONF_DESC(5, REG_I2C_SDA_E2, SPI_CLK_E2_MASK), + PINCTRL_CONF_DESC(6, REG_I2C_SDA_E2, SPI_MOSI_E2_MASK), + PINCTRL_CONF_DESC(7, REG_I2C_SDA_E2, SPI_MISO_E2_MASK), + PINCTRL_CONF_DESC(13, REG_GPIO_L_E2, BIT(0)), + PINCTRL_CONF_DESC(14, REG_GPIO_L_E2, BIT(1)), + PINCTRL_CONF_DESC(15, REG_GPIO_L_E2, BIT(2)), + PINCTRL_CONF_DESC(16, REG_GPIO_L_E2, BIT(3)), + PINCTRL_CONF_DESC(17, REG_GPIO_L_E2, BIT(4)), + PINCTRL_CONF_DESC(18, REG_GPIO_L_E2, BIT(5)), + PINCTRL_CONF_DESC(19, REG_GPIO_L_E2, BIT(6)), + PINCTRL_CONF_DESC(20, REG_GPIO_L_E2, BIT(7)), + PINCTRL_CONF_DESC(21, REG_GPIO_L_E2, BIT(8)), + PINCTRL_CONF_DESC(22, REG_GPIO_L_E2, BIT(9)), + PINCTRL_CONF_DESC(23, REG_GPIO_L_E2, BIT(10)), + PINCTRL_CONF_DESC(24, REG_GPIO_L_E2, BIT(11)), + PINCTRL_CONF_DESC(25, REG_GPIO_L_E2, BIT(12)), + PINCTRL_CONF_DESC(26, REG_GPIO_L_E2, BIT(13)), + PINCTRL_CONF_DESC(27, REG_GPIO_L_E2, BIT(14)), + PINCTRL_CONF_DESC(28, REG_GPIO_L_E2, BIT(15)), + PINCTRL_CONF_DESC(29, REG_GPIO_L_E2, BIT(16)), + PINCTRL_CONF_DESC(30, REG_GPIO_L_E2, BIT(17)), + PINCTRL_CONF_DESC(31, REG_GPIO_L_E2, BIT(18)), + PINCTRL_CONF_DESC(32, REG_GPIO_L_E2, BIT(18)), + PINCTRL_CONF_DESC(33, REG_GPIO_L_E2, BIT(20)), + PINCTRL_CONF_DESC(34, REG_GPIO_L_E2, BIT(21)), + PINCTRL_CONF_DESC(35, REG_GPIO_L_E2, BIT(22)), + PINCTRL_CONF_DESC(36, REG_GPIO_L_E2, BIT(23)), + PINCTRL_CONF_DESC(37, REG_GPIO_L_E2, BIT(24)), + PINCTRL_CONF_DESC(38, REG_GPIO_L_E2, BIT(25)), + PINCTRL_CONF_DESC(39, REG_GPIO_L_E2, BIT(26)), + PINCTRL_CONF_DESC(40, REG_GPIO_L_E2, BIT(27)), + PINCTRL_CONF_DESC(41, REG_GPIO_L_E2, BIT(28)), + PINCTRL_CONF_DESC(42, REG_GPIO_L_E2, BIT(29)), + PINCTRL_CONF_DESC(43, REG_GPIO_L_E2, BIT(30)), + PINCTRL_CONF_DESC(44, REG_GPIO_L_E2, BIT(31)), + PINCTRL_CONF_DESC(45, REG_GPIO_H_E2, BIT(0)), + PINCTRL_CONF_DESC(46, REG_GPIO_H_E2, BIT(1)), + PINCTRL_CONF_DESC(47, REG_GPIO_H_E2, BIT(2)), + PINCTRL_CONF_DESC(48, REG_GPIO_H_E2, BIT(3)), + PINCTRL_CONF_DESC(49, REG_GPIO_H_E2, BIT(4)), + PINCTRL_CONF_DESC(50, REG_GPIO_H_E2, BIT(5)), + PINCTRL_CONF_DESC(51, REG_GPIO_H_E2, BIT(6)), + PINCTRL_CONF_DESC(52, REG_GPIO_H_E2, BIT(7)), + PINCTRL_CONF_DESC(53, REG_GPIO_H_E2, BIT(8)), + PINCTRL_CONF_DESC(54, REG_GPIO_H_E2, BIT(9)), + PINCTRL_CONF_DESC(55, REG_GPIO_H_E2, BIT(10)), + PINCTRL_CONF_DESC(56, REG_GPIO_H_E2, BIT(11)), + PINCTRL_CONF_DESC(57, REG_GPIO_H_E2, BIT(12)), + 
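/* Annotation: a pin's E2 and E4 bits combine into a two-bit drive-strength code, (e4 << 1) | e2: 0 = 2 mA, 1 = 4 mA, 2 = 6 mA, 3 = 8 mA, matching the MTK_DRIVE_*mA cases handled in airoha_pinconf_set() below. */ +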
PINCTRL_CONF_DESC(58, REG_GPIO_H_E2, BIT(13)), + PINCTRL_CONF_DESC(59, REG_GPIO_H_E2, BIT(14)), + PINCTRL_CONF_DESC(61, REG_I2C_SDA_E2, PCIE0_RESET_E2_MASK), + PINCTRL_CONF_DESC(62, REG_I2C_SDA_E2, PCIE1_RESET_E2_MASK), + PINCTRL_CONF_DESC(63, REG_I2C_SDA_E2, PCIE2_RESET_E2_MASK), +}; + +static const struct airoha_pinctrl_conf airoha_pinctrl_drive_e4_conf[] = { + PINCTRL_CONF_DESC(0, REG_I2C_SDA_E4, UART1_TXD_E4_MASK), + PINCTRL_CONF_DESC(1, REG_I2C_SDA_E4, UART1_RXD_E4_MASK), + PINCTRL_CONF_DESC(2, REG_I2C_SDA_E4, I2C_SDA_E4_MASK), + PINCTRL_CONF_DESC(3, REG_I2C_SDA_E4, I2C_SCL_E4_MASK), + PINCTRL_CONF_DESC(4, REG_I2C_SDA_E4, SPI_CS0_E4_MASK), + PINCTRL_CONF_DESC(5, REG_I2C_SDA_E4, SPI_CLK_E4_MASK), + PINCTRL_CONF_DESC(6, REG_I2C_SDA_E4, SPI_MOSI_E4_MASK), + PINCTRL_CONF_DESC(7, REG_I2C_SDA_E4, SPI_MISO_E4_MASK), + PINCTRL_CONF_DESC(13, REG_GPIO_L_E4, BIT(0)), + PINCTRL_CONF_DESC(14, REG_GPIO_L_E4, BIT(1)), + PINCTRL_CONF_DESC(15, REG_GPIO_L_E4, BIT(2)), + PINCTRL_CONF_DESC(16, REG_GPIO_L_E4, BIT(3)), + PINCTRL_CONF_DESC(17, REG_GPIO_L_E4, BIT(4)), + PINCTRL_CONF_DESC(18, REG_GPIO_L_E4, BIT(5)), + PINCTRL_CONF_DESC(19, REG_GPIO_L_E4, BIT(6)), + PINCTRL_CONF_DESC(20, REG_GPIO_L_E4, BIT(7)), + PINCTRL_CONF_DESC(21, REG_GPIO_L_E4, BIT(8)), + PINCTRL_CONF_DESC(22, REG_GPIO_L_E4, BIT(9)), + PINCTRL_CONF_DESC(23, REG_GPIO_L_E4, BIT(10)), + PINCTRL_CONF_DESC(24, REG_GPIO_L_E4, BIT(11)), + PINCTRL_CONF_DESC(25, REG_GPIO_L_E4, BIT(12)), + PINCTRL_CONF_DESC(26, REG_GPIO_L_E4, BIT(13)), + PINCTRL_CONF_DESC(27, REG_GPIO_L_E4, BIT(14)), + PINCTRL_CONF_DESC(28, REG_GPIO_L_E4, BIT(15)), + PINCTRL_CONF_DESC(29, REG_GPIO_L_E4, BIT(16)), + PINCTRL_CONF_DESC(30, REG_GPIO_L_E4, BIT(17)), + PINCTRL_CONF_DESC(31, REG_GPIO_L_E4, BIT(18)), + PINCTRL_CONF_DESC(32, REG_GPIO_L_E4, BIT(18)), + PINCTRL_CONF_DESC(33, REG_GPIO_L_E4, BIT(20)), + PINCTRL_CONF_DESC(34, REG_GPIO_L_E4, BIT(21)), + PINCTRL_CONF_DESC(35, REG_GPIO_L_E4, BIT(22)), + PINCTRL_CONF_DESC(36, REG_GPIO_L_E4, BIT(23)), + PINCTRL_CONF_DESC(37, REG_GPIO_L_E4, BIT(24)), + PINCTRL_CONF_DESC(38, REG_GPIO_L_E4, BIT(25)), + PINCTRL_CONF_DESC(39, REG_GPIO_L_E4, BIT(26)), + PINCTRL_CONF_DESC(40, REG_GPIO_L_E4, BIT(27)), + PINCTRL_CONF_DESC(41, REG_GPIO_L_E4, BIT(28)), + PINCTRL_CONF_DESC(42, REG_GPIO_L_E4, BIT(29)), + PINCTRL_CONF_DESC(43, REG_GPIO_L_E4, BIT(30)), + PINCTRL_CONF_DESC(44, REG_GPIO_L_E4, BIT(31)), + PINCTRL_CONF_DESC(45, REG_GPIO_H_E4, BIT(0)), + PINCTRL_CONF_DESC(46, REG_GPIO_H_E4, BIT(1)), + PINCTRL_CONF_DESC(47, REG_GPIO_H_E4, BIT(2)), + PINCTRL_CONF_DESC(48, REG_GPIO_H_E4, BIT(3)), + PINCTRL_CONF_DESC(49, REG_GPIO_H_E4, BIT(4)), + PINCTRL_CONF_DESC(50, REG_GPIO_H_E4, BIT(5)), + PINCTRL_CONF_DESC(51, REG_GPIO_H_E4, BIT(6)), + PINCTRL_CONF_DESC(52, REG_GPIO_H_E4, BIT(7)), + PINCTRL_CONF_DESC(53, REG_GPIO_H_E4, BIT(8)), + PINCTRL_CONF_DESC(54, REG_GPIO_H_E4, BIT(9)), + PINCTRL_CONF_DESC(55, REG_GPIO_H_E4, BIT(10)), + PINCTRL_CONF_DESC(56, REG_GPIO_H_E4, BIT(11)), + PINCTRL_CONF_DESC(57, REG_GPIO_H_E4, BIT(12)), + PINCTRL_CONF_DESC(58, REG_GPIO_H_E4, BIT(13)), + PINCTRL_CONF_DESC(59, REG_GPIO_H_E4, BIT(14)), + PINCTRL_CONF_DESC(61, REG_I2C_SDA_E4, PCIE0_RESET_E4_MASK), + PINCTRL_CONF_DESC(62, REG_I2C_SDA_E4, PCIE1_RESET_E4_MASK), + PINCTRL_CONF_DESC(63, REG_I2C_SDA_E4, PCIE2_RESET_E4_MASK), +}; + +static const struct airoha_pinctrl_conf airoha_pinctrl_pcie_rst_od_conf[] = { + PINCTRL_CONF_DESC(61, REG_PCIE_RESET_OD, PCIE0_RESET_OD_MASK), + PINCTRL_CONF_DESC(62, REG_PCIE_RESET_OD, PCIE1_RESET_OD_MASK), + PINCTRL_CONF_DESC(63, REG_PCIE_RESET_OD, 
PCIE2_RESET_OD_MASK), +}; + +static int airoha_convert_pin_to_reg_offset(struct pinctrl_dev *pctrl_dev, + struct pinctrl_gpio_range *range, + int pin) +{ + if (!range) + range = pinctrl_find_gpio_range_from_pin_nolock(pctrl_dev, + pin); + if (!range) + return -EINVAL; + + return pin - range->pin_base; +} + +/* gpio callbacks */ +static void airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio, + int value) +{ + struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip); + u32 offset = gpio % AIROHA_PIN_BANK_SIZE; + u8 index = gpio / AIROHA_PIN_BANK_SIZE; + + regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.data[index], + BIT(offset), value ? BIT(offset) : 0); +} + +static int airoha_gpio_get(struct gpio_chip *chip, unsigned int gpio) +{ + struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip); + u32 val, pin = gpio % AIROHA_PIN_BANK_SIZE; + u8 index = gpio / AIROHA_PIN_BANK_SIZE; + int err; + + err = regmap_read(pinctrl->regmap, + pinctrl->gpiochip.data[index], &val); + + return err ? err : !!(val & BIT(pin)); +} + +static int airoha_gpio_direction_output(struct gpio_chip *chip, + unsigned int gpio, int value) +{ + int err; + + err = pinctrl_gpio_direction_output(chip, gpio); + if (err) + return err; + + airoha_gpio_set(chip, gpio, value); + + return 0; +} + +/* irq callbacks */ +static void airoha_irq_unmask(struct irq_data *data) +{ + u8 offset = data->hwirq % AIROHA_REG_GPIOCTRL_NUM_PIN; + u8 index = data->hwirq / AIROHA_REG_GPIOCTRL_NUM_PIN; + u32 mask = GENMASK(2 * offset + 1, 2 * offset); + struct airoha_pinctrl_gpiochip *gpiochip; + struct airoha_pinctrl *pinctrl; + u32 val = BIT(2 * offset); + + gpiochip = irq_data_get_irq_chip_data(data); + if (WARN_ON_ONCE(data->hwirq >= ARRAY_SIZE(gpiochip->irq_type))) + return; + + pinctrl = container_of(gpiochip, struct airoha_pinctrl, gpiochip); + switch (gpiochip->irq_type[data->hwirq]) { + case IRQ_TYPE_LEVEL_LOW: + val = val << 1; + fallthrough; + case IRQ_TYPE_LEVEL_HIGH: + regmap_update_bits(pinctrl->regmap, gpiochip->level[index], + mask, val); + break; + case IRQ_TYPE_EDGE_FALLING: + val = val << 1; + fallthrough; + case IRQ_TYPE_EDGE_RISING: + regmap_update_bits(pinctrl->regmap, gpiochip->edge[index], + mask, val); + break; + case IRQ_TYPE_EDGE_BOTH: + regmap_set_bits(pinctrl->regmap, gpiochip->edge[index], mask); + break; + default: + break; + } +} + +static void airoha_irq_mask(struct irq_data *data) +{ + u8 offset = data->hwirq % AIROHA_REG_GPIOCTRL_NUM_PIN; + u8 index = data->hwirq / AIROHA_REG_GPIOCTRL_NUM_PIN; + u32 mask = GENMASK(2 * offset + 1, 2 * offset); + struct airoha_pinctrl_gpiochip *gpiochip; + struct airoha_pinctrl *pinctrl; + + gpiochip = irq_data_get_irq_chip_data(data); + pinctrl = container_of(gpiochip, struct airoha_pinctrl, gpiochip); + + regmap_clear_bits(pinctrl->regmap, gpiochip->level[index], mask); + regmap_clear_bits(pinctrl->regmap, gpiochip->edge[index], mask); +} + +static int airoha_irq_type(struct irq_data *data, unsigned int type) +{ + struct airoha_pinctrl_gpiochip *gpiochip; + + gpiochip = irq_data_get_irq_chip_data(data); + if (data->hwirq >= ARRAY_SIZE(gpiochip->irq_type)) + return -EINVAL; + + if (type == IRQ_TYPE_PROBE) { + if (gpiochip->irq_type[data->hwirq]) + return 0; + + type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; + } + gpiochip->irq_type[data->hwirq] = type & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static irqreturn_t airoha_irq_handler(int irq, void *data) +{ + struct airoha_pinctrl *pinctrl = data; + bool handled = false; + int i; + + for (i = 0; i < 
ARRAY_SIZE(irq_status_regs); i++) { + struct gpio_irq_chip *girq = &pinctrl->gpiochip.chip.irq; + u32 regmap; + unsigned long status; + int irq; + + if (regmap_read(pinctrl->regmap, pinctrl->gpiochip.status[i], + &regmap)) + continue; + + status = regmap; + for_each_set_bit(irq, &status, AIROHA_PIN_BANK_SIZE) { + u32 offset = irq + i * AIROHA_PIN_BANK_SIZE; + + generic_handle_irq(irq_find_mapping(girq->domain, + offset)); + regmap_write(pinctrl->regmap, + pinctrl->gpiochip.status[i], BIT(irq)); + } + handled |= !!status; + } + + return handled ? IRQ_HANDLED : IRQ_NONE; +} + +static const struct irq_chip airoha_gpio_irq_chip = { + .name = "airoha-gpio-irq", + .irq_unmask = airoha_irq_unmask, + .irq_mask = airoha_irq_mask, + .irq_mask_ack = airoha_irq_mask, + .irq_set_type = airoha_irq_type, + .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_IMMUTABLE, +}; + +static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl, + struct platform_device *pdev) +{ + struct airoha_pinctrl_gpiochip *chip = &pinctrl->gpiochip; + struct gpio_chip *gc = &chip->chip; + struct gpio_irq_chip *girq = &gc->irq; + struct device *dev = &pdev->dev; + int irq, err; + + chip->data = gpio_data_regs; + chip->dir = gpio_dir_regs; + chip->out = gpio_out_regs; + chip->status = irq_status_regs; + chip->level = irq_level_regs; + chip->edge = irq_edge_regs; + + gc->parent = dev; + gc->label = dev_name(dev); + gc->request = gpiochip_generic_request; + gc->free = gpiochip_generic_free; + gc->direction_input = pinctrl_gpio_direction_input; + gc->direction_output = airoha_gpio_direction_output; + gc->set = airoha_gpio_set; + gc->get = airoha_gpio_get; + gc->base = -1; + gc->ngpio = AIROHA_NUM_PINS; + + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; + gpio_irq_chip_set_chip(girq, &airoha_gpio_irq_chip); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + err = devm_request_irq(dev, irq, airoha_irq_handler, IRQF_SHARED, + dev_name(dev), pinctrl); + if (err) { + dev_err(dev, "error requesting irq %d: %d\n", irq, err); + return err; + } + + return devm_gpiochip_add_data(dev, gc, pinctrl); +} + +/* pinmux callbacks */ +static int airoha_pinmux_set_mux(struct pinctrl_dev *pctrl_dev, + unsigned int selector, + unsigned int group) +{ + struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + const struct airoha_pinctrl_func *func; + struct function_desc *desc; + struct group_desc *grp; + int i; + + desc = pinmux_generic_get_function(pctrl_dev, selector); + if (!desc) + return -EINVAL; + + grp = pinctrl_generic_get_group(pctrl_dev, group); + if (!grp) + return -EINVAL; + + dev_dbg(pctrl_dev->dev, "enable function %s group %s\n", + desc->func.name, grp->grp.name); + + func = desc->data; + for (i = 0; i < func->group_size; i++) { + const struct airoha_pinctrl_func_group *group; + int j; + + group = &func->groups[i]; + if (strcmp(group->name, grp->grp.name)) + continue; + + for (j = 0; j < group->regmap_size; j++) { + switch (group->regmap[j].mux) { + case AIROHA_FUNC_PWM_EXT_MUX: + case AIROHA_FUNC_PWM_MUX: + regmap_update_bits(pinctrl->regmap, + group->regmap[j].offset, + group->regmap[j].mask, + group->regmap[j].val); + break; + default: + regmap_update_bits(pinctrl->chip_scu, + group->regmap[j].offset, + group->regmap[j].mask, + group->regmap[j].val); + break; + } + } + return 0; + } + + return -EINVAL; +} + +static int airoha_pinmux_set_direction(struct pinctrl_dev *pctrl_dev, + struct pinctrl_gpio_range *range, + unsigned int p, bool input) +{ + struct airoha_pinctrl *pinctrl 
= pinctrl_dev_get_drvdata(pctrl_dev); + u32 mask, index; + int err, pin; + + pin = airoha_convert_pin_to_reg_offset(pctrl_dev, range, p); + if (pin < 0) + return pin; + + /* set output enable */ + mask = BIT(pin % AIROHA_PIN_BANK_SIZE); + index = pin / AIROHA_PIN_BANK_SIZE; + err = regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.out[index], + mask, !input ? mask : 0); + if (err) + return err; + + /* set direction */ + mask = BIT(2 * (pin % AIROHA_REG_GPIOCTRL_NUM_PIN)); + index = pin / AIROHA_REG_GPIOCTRL_NUM_PIN; + return regmap_update_bits(pinctrl->regmap, + pinctrl->gpiochip.dir[index], mask, + !input ? mask : 0); +} + +static const struct pinmux_ops airoha_pmxops = { + .get_functions_count = pinmux_generic_get_function_count, + .get_function_name = pinmux_generic_get_function_name, + .get_function_groups = pinmux_generic_get_function_groups, + .gpio_set_direction = airoha_pinmux_set_direction, + .set_mux = airoha_pinmux_set_mux, + .strict = true, +}; + +/* pinconf callbacks */ +static const struct airoha_pinctrl_reg * +airoha_pinctrl_get_conf_reg(const struct airoha_pinctrl_conf *conf, + int conf_size, int pin) +{ + int i; + + for (i = 0; i < conf_size; i++) { + if (conf[i].pin == pin) + return &conf[i].reg; + } + + return NULL; +} + +static int airoha_pinctrl_get_conf(struct airoha_pinctrl *pinctrl, + const struct airoha_pinctrl_conf *conf, + int conf_size, int pin, u32 *val) +{ + const struct airoha_pinctrl_reg *reg; + + reg = airoha_pinctrl_get_conf_reg(conf, conf_size, pin); + if (!reg) + return -EINVAL; + + if (regmap_read(pinctrl->chip_scu, reg->offset, val)) + return -EINVAL; + + *val = (*val & reg->mask) >> __ffs(reg->mask); + + return 0; +} + +static int airoha_pinctrl_set_conf(struct airoha_pinctrl *pinctrl, + const struct airoha_pinctrl_conf *conf, + int conf_size, int pin, u32 val) +{ + const struct airoha_pinctrl_reg *reg = NULL; + + reg = airoha_pinctrl_get_conf_reg(conf, conf_size, pin); + if (!reg) + return -EINVAL; + + + if (regmap_update_bits(pinctrl->chip_scu, reg->offset, reg->mask, + val << __ffs(reg->mask))) + return -EINVAL; + + return 0; +} + +#define airoha_pinctrl_get_pullup_conf(pinctrl, pin, val) \ + airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_pullup_conf, \ + ARRAY_SIZE(airoha_pinctrl_pullup_conf), \ + (pin), (val)) +#define airoha_pinctrl_get_pulldown_conf(pinctrl, pin, val) \ + airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_pulldown_conf, \ + ARRAY_SIZE(airoha_pinctrl_pulldown_conf), \ + (pin), (val)) +#define airoha_pinctrl_get_drive_e2_conf(pinctrl, pin, val) \ + airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_drive_e2_conf, \ + ARRAY_SIZE(airoha_pinctrl_drive_e2_conf), \ + (pin), (val)) +#define airoha_pinctrl_get_drive_e4_conf(pinctrl, pin, val) \ + airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_drive_e4_conf, \ + ARRAY_SIZE(airoha_pinctrl_drive_e4_conf), \ + (pin), (val)) +#define airoha_pinctrl_get_pcie_rst_od_conf(pinctrl, pin, val) \ + airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_pcie_rst_od_conf, \ + ARRAY_SIZE(airoha_pinctrl_pcie_rst_od_conf), \ + (pin), (val)) +#define airoha_pinctrl_set_pullup_conf(pinctrl, pin, val) \ + airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_pullup_conf, \ + ARRAY_SIZE(airoha_pinctrl_pullup_conf), \ + (pin), (val)) +#define airoha_pinctrl_set_pulldown_conf(pinctrl, pin, val) \ + airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_pulldown_conf, \ + ARRAY_SIZE(airoha_pinctrl_pulldown_conf), \ + (pin), (val)) +#define airoha_pinctrl_set_drive_e2_conf(pinctrl, pin, val) \ + 
airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_drive_e2_conf, \ + ARRAY_SIZE(airoha_pinctrl_drive_e2_conf), \ + (pin), (val)) +#define airoha_pinctrl_set_drive_e4_conf(pinctrl, pin, val) \ + airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_drive_e4_conf, \ + ARRAY_SIZE(airoha_pinctrl_drive_e4_conf), \ + (pin), (val)) +#define airoha_pinctrl_set_pcie_rst_od_conf(pinctrl, pin, val) \ + airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_pcie_rst_od_conf, \ + ARRAY_SIZE(airoha_pinctrl_pcie_rst_od_conf), \ + (pin), (val)) + +static int airoha_pinconf_get_direction(struct pinctrl_dev *pctrl_dev, u32 p) +{ + struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + u32 val, mask; + int err, pin; + u8 index; + + pin = airoha_convert_pin_to_reg_offset(pctrl_dev, NULL, p); + if (pin < 0) + return pin; + + index = pin / AIROHA_REG_GPIOCTRL_NUM_PIN; + err = regmap_read(pinctrl->regmap, pinctrl->gpiochip.dir[index], &val); + if (err) + return err; + + mask = BIT(2 * (pin % AIROHA_REG_GPIOCTRL_NUM_PIN)); + return val & mask ? PIN_CONFIG_OUTPUT_ENABLE : PIN_CONFIG_INPUT_ENABLE; +} + +static int airoha_pinconf_get(struct pinctrl_dev *pctrl_dev, + unsigned int pin, unsigned long *config) +{ + struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + enum pin_config_param param = pinconf_to_config_param(*config); + u32 arg; + + switch (param) { + case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: { + u32 pull_up, pull_down; + + if (airoha_pinctrl_get_pullup_conf(pinctrl, pin, &pull_up) || + airoha_pinctrl_get_pulldown_conf(pinctrl, pin, &pull_down)) + return -EINVAL; + + if (param == PIN_CONFIG_BIAS_PULL_UP && + !(pull_up && !pull_down)) + return -EINVAL; + else if (param == PIN_CONFIG_BIAS_PULL_DOWN && + !(pull_down && !pull_up)) + return -EINVAL; + else if (param == PIN_CONFIG_BIAS_DISABLE && + (pull_up || pull_down)) + return -EINVAL; + + arg = 1; + break; + } + case PIN_CONFIG_DRIVE_STRENGTH: { + u32 e2, e4; + + if (airoha_pinctrl_get_drive_e2_conf(pinctrl, pin, &e2) || + airoha_pinctrl_get_drive_e4_conf(pinctrl, pin, &e4)) + return -EINVAL; + + arg = e4 << 1 | e2; + break; + } + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + if (airoha_pinctrl_get_pcie_rst_od_conf(pinctrl, pin, &arg)) + return -EINVAL; + break; + case PIN_CONFIG_OUTPUT_ENABLE: + case PIN_CONFIG_INPUT_ENABLE: + arg = airoha_pinconf_get_direction(pctrl_dev, pin); + if (arg != param) + return -EINVAL; + + arg = 1; + break; + default: + return -EOPNOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int airoha_pinconf_set_pin_value(struct pinctrl_dev *pctrl_dev, + unsigned int p, bool value) +{ + struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + int pin; + + pin = airoha_convert_pin_to_reg_offset(pctrl_dev, NULL, p); + if (pin < 0) + return pin; + + airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value); + + return 0; +} + +static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) +{ + struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + int i; + + for (i = 0; i < num_configs; i++) { + u32 param = pinconf_to_config_param(configs[i]); + u32 arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + airoha_pinctrl_set_pulldown_conf(pinctrl, pin, 0); + airoha_pinctrl_set_pullup_conf(pinctrl, pin, 0); + break; + case PIN_CONFIG_BIAS_PULL_UP: + airoha_pinctrl_set_pulldown_conf(pinctrl, pin, 0); + 
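/* the pull-up and pull-down bits live in separate registers; the opposing bias is cleared before the requested one is set */ +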
airoha_pinctrl_set_pullup_conf(pinctrl, pin, 1); + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + airoha_pinctrl_set_pulldown_conf(pinctrl, pin, 1); + airoha_pinctrl_set_pullup_conf(pinctrl, pin, 0); + break; + case PIN_CONFIG_DRIVE_STRENGTH: { + u32 e2 = 0, e4 = 0; + + switch (arg) { + case MTK_DRIVE_2mA: + break; + case MTK_DRIVE_4mA: + e2 = 1; + break; + case MTK_DRIVE_6mA: + e4 = 1; + break; + case MTK_DRIVE_8mA: + e2 = 1; + e4 = 1; + break; + default: + return -EINVAL; + } + + airoha_pinctrl_set_drive_e2_conf(pinctrl, pin, e2); + airoha_pinctrl_set_drive_e4_conf(pinctrl, pin, e4); + break; + } + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + airoha_pinctrl_set_pcie_rst_od_conf(pinctrl, pin, !!arg); + break; + case PIN_CONFIG_OUTPUT_ENABLE: + case PIN_CONFIG_INPUT_ENABLE: + case PIN_CONFIG_OUTPUT: { + bool input = param == PIN_CONFIG_INPUT_ENABLE; + int err; + + err = airoha_pinmux_set_direction(pctrl_dev, NULL, pin, + input); + if (err) + return err; + + if (param == PIN_CONFIG_OUTPUT) { + err = airoha_pinconf_set_pin_value(pctrl_dev, + pin, !!arg); + if (err) + return err; + } + break; + } + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int airoha_pinconf_group_get(struct pinctrl_dev *pctrl_dev, + unsigned int group, unsigned long *config) +{ + u32 cur_config = 0; + int i; + + for (i = 0; i < airoha_pinctrl_groups[group].npins; i++) { + if (airoha_pinconf_get(pctrl_dev, + airoha_pinctrl_groups[group].pins[i], + config)) + return -EOPNOTSUPP; + + if (i && cur_config != *config) + return -EOPNOTSUPP; + + cur_config = *config; + } + + return 0; +} + +static int airoha_pinconf_group_set(struct pinctrl_dev *pctrl_dev, + unsigned int group, unsigned long *configs, + unsigned int num_configs) +{ + int i; + + for (i = 0; i < airoha_pinctrl_groups[group].npins; i++) { + int err; + + err = airoha_pinconf_set(pctrl_dev, + airoha_pinctrl_groups[group].pins[i], + configs, num_configs); + if (err) + return err; + } + + return 0; +} + +static const struct pinconf_ops airoha_confops = { + .is_generic = true, + .pin_config_get = airoha_pinconf_get, + .pin_config_set = airoha_pinconf_set, + .pin_config_group_get = airoha_pinconf_group_get, + .pin_config_group_set = airoha_pinconf_group_set, + .pin_config_config_dbg_show = pinconf_generic_dump_config, +}; + +static const struct pinctrl_ops airoha_pctlops = { + .get_groups_count = pinctrl_generic_get_group_count, + .get_group_name = pinctrl_generic_get_group_name, + .get_group_pins = pinctrl_generic_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +static struct pinctrl_desc airoha_pinctrl_desc = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .pctlops = &airoha_pctlops, + .pmxops = &airoha_pmxops, + .confops = &airoha_confops, + .pins = airoha_pinctrl_pins, + .npins = ARRAY_SIZE(airoha_pinctrl_pins), +}; + +static int airoha_pinctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct airoha_pinctrl *pinctrl; + struct regmap *map; + int err, i; + + pinctrl = devm_kzalloc(dev, sizeof(*pinctrl), GFP_KERNEL); + if (!pinctrl) + return -ENOMEM; + + pinctrl->regmap = device_node_to_regmap(dev->parent->of_node); + if (IS_ERR(pinctrl->regmap)) + return PTR_ERR(pinctrl->regmap); + + map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu"); + if (IS_ERR(map)) + return PTR_ERR(map); + + pinctrl->chip_scu = map; + + err = devm_pinctrl_register_and_init(dev, &airoha_pinctrl_desc, + pinctrl, &pinctrl->ctrl); + if (err) + return err; + + /* 
build pin groups */ + for (i = 0; i < ARRAY_SIZE(airoha_pinctrl_groups); i++) { + const struct pingroup *grp = &airoha_pinctrl_groups[i]; + + err = pinctrl_generic_add_group(pinctrl->ctrl, grp->name, + grp->pins, grp->npins, + (void *)grp); + if (err < 0) { + dev_err(&pdev->dev, "Failed to register group %s\n", + grp->name); + return err; + } + } + + /* build functions */ + for (i = 0; i < ARRAY_SIZE(airoha_pinctrl_funcs); i++) { + const struct airoha_pinctrl_func *func; + + func = &airoha_pinctrl_funcs[i]; + err = pinmux_generic_add_function(pinctrl->ctrl, + func->desc.func.name, + func->desc.func.groups, + func->desc.func.ngroups, + (void *)func); + if (err < 0) { + dev_err(dev, "Failed to register function %s\n", + func->desc.func.name); + return err; + } + } + + err = pinctrl_enable(pinctrl->ctrl); + if (err) + return err; + + /* build gpio-chip */ + return airoha_pinctrl_add_gpiochip(pinctrl, pdev); +} + +static const struct of_device_id airoha_pinctrl_of_match[] = { + { .compatible = "airoha,en7581-pinctrl" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, airoha_pinctrl_of_match); + +static struct platform_driver airoha_pinctrl_driver = { + .probe = airoha_pinctrl_probe, + .driver = { + .name = "pinctrl-airoha", + .of_match_table = airoha_pinctrl_of_match, + }, +}; +module_platform_driver(airoha_pinctrl_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_AUTHOR("Benjamin Larsson <benjamin.larsson@genexis.eu>"); +MODULE_AUTHOR("Markus Gothe <markus.gothe@genexis.eu>"); +MODULE_DESCRIPTION("Pinctrl driver for Airoha SoC"); diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 68750b6f8e57..4ce2e35a6373 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -1089,7 +1089,7 @@ static struct platform_driver abx500_gpio_driver = { .of_match_table = abx500_gpio_match, }, .probe = abx500_gpio_probe, - .remove_new = abx500_gpio_remove, + .remove = abx500_gpio_remove, }; static int __init abx500_gpio_init(void) diff --git a/drivers/pinctrl/nxp/pinctrl-s32g2.c b/drivers/pinctrl/nxp/pinctrl-s32g2.c index 440ff1879424..c49d28793b69 100644 --- a/drivers/pinctrl/nxp/pinctrl-s32g2.c +++ b/drivers/pinctrl/nxp/pinctrl-s32g2.c @@ -216,6 +216,12 @@ enum s32_pins { S32G_IMCR_CAN1_RXD = 631, S32G_IMCR_CAN2_RXD = 632, S32G_IMCR_CAN3_RXD = 633, + + /* JTAG IMCRs */ + S32G_IMCR_JTAG_TMS = 562, + S32G_IMCR_JTAG_TCK = 572, + S32G_IMCR_JTAG_TDI = 573, + /* GMAC0 */ S32G_IMCR_Ethernet_MDIO = 527, S32G_IMCR_Ethernet_CRS = 526, @@ -229,7 +235,21 @@ enum s32_pins { S32G_IMCR_Ethernet_RX_DV = 530, S32G_IMCR_Ethernet_TX_CLK = 538, S32G_IMCR_Ethernet_REF_CLK = 535, + /* PFE EMAC 0 MII */ + S32G_IMCR_PFE_EMAC_0_MDIO = 837, + S32G_IMCR_PFE_EMAC_0_CRS = 836, + S32G_IMCR_PFE_EMAC_0_COL = 835, + S32G_IMCR_PFE_EMAC_0_RX_D0 = 841, + S32G_IMCR_PFE_EMAC_0_RX_D1 = 842, + S32G_IMCR_PFE_EMAC_0_RX_D2 = 843, + S32G_IMCR_PFE_EMAC_0_RX_D3 = 844, + S32G_IMCR_PFE_EMAC_0_RX_ER = 840, + S32G_IMCR_PFE_EMAC_0_RX_CLK = 839, + S32G_IMCR_PFE_EMAC_0_RX_DV = 845, + S32G_IMCR_PFE_EMAC_0_TX_CLK = 846, + S32G_IMCR_PFE_EMAC_0_REF_CLK = 838, + /* PFE EMAC 1 MII */ S32G_IMCR_PFE_EMAC_1_MDIO = 857, S32G_IMCR_PFE_EMAC_1_CRS = 856, @@ -317,6 +337,13 @@ enum s32_pins { S32G_IMCR_LLCE_CAN13_RXD = 758, S32G_IMCR_LLCE_CAN14_RXD = 759, S32G_IMCR_LLCE_CAN15_RXD = 760, + S32G_IMCR_LLCE_UART0_RXD = 790, + S32G_IMCR_LLCE_UART1_RXD = 791, + S32G_IMCR_LLCE_UART2_RXD = 792, + S32G_IMCR_LLCE_UART3_RXD = 793, + 
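/* LLCE LPSPI2 chip-select, clock and data-in */ +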
S32G_IMCR_LLCE_LPSPI2_PCS0 = 811, + S32G_IMCR_LLCE_LPSPI2_SCK = 816, + S32G_IMCR_LLCE_LPSPI2_SIN = 817, S32G_IMCR_USB_CLK = 895, S32G_IMCR_USB_DATA0 = 896, S32G_IMCR_USB_DATA1 = 897, @@ -503,6 +530,12 @@ static const struct pinctrl_pin_desc s32_pinctrl_pads_siul2[] = { S32_PINCTRL_PIN(S32G_IMCR_USDHC_DAT7), S32_PINCTRL_PIN(S32G_IMCR_USDHC_DQS), S32_PINCTRL_PIN(S32G_IMCR_CAN0_RXD), + + /* JTAG IMCRs */ + S32_PINCTRL_PIN(S32G_IMCR_JTAG_TMS), + S32_PINCTRL_PIN(S32G_IMCR_JTAG_TCK), + S32_PINCTRL_PIN(S32G_IMCR_JTAG_TDI), + /* GMAC0 */ S32_PINCTRL_PIN(S32G_IMCR_Ethernet_MDIO), S32_PINCTRL_PIN(S32G_IMCR_Ethernet_CRS), @@ -638,6 +671,13 @@ static const struct pinctrl_pin_desc s32_pinctrl_pads_siul2[] = { S32_PINCTRL_PIN(S32G_IMCR_LLCE_CAN13_RXD), S32_PINCTRL_PIN(S32G_IMCR_LLCE_CAN14_RXD), S32_PINCTRL_PIN(S32G_IMCR_LLCE_CAN15_RXD), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_UART0_RXD), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_UART1_RXD), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_UART2_RXD), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_UART3_RXD), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_LPSPI2_PCS0), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_LPSPI2_SCK), + S32_PINCTRL_PIN(S32G_IMCR_LLCE_LPSPI2_SIN), S32_PINCTRL_PIN(S32G_IMCR_CAN1_RXD), S32_PINCTRL_PIN(S32G_IMCR_CAN2_RXD), S32_PINCTRL_PIN(S32G_IMCR_CAN3_RXD), @@ -652,6 +692,18 @@ static const struct pinctrl_pin_desc s32_pinctrl_pads_siul2[] = { S32_PINCTRL_PIN(S32G_IMCR_USB_DATA7), S32_PINCTRL_PIN(S32G_IMCR_USB_DIR), S32_PINCTRL_PIN(S32G_IMCR_USB_NXT), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_MDIO), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_CRS), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_COL), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_D0), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_D1), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_D2), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_D3), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_ER), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_CLK), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_RX_DV), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_TX_CLK), + S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_0_REF_CLK), S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_1_MDIO), S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_1_CRS), S32_PINCTRL_PIN(S32G_IMCR_PFE_EMAC_1_COL), diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 7f66ec73199a..fff6d4209ad5 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -506,7 +506,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) case IRQ_TYPE_EDGE_BOTH: pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); - pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF; + pin_reg |= BOTH_EDGES << ACTIVE_LEVEL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -1204,7 +1204,7 @@ static struct platform_driver amd_gpio_driver = { #endif }, .probe = amd_gpio_probe, - .remove_new = amd_gpio_remove, + .remove = amd_gpio_remove, }; module_platform_driver(amd_gpio_driver); diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index cf59089f2776..667be49c3f48 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -60,12 +60,12 @@ #define DB_TYPE_PRESERVE_HIGH_GLITCH 0x2UL #define DB_TYPE_REMOVE_GLITCH 0x3UL -#define EDGE_TRAGGER 0x0UL +#define EDGE_TRIGGER 0x0UL #define LEVEL_TRIGGER 0x1UL #define ACTIVE_HIGH 0x0UL #define ACTIVE_LOW 0x1UL -#define BOTH_EADGE 0x2UL +#define BOTH_EDGES 0x2UL #define ENABLE_INTERRUPT 0x1UL #define DISABLE_INTERRUPT 0x0UL diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c index dd93f124e0a0..717f9592b28b 
100644 --- a/drivers/pinctrl/pinctrl-artpec6.c +++ b/drivers/pinctrl/pinctrl-artpec6.c @@ -988,7 +988,7 @@ static struct platform_driver artpec6_pmx_driver = { .of_match_table = artpec6_pinctrl_match, }, .probe = artpec6_pmx_probe, - .remove_new = artpec6_pmx_remove, + .remove = artpec6_pmx_remove, }; static int __init artpec6_pmx_init(void) diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c index 1374f30166bc..9bf53de20be8 100644 --- a/drivers/pinctrl/pinctrl-aw9523.c +++ b/drivers/pinctrl/pinctrl-aw9523.c @@ -80,7 +80,7 @@ struct aw9523 { struct regmap *regmap; struct mutex i2c_lock; struct gpio_desc *reset_gpio; - struct regulator *vio_vreg; + int vio_vreg; struct pinctrl_dev *pctl; struct gpio_chip gpio; struct aw9523_irq *irq; @@ -550,10 +550,10 @@ static int aw9523_gpio_get(struct gpio_chip *chip, unsigned int offset) /** * _aw9523_gpio_get_multiple - Get I/O state for an entire port - * @regmap: Regmap structure - * @pin: gpiolib pin number + * @awi: Controller data * @regbit: hw pin index, used to retrieve port number * @state: returned port I/O state + * @mask: lines to read values for * * Return: Zero for success or negative number for error */ @@ -972,29 +972,23 @@ static int aw9523_probe(struct i2c_client *client) if (IS_ERR(awi->regmap)) return PTR_ERR(awi->regmap); - awi->vio_vreg = devm_regulator_get_optional(dev, "vio"); - if (IS_ERR(awi->vio_vreg)) { - if (PTR_ERR(awi->vio_vreg) == -EPROBE_DEFER) - return -EPROBE_DEFER; - awi->vio_vreg = NULL; - } else { - ret = regulator_enable(awi->vio_vreg); - if (ret) - return ret; - } + awi->vio_vreg = devm_regulator_get_enable_optional(dev, "vio"); + if (awi->vio_vreg && awi->vio_vreg != -ENODEV) + return awi->vio_vreg; + + ret = devm_mutex_init(dev, &awi->i2c_lock); + if (ret) + return ret; - mutex_init(&awi->i2c_lock); lockdep_set_subclass(&awi->i2c_lock, i2c_adapter_depth(client->adapter)); pdesc = devm_kzalloc(dev, sizeof(*pdesc), GFP_KERNEL); - if (!pdesc) { - ret = -ENOMEM; - goto err_disable_vregs; - } + if (!pdesc) + return -ENOMEM; ret = aw9523_hw_init(awi); if (ret) - goto err_disable_vregs; + return ret; pdesc->name = dev_name(dev); pdesc->owner = THIS_MODULE; @@ -1006,31 +1000,20 @@ static int aw9523_probe(struct i2c_client *client) ret = aw9523_init_gpiochip(awi, pdesc->npins); if (ret) - goto err_disable_vregs; + return ret; if (client->irq) { ret = aw9523_init_irq(awi, client->irq); if (ret) - goto err_disable_vregs; + return ret; } awi->pctl = devm_pinctrl_register(dev, pdesc, awi); - if (IS_ERR(awi->pctl)) { - ret = dev_err_probe(dev, PTR_ERR(awi->pctl), "Cannot register pinctrl"); - goto err_disable_vregs; - } - - ret = devm_gpiochip_add_data(dev, &awi->gpio, awi); - if (ret) - goto err_disable_vregs; - - return ret; + if (IS_ERR(awi->pctl)) + return dev_err_probe(dev, PTR_ERR(awi->pctl), + "Cannot register pinctrl"); -err_disable_vregs: - if (awi->vio_vreg) - regulator_disable(awi->vio_vreg); - mutex_destroy(&awi->i2c_lock); - return ret; + return devm_gpiochip_add_data(dev, &awi->gpio, awi); } static void aw9523_remove(struct i2c_client *client) @@ -1043,19 +1026,15 @@ static void aw9523_remove(struct i2c_client *client) * set the pins to hardware defaults before removing the driver * to leave it in a clean, safe and predictable state. 
*/ - if (awi->vio_vreg) { - regulator_disable(awi->vio_vreg); - } else { + if (awi->vio_vreg == -ENODEV) { mutex_lock(&awi->i2c_lock); aw9523_hw_init(awi); mutex_unlock(&awi->i2c_lock); } - - mutex_destroy(&awi->i2c_lock); } static const struct i2c_device_id aw9523_i2c_id_table[] = { - { "aw9523_i2c", 0 }, + { "aw9523_i2c" }, { } }; MODULE_DEVICE_TABLE(i2c, aw9523_i2c_id_table); diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c index 5096ccdd459e..0d6c2027d4c1 100644 --- a/drivers/pinctrl/pinctrl-cy8c95x0.c +++ b/drivers/pinctrl/pinctrl-cy8c95x0.c @@ -141,7 +141,6 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = { * @nport: Number of Gports in this chip * @gpio_chip: gpiolib chip * @driver_data: private driver data - * @regulator: Pointer to the regulator for the IC * @dev: struct device * @pctldev: pin controller device * @pinctrl_desc: pin controller description @@ -160,10 +159,9 @@ struct cy8c95x0_pinctrl { DECLARE_BITMAP(irq_trig_high, MAX_LINE); DECLARE_BITMAP(push_pull, MAX_LINE); DECLARE_BITMAP(shiftmask, MAX_LINE); - int nport; + unsigned int nport; struct gpio_chip gpio_chip; unsigned long driver_data; - struct regulator *regulator; struct device *dev; struct pinctrl_dev *pctldev; struct pinctrl_desc pinctrl_desc; @@ -612,9 +610,8 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, DECLARE_BITMAP(tmask, MAX_LINE); DECLARE_BITMAP(tval, MAX_LINE); int write_val; - int ret = 0; - int i; u8 bits; + int ret; /* Add the 4 bit gap of Gport2 */ bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE); @@ -625,7 +622,7 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, bitmap_shift_left(tval, tval, 4, MAX_LINE); bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3); - for (i = 0; i < chip->nport; i++) { + for (unsigned int i = 0; i < chip->nport; i++) { /* Skip over unused banks */ bits = bitmap_get_value8(tmask, i * BANK_SZ); if (!bits) @@ -634,15 +631,13 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, write_val = bitmap_get_value8(tval, i * BANK_SZ); ret = cy8c95x0_regmap_update_bits(chip, reg, i, bits, write_val); - if (ret < 0) - goto out; + if (ret < 0) { + dev_err(chip->dev, "failed writing register %d, port %u: err %d\n", reg, i, ret); + return ret; + } } -out: - if (ret < 0) - dev_err(chip->dev, "failed writing register %d, port %d: err %d\n", reg, i, ret); - - return ret; + return 0; } static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, @@ -652,9 +647,8 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, DECLARE_BITMAP(tval, MAX_LINE); DECLARE_BITMAP(tmp, MAX_LINE); int read_val; - int ret = 0; - int i; u8 bits; + int ret; /* Add the 4 bit gap of Gport2 */ bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE); @@ -665,15 +659,17 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, bitmap_shift_left(tval, tval, 4, MAX_LINE); bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3); - for (i = 0; i < chip->nport; i++) { + for (unsigned int i = 0; i < chip->nport; i++) { /* Skip over unused banks */ bits = bitmap_get_value8(tmask, i * BANK_SZ); if (!bits) continue; ret = cy8c95x0_regmap_read(chip, reg, i, &read_val); - if (ret < 0) - goto out; + if (ret < 0) { + dev_err(chip->dev, "failed reading register %d, port %u: err %d\n", reg, i, ret); + return ret; + } read_val &= bits; read_val |= bitmap_get_value8(tval, i * BANK_SZ) & ~bits; @@ -684,11 +680,7 @@ static 
int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg, bitmap_shift_right(tmp, tval, 4, MAX_LINE); bitmap_replace(val, tmp, tval, chip->shiftmask, MAX_LINE); -out: - if (ret < 0) - dev_err(chip->dev, "failed reading register %d, port %d: err %d\n", reg, i, ret); - - return ret; + return 0; } static int cy8c95x0_gpio_direction_input(struct gpio_chip *gc, unsigned int off) @@ -754,14 +746,12 @@ static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off) ret = cy8c95x0_regmap_read(chip, CY8C95X0_DIRECTION, port, ®_val); if (ret < 0) - goto out; + return ret; if (reg_val & bit) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; -out: - return ret; } static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip, @@ -823,8 +813,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip, case PIN_CONFIG_SLEEP_HARDWARE_STATE: case PIN_CONFIG_SLEW_RATE: default: - ret = -ENOTSUPP; - goto out; + return -ENOTSUPP; } /* * Writing 1 to one of the drive mode registers will automatically @@ -832,7 +821,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip, */ ret = cy8c95x0_regmap_read(chip, reg, port, ®_val); if (ret < 0) - goto out; + return ret; if (reg_val & bit) arg = 1; @@ -840,8 +829,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip, arg = !arg; *config = pinconf_to_config_packed(param, (u16)arg); -out: - return ret; + return 0; } static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip, @@ -853,7 +841,6 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip, unsigned long param = pinconf_to_config_param(config); unsigned long arg = pinconf_to_config_argument(config); unsigned int reg; - int ret; switch (param) { case PIN_CONFIG_BIAS_PULL_UP: @@ -884,22 +871,17 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip, reg = CY8C95X0_PWMSEL; break; case PIN_CONFIG_OUTPUT_ENABLE: - ret = cy8c95x0_pinmux_direction(chip, off, !arg); - goto out; + return cy8c95x0_pinmux_direction(chip, off, !arg); case PIN_CONFIG_INPUT_ENABLE: - ret = cy8c95x0_pinmux_direction(chip, off, arg); - goto out; + return cy8c95x0_pinmux_direction(chip, off, arg); default: - ret = -ENOTSUPP; - goto out; + return -ENOTSUPP; } /* * Writing 1 to one of the drive mode registers will automatically * clear conflicting set bits in the other drive mode registers. 
*/ - ret = cy8c95x0_regmap_write_bits(chip, reg, port, bit, bit); -out: - return ret; + return cy8c95x0_regmap_write_bits(chip, reg, port, bit, bit); } static int cy8c95x0_gpio_get_multiple(struct gpio_chip *gc, @@ -1424,32 +1406,30 @@ static int cy8c95x0_detect(struct i2c_client *client, } dev_info(&client->dev, "Found a %s chip at 0x%02x.\n", name, client->addr); - strscpy(info->type, name, I2C_NAME_SIZE); + strscpy(info->type, name); return 0; } static int cy8c95x0_probe(struct i2c_client *client) { + struct device *dev = &client->dev; struct cy8c95x0_pinctrl *chip; struct regmap_config regmap_conf; struct regmap_range_cfg regmap_range_conf; - struct regulator *reg; int ret; - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; - chip->dev = &client->dev; + chip->dev = dev; /* Set the device type */ chip->driver_data = (uintptr_t)i2c_get_match_data(client); if (!chip->driver_data) return -ENODEV; - i2c_set_clientdata(client, chip); - chip->tpin = chip->driver_data & CY8C95X0_GPIO_MASK; chip->nport = DIV_ROUND_UP(CY8C95X0_PIN_TO_OFFSET(chip->tpin), BANK_SZ); @@ -1457,46 +1437,34 @@ static int cy8c95x0_probe(struct i2c_client *client) switch (chip->tpin) { case 20: - strscpy(chip->name, cy8c95x0_id[0].name, I2C_NAME_SIZE); + strscpy(chip->name, cy8c95x0_id[0].name); regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE; break; case 40: - strscpy(chip->name, cy8c95x0_id[1].name, I2C_NAME_SIZE); + strscpy(chip->name, cy8c95x0_id[1].name); regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE; break; case 60: - strscpy(chip->name, cy8c95x0_id[2].name, I2C_NAME_SIZE); + strscpy(chip->name, cy8c95x0_id[2].name); regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE; break; default: return -ENODEV; } - reg = devm_regulator_get(&client->dev, "vdd"); - if (IS_ERR(reg)) { - if (PTR_ERR(reg) == -EPROBE_DEFER) - return -EPROBE_DEFER; - } else { - ret = regulator_enable(reg); - if (ret) { - dev_err(&client->dev, "failed to enable regulator vdd: %d\n", ret); - return ret; - } - chip->regulator = reg; - } + ret = devm_regulator_get_enable(dev, "vdd"); + if (ret) + return dev_err_probe(dev, ret, "failed to enable regulator vdd\n"); /* bring the chip out of reset if reset pin is provided */ - chip->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(chip->gpio_reset)) { - ret = dev_err_probe(chip->dev, PTR_ERR(chip->gpio_reset), - "Failed to get GPIO 'reset'\n"); - goto err_exit; - } else if (chip->gpio_reset) { - usleep_range(1000, 2000); + chip->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(chip->gpio_reset)) + return dev_err_probe(dev, PTR_ERR(chip->gpio_reset), "Failed to get GPIO 'reset'\n"); + gpiod_set_consumer_name(chip->gpio_reset, "CY8C95X0 RESET"); + if (chip->gpio_reset) { + fsleep(1000); gpiod_set_value_cansleep(chip->gpio_reset, 0); - usleep_range(250000, 300000); - - gpiod_set_consumer_name(chip->gpio_reset, "CY8C95X0 RESET"); + fsleep(250000); } /* Regmap for direct and paged registers */ @@ -1506,10 +1474,8 @@ static int cy8c95x0_probe(struct i2c_client *client) regmap_conf.num_reg_defaults_raw = regmap_range_conf.range_max; chip->regmap = devm_regmap_init_i2c(client, ®map_conf); - if (IS_ERR(chip->regmap)) { - ret = PTR_ERR(chip->regmap); - goto err_exit; - } + if (IS_ERR(chip->regmap)) + return PTR_ERR(chip->regmap); bitmap_zero(chip->push_pull, MAX_LINE); bitmap_zero(chip->shiftmask, 
MAX_LINE); @@ -1525,31 +1491,14 @@ static int cy8c95x0_probe(struct i2c_client *client) if (client->irq) { ret = cy8c95x0_irq_setup(chip, client->irq); if (ret) - goto err_exit; + return ret; } ret = cy8c95x0_setup_pinctrl(chip); if (ret) - goto err_exit; - - ret = cy8c95x0_setup_gpiochip(chip); - if (ret) - goto err_exit; - - return 0; - -err_exit: - if (!IS_ERR_OR_NULL(chip->regulator)) - regulator_disable(chip->regulator); - return ret; -} - -static void cy8c95x0_remove(struct i2c_client *client) -{ - struct cy8c95x0_pinctrl *chip = i2c_get_clientdata(client); + return ret; - if (!IS_ERR_OR_NULL(chip->regulator)) - regulator_disable(chip->regulator); + return cy8c95x0_setup_gpiochip(chip); } static const struct acpi_device_id cy8c95x0_acpi_ids[] = { @@ -1565,7 +1514,6 @@ static struct i2c_driver cy8c95x0_driver = { .acpi_match_table = cy8c95x0_acpi_ids, }, .probe = cy8c95x0_probe, - .remove = cy8c95x0_remove, .id_table = cy8c95x0_id, .detect = cy8c95x0_detect, }; diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index 0f6b55fec31d..eddb01796a83 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -96,8 +96,6 @@ struct k210_fpioa_data { struct k210_fpioa __iomem *fpioa; struct regmap *sysctl_map; u32 power_offset; - struct clk *clk; - struct clk *pclk; }; #define K210_PIN_NAME(i) ("IO_" #i) @@ -183,7 +181,7 @@ static const u32 k210_pinconf_mode_id_to_mode[] = { [K210_PC_DEFAULT_INT13] = K210_PC_MODE_IN | K210_PC_PU, }; -#undef DEFAULT +#undef K210_PC_DEFAULT /* * Pin functions configuration information. @@ -925,6 +923,7 @@ static int k210_fpioa_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct k210_fpioa_data *pdata; + struct clk *clk, *pclk; dev_info(dev, "K210 FPIOA pin controller\n"); @@ -939,13 +938,13 @@ static int k210_fpioa_probe(struct platform_device *pdev) if (IS_ERR(pdata->fpioa)) return PTR_ERR(pdata->fpioa); - pdata->clk = devm_clk_get_enabled(dev, "ref"); - if (IS_ERR(pdata->clk)) - return PTR_ERR(pdata->clk); + clk = devm_clk_get_enabled(dev, "ref"); + if (IS_ERR(clk)) + return PTR_ERR(clk); - pdata->pclk = devm_clk_get_optional_enabled(dev, "pclk"); - if (IS_ERR(pdata->pclk)) - return PTR_ERR(pdata->pclk); + pclk = devm_clk_get_optional_enabled(dev, "pclk"); + if (IS_ERR(pclk)) + return PTR_ERR(pclk); pdata->sysctl_map = syscon_regmap_lookup_by_phandle_args(np, diff --git a/drivers/pinctrl/pinctrl-k230.c b/drivers/pinctrl/pinctrl-k230.c new file mode 100644 index 000000000000..a9b4627b46b0 --- /dev/null +++ b/drivers/pinctrl/pinctrl-k230.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +/* + * Copyright (C) 2024 Canaan Bright Sight Co. 
Ltd + * Copyright (C) 2024 Ze Huang <18771902331@163.com> + */ +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/regmap.h> +#include <linux/seq_file.h> + +#include "core.h" +#include "pinconf.h" + +#define K230_NPINS 64 + +#define K230_SHIFT_ST (0) +#define K230_SHIFT_DS (1) +#define K230_SHIFT_BIAS (5) +#define K230_SHIFT_PD (5) +#define K230_SHIFT_PU (6) +#define K230_SHIFT_OE (7) +#define K230_SHIFT_IE (8) +#define K230_SHIFT_MSC (9) +#define K230_SHIFT_SL (10) +#define K230_SHIFT_SEL (11) + +#define K230_PC_ST BIT(0) +#define K230_PC_DS GENMASK(4, 1) +#define K230_PC_PD BIT(5) +#define K230_PC_PU BIT(6) +#define K230_PC_BIAS GENMASK(6, 5) +#define K230_PC_OE BIT(7) +#define K230_PC_IE BIT(8) +#define K230_PC_MSC BIT(9) +#define K230_PC_SL BIT(10) +#define K230_PC_SEL GENMASK(13, 11) + +struct k230_pin_conf { + unsigned int func; + unsigned long *configs; + unsigned int nconfigs; +}; + +struct k230_pin_group { + const char *name; + unsigned int *pins; + unsigned int num_pins; + + struct k230_pin_conf *data; +}; + +struct k230_pmx_func { + const char *name; + const char **groups; + unsigned int *group_idx; + unsigned int ngroups; +}; + +struct k230_pinctrl { + struct pinctrl_desc pctl; + struct pinctrl_dev *pctl_dev; + struct regmap *regmap_base; + void __iomem *base; + struct k230_pin_group *groups; + unsigned int ngroups; + struct k230_pmx_func *functions; + unsigned int nfunctions; +}; + +static const struct regmap_config k230_regmap_config = { + .name = "canaan,pinctrl", + .reg_bits = 32, + .val_bits = 32, + .max_register = 0x100, + .reg_stride = 4, +}; + +static int k230_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + + return info->ngroups; +} + +static const char *k230_get_group_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + + return info->groups[selector].name; +} + +static int k230_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + + if (selector >= info->ngroups) + return -EINVAL; + + *pins = info->groups[selector].pins; + *num_pins = info->groups[selector].num_pins; + + return 0; +} + +static inline const struct k230_pmx_func *k230_name_to_funtion( + const struct k230_pinctrl *info, const char *name) +{ + unsigned int i; + + for (i = 0; i < info->nfunctions; i++) { + if (!strcmp(info->functions[i].name, name)) + return &info->functions[i]; + } + + return NULL; +} + +static struct pinctrl_pin_desc k230_pins[] = { + PINCTRL_PIN(0, "IO0"), PINCTRL_PIN(1, "IO1"), PINCTRL_PIN(2, "IO2"), + PINCTRL_PIN(3, "IO3"), PINCTRL_PIN(4, "IO4"), PINCTRL_PIN(5, "IO5"), + PINCTRL_PIN(6, "IO6"), PINCTRL_PIN(7, "IO7"), PINCTRL_PIN(8, "IO8"), + PINCTRL_PIN(9, "IO9"), PINCTRL_PIN(10, "IO10"), PINCTRL_PIN(11, "IO11"), + PINCTRL_PIN(12, "IO12"), PINCTRL_PIN(13, "IO13"), PINCTRL_PIN(14, "IO14"), + PINCTRL_PIN(15, "IO15"), PINCTRL_PIN(16, "IO16"), PINCTRL_PIN(17, "IO17"), + PINCTRL_PIN(18, "IO18"), PINCTRL_PIN(19, "IO19"), PINCTRL_PIN(20, "IO20"), + PINCTRL_PIN(21, "IO21"), PINCTRL_PIN(22, "IO22"), PINCTRL_PIN(23, "IO23"), + 
PINCTRL_PIN(24, "IO24"), PINCTRL_PIN(25, "IO25"), PINCTRL_PIN(26, "IO26"), + PINCTRL_PIN(27, "IO27"), PINCTRL_PIN(28, "IO28"), PINCTRL_PIN(29, "IO29"), + PINCTRL_PIN(30, "IO30"), PINCTRL_PIN(31, "IO31"), PINCTRL_PIN(32, "IO32"), + PINCTRL_PIN(33, "IO33"), PINCTRL_PIN(34, "IO34"), PINCTRL_PIN(35, "IO35"), + PINCTRL_PIN(36, "IO36"), PINCTRL_PIN(37, "IO37"), PINCTRL_PIN(38, "IO38"), + PINCTRL_PIN(39, "IO39"), PINCTRL_PIN(40, "IO40"), PINCTRL_PIN(41, "IO41"), + PINCTRL_PIN(42, "IO42"), PINCTRL_PIN(43, "IO43"), PINCTRL_PIN(44, "IO44"), + PINCTRL_PIN(45, "IO45"), PINCTRL_PIN(46, "IO46"), PINCTRL_PIN(47, "IO47"), + PINCTRL_PIN(48, "IO48"), PINCTRL_PIN(49, "IO49"), PINCTRL_PIN(50, "IO50"), + PINCTRL_PIN(51, "IO51"), PINCTRL_PIN(52, "IO52"), PINCTRL_PIN(53, "IO53"), + PINCTRL_PIN(54, "IO54"), PINCTRL_PIN(55, "IO55"), PINCTRL_PIN(56, "IO56"), + PINCTRL_PIN(57, "IO57"), PINCTRL_PIN(58, "IO58"), PINCTRL_PIN(59, "IO59"), + PINCTRL_PIN(60, "IO60"), PINCTRL_PIN(61, "IO61"), PINCTRL_PIN(62, "IO62"), + PINCTRL_PIN(63, "IO63") +}; + +static void k230_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, unsigned int offset) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + u32 val, bias, drive, input, slew, schmitt, power; + struct k230_pin_group *grp = k230_pins[offset].drv_data; + static const char * const biasing[] = { + "pull none", "pull down", "pull up", "" }; + static const char * const enable[] = { + "disable", "enable" }; + static const char * const power_source[] = { + "3V3", "1V8" }; + + regmap_read(info->regmap_base, offset * 4, &val); + + drive = (val & K230_PC_DS) >> K230_SHIFT_DS; + bias = (val & K230_PC_BIAS) >> K230_SHIFT_BIAS; + input = (val & K230_PC_IE) >> K230_SHIFT_IE; + slew = (val & K230_PC_SL) >> K230_SHIFT_SL; + schmitt = (val & K230_PC_ST) >> K230_SHIFT_ST; + power = (val & K230_PC_MSC) >> K230_SHIFT_MSC; + + seq_printf(s, "%s - strength %d - %s - %s - slewrate %s - schmitt %s - %s", + grp ? grp->name : "unknown", + drive, + biasing[bias], + input ? 
"input" : "output", + enable[slew], + enable[schmitt], + power_source[power]); +} + +static int k230_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np_config, + struct pinctrl_map **map, + unsigned int *num_maps) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = info->pctl_dev->dev; + const struct k230_pmx_func *func; + const struct k230_pin_group *grp; + struct pinctrl_map *new_map; + int map_num, i, j, idx; + unsigned int grp_id; + + func = k230_name_to_funtion(info, np_config->name); + if (!func) { + dev_err(dev, "function %s not found\n", np_config->name); + return -EINVAL; + } + + map_num = 0; + for (i = 0; i < func->ngroups; ++i) { + grp_id = func->group_idx[i]; + /* npins of config map plus a mux map */ + map_num += info->groups[grp_id].num_pins + 1; + } + + new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL); + if (!new_map) + return -ENOMEM; + *map = new_map; + *num_maps = map_num; + + idx = 0; + for (i = 0; i < func->ngroups; ++i) { + grp_id = func->group_idx[i]; + grp = &info->groups[grp_id]; + new_map[idx].type = PIN_MAP_TYPE_MUX_GROUP; + new_map[idx].data.mux.group = grp->name; + new_map[idx].data.mux.function = np_config->name; + idx++; + + for (j = 0; j < grp->num_pins; ++j) { + new_map[idx].type = PIN_MAP_TYPE_CONFIGS_PIN; + new_map[idx].data.configs.group_or_pin = + pin_get_name(pctldev, grp->pins[j]); + new_map[idx].data.configs.configs = + grp->data[j].configs; + new_map[idx].data.configs.num_configs = + grp->data[j].nconfigs; + idx++; + } + } + + return 0; +} + +static void k230_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned int num_maps) +{ + kfree(map); +} + +static const struct pinctrl_ops k230_pctrl_ops = { + .get_groups_count = k230_get_groups_count, + .get_group_name = k230_get_group_name, + .get_group_pins = k230_get_group_pins, + .pin_dbg_show = k230_pinctrl_pin_dbg_show, + .dt_node_to_map = k230_dt_node_to_map, + .dt_free_map = k230_dt_free_map, +}; + +static int k230_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + unsigned int val, arg; + + regmap_read(info->regmap_base, pin * 4, &val); + + switch (param) { + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + arg = (val & K230_PC_ST) ? 1 : 0; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + arg = (val & K230_PC_DS) >> K230_SHIFT_DS; + break; + case PIN_CONFIG_BIAS_DISABLE: + arg = (val & K230_PC_BIAS) ? 0 : 1; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + arg = (val & K230_PC_PD) ? 1 : 0; + break; + case PIN_CONFIG_BIAS_PULL_UP: + arg = (val & K230_PC_PU) ? 1 : 0; + break; + case PIN_CONFIG_OUTPUT_ENABLE: + arg = (val & K230_PC_OE) ? 1 : 0; + break; + case PIN_CONFIG_INPUT_ENABLE: + arg = (val & K230_PC_IE) ? 1 : 0; + break; + case PIN_CONFIG_POWER_SOURCE: + arg = (val & K230_PC_MSC) ? 1 : 0; + break; + case PIN_CONFIG_SLEW_RATE: + arg = (val & K230_PC_SL) ? 
1 : 0; + break; + default: + return -EINVAL; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int k230_pinconf_set_param(struct pinctrl_dev *pctldev, unsigned int pin, + enum pin_config_param param, unsigned int arg) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + unsigned int val; + + regmap_read(info->regmap_base, pin * 4, &val); + + switch (param) { + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (arg) + val |= K230_PC_ST; + else + val &= ~K230_PC_ST; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + val &= ~K230_PC_DS; + val |= (arg << K230_SHIFT_DS) & K230_PC_DS; + break; + case PIN_CONFIG_BIAS_DISABLE: + val &= ~K230_PC_BIAS; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + if (!arg) + return -EINVAL; + val |= K230_PC_PD; + break; + case PIN_CONFIG_BIAS_PULL_UP: + if (!arg) + return -EINVAL; + val |= K230_PC_PU; + break; + case PIN_CONFIG_OUTPUT_ENABLE: + if (!arg) + return -EINVAL; + val |= K230_PC_OE; + break; + case PIN_CONFIG_INPUT_ENABLE: + if (!arg) + return -EINVAL; + val |= K230_PC_IE; + break; + case PIN_CONFIG_POWER_SOURCE: + if (arg) + val |= K230_PC_MSC; + else + val &= ~K230_PC_MSC; + break; + case PIN_CONFIG_SLEW_RATE: + if (arg) + val |= K230_PC_SL; + else + val &= ~K230_PC_SL; + break; + default: + return -EINVAL; + } + + regmap_write(info->regmap_base, pin * 4, val); + + return 0; +} + +static int k230_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = info->pctl_dev->dev; + enum pin_config_param param; + unsigned int arg, i; + int ret; + + if (pin >= K230_NPINS) { + dev_err(dev, "pin number out of range\n"); + return -EINVAL; + } + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + ret = k230_pinconf_set_param(pctldev, pin, param, arg); + if (ret) + return ret; + } + + return 0; +} + +static void k230_pconf_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, unsigned int pin) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + unsigned int val; + + regmap_read(info->regmap_base, pin * 4, &val); + + seq_printf(s, " 0x%08x", val); +} + +static const struct pinconf_ops k230_pinconf_ops = { + .is_generic = true, + .pin_config_get = k230_pinconf_get, + .pin_config_set = k230_pinconf_set, + .pin_config_dbg_show = k230_pconf_dbg_show, +}; + +static int k230_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + + return info->nfunctions; +} + +static const char *k230_get_fname(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + + return info->functions[selector].name; +} + +static int k230_get_groups(struct pinctrl_dev *pctldev, unsigned int selector, + const char * const **groups, unsigned int *num_groups) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + + *groups = info->functions[selector].groups; + *num_groups = info->functions[selector].ngroups; + + return 0; +} + +static int k230_set_mux(struct pinctrl_dev *pctldev, unsigned int selector, + unsigned int group) +{ + struct k230_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + const struct k230_pin_conf *data = info->groups[group].data; + struct k230_pin_group *grp = &info->groups[group]; + const unsigned int *pins = grp->pins; + struct regmap *regmap; + unsigned int 
value, mask; + int cnt, reg; + + regmap = info->regmap_base; + + for (cnt = 0; cnt < grp->num_pins; cnt++) { + reg = pins[cnt] * 4; + value = data[cnt].func << K230_SHIFT_SEL; + mask = K230_PC_SEL; + regmap_update_bits(regmap, reg, mask, value); + k230_pins[pins[cnt]].drv_data = grp; + } + + return 0; +} + +static const struct pinmux_ops k230_pmxops = { + .get_functions_count = k230_get_functions_count, + .get_function_name = k230_get_fname, + .get_function_groups = k230_get_groups, + .set_mux = k230_set_mux, + .strict = true, +}; + +static int k230_pinctrl_parse_groups(struct device_node *np, + struct k230_pin_group *grp, + struct k230_pinctrl *info, + unsigned int index) +{ + struct device *dev = info->pctl_dev->dev; + const __be32 *list; + int size, i, ret; + + grp->name = np->name; + + list = of_get_property(np, "pinmux", &size); + size /= sizeof(*list); + + grp->num_pins = size; + grp->pins = devm_kcalloc(dev, grp->num_pins, sizeof(*grp->pins), + GFP_KERNEL); + grp->data = devm_kcalloc(dev, grp->num_pins, sizeof(*grp->data), + GFP_KERNEL); + if (!grp->pins || !grp->data) + return -ENOMEM; + + for (i = 0; i < size; i++) { + unsigned int mux_data = be32_to_cpu(*list++); + + grp->pins[i] = (mux_data >> 8); + grp->data[i].func = (mux_data & 0xff); + + ret = pinconf_generic_parse_dt_config(np, NULL, + &grp->data[i].configs, + &grp->data[i].nconfigs); + if (ret) + return ret; + } + + return 0; +} + +static int k230_pinctrl_parse_functions(struct device_node *np, + struct k230_pinctrl *info, + unsigned int index) +{ + struct device *dev = info->pctl_dev->dev; + struct k230_pmx_func *func; + struct k230_pin_group *grp; + static unsigned int idx, i; + int ret; + + func = &info->functions[index]; + + func->name = np->name; + func->ngroups = of_get_child_count(np); + if (func->ngroups <= 0) + return 0; + + func->groups = devm_kcalloc(dev, func->ngroups, + sizeof(*func->groups), GFP_KERNEL); + func->group_idx = devm_kcalloc(dev, func->ngroups, + sizeof(*func->group_idx), GFP_KERNEL); + if (!func->groups || !func->group_idx) + return -ENOMEM; + + i = 0; + + for_each_child_of_node_scoped(np, child) { + func->groups[i] = child->name; + func->group_idx[i] = idx; + grp = &info->groups[idx]; + idx++; + ret = k230_pinctrl_parse_groups(child, grp, info, i++); + if (ret) + return ret; + } + + return 0; +} + +static void k230_pinctrl_child_count(struct k230_pinctrl *info, + struct device_node *np) +{ + for_each_child_of_node_scoped(np, child) { + info->nfunctions++; + info->ngroups += of_get_child_count(child); + } +} + +static int k230_pinctrl_parse_dt(struct platform_device *pdev, + struct k230_pinctrl *info) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + unsigned int i; + int ret; + + k230_pinctrl_child_count(info, np); + + info->functions = devm_kcalloc(dev, info->nfunctions, + sizeof(*info->functions), GFP_KERNEL); + info->groups = devm_kcalloc(dev, info->ngroups, + sizeof(*info->groups), GFP_KERNEL); + if (!info->functions || !info->groups) + return -ENOMEM; + + i = 0; + + for_each_child_of_node_scoped(np, child) { + ret = k230_pinctrl_parse_functions(child, info, i++); + if (ret) { + dev_err(dev, "failed to parse function\n"); + return ret; + } + } + + return 0; +} + +static int k230_pinctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct k230_pinctrl *info; + struct pinctrl_desc *pctl; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + pctl = &info->pctl; + + pctl->name = "k230-pinctrl"; + 
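	/*
	 * The descriptor filled in here is registered further below;
	 * after that, k230_pinctrl_parse_dt() builds the function and
	 * group tables from the device tree. Each "pinmux" cell it hands
	 * to k230_pinctrl_parse_groups() above packs pin and function as
	 * (pin << 8) | func; with illustrative values only:
	 *
	 *	u32 cell = (9 << 8) | 2;	// function 2 on IO9
	 *	pin  = cell >> 8;		// 9
	 *	func = cell & 0xff;		// 2
	 */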
pctl->owner = THIS_MODULE; + pctl->pins = k230_pins; + pctl->npins = ARRAY_SIZE(k230_pins); + pctl->pctlops = &k230_pctrl_ops; + pctl->pmxops = &k230_pmxops; + pctl->confops = &k230_pinconf_ops; + + info->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(info->base)) + return PTR_ERR(info->base); + + info->regmap_base = devm_regmap_init_mmio(dev, info->base, + &k230_regmap_config); + if (IS_ERR(info->regmap_base)) + return dev_err_probe(dev, PTR_ERR(info->regmap_base), + "failed to init regmap\n"); + + info->pctl_dev = devm_pinctrl_register(dev, pctl, info); + if (IS_ERR(info->pctl_dev)) + return dev_err_probe(dev, PTR_ERR(info->pctl_dev), + "devm_pinctrl_register failed\n"); + + k230_pinctrl_parse_dt(pdev, info); + + return 0; +} + +static const struct of_device_id k230_dt_ids[] = { + { .compatible = "canaan,k230-pinctrl", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, k230_dt_ids); + +static struct platform_driver k230_pinctrl_driver = { + .probe = k230_pinctrl_probe, + .driver = { + .name = "k230-pinctrl", + .of_match_table = k230_dt_ids, + }, +}; +module_platform_driver(k230_pinctrl_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ze Huang <18771902331@163.com>"); +MODULE_DESCRIPTION("Canaan K230 pinctrl driver"); diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index d1ab8450ea93..61532a7a612a 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -57,6 +57,8 @@ enum { FUNC_CAN1, FUNC_CLKMON, FUNC_NONE, + FUNC_FAN, + FUNC_FC, FUNC_FC0_a, FUNC_FC0_b, FUNC_FC0_c, @@ -71,6 +73,7 @@ enum { FUNC_FC4_a, FUNC_FC4_b, FUNC_FC4_c, + FUNC_FC_SHRD, FUNC_FC_SHRD0, FUNC_FC_SHRD1, FUNC_FC_SHRD2, @@ -92,6 +95,7 @@ enum { FUNC_FC_SHRD18, FUNC_FC_SHRD19, FUNC_FC_SHRD20, + FUNC_FUSA, FUNC_GPIO, FUNC_IB_TRG_a, FUNC_IB_TRG_b, @@ -108,6 +112,8 @@ enum { FUNC_IRQ1, FUNC_IRQ1_IN, FUNC_IRQ1_OUT, + FUNC_IRQ3, + FUNC_IRQ4, FUNC_EXT_IRQ, FUNC_MIIM, FUNC_MIIM_a, @@ -115,12 +121,14 @@ enum { FUNC_MIIM_c, FUNC_MIIM_Sa, FUNC_MIIM_Sb, + FUNC_MIIM_IRQ, FUNC_OB_TRG, FUNC_OB_TRG_a, FUNC_OB_TRG_b, FUNC_PHY_LED, FUNC_PCI_WAKE, FUNC_MD, + FUNC_PCIE_PERST, FUNC_PTP0, FUNC_PTP1, FUNC_PTP2, @@ -152,6 +160,7 @@ enum { FUNC_SGPIO_b, FUNC_SI, FUNC_SI2, + FUNC_SYNCE, FUNC_TACHO, FUNC_TACHO_a, FUNC_TACHO_b, @@ -170,6 +179,10 @@ enum { FUNC_USB_S_a, FUNC_USB_S_b, FUNC_USB_S_c, + FUNC_USB_POWER, + FUNC_USB2PHY_RST, + FUNC_USB_OVER_DETECT, + FUNC_USB_ULPI, FUNC_PLL_STAT, FUNC_EMMC, FUNC_EMMC_SD, @@ -184,6 +197,8 @@ static const char *const ocelot_function_names[] = { [FUNC_CAN1] = "can1", [FUNC_CLKMON] = "clkmon", [FUNC_NONE] = "none", + [FUNC_FAN] = "fan", + [FUNC_FC] = "fc", [FUNC_FC0_a] = "fc0_a", [FUNC_FC0_b] = "fc0_b", [FUNC_FC0_c] = "fc0_c", @@ -198,6 +213,7 @@ static const char *const ocelot_function_names[] = { [FUNC_FC4_a] = "fc4_a", [FUNC_FC4_b] = "fc4_b", [FUNC_FC4_c] = "fc4_c", + [FUNC_FC_SHRD] = "fc_shrd", [FUNC_FC_SHRD0] = "fc_shrd0", [FUNC_FC_SHRD1] = "fc_shrd1", [FUNC_FC_SHRD2] = "fc_shrd2", @@ -219,6 +235,7 @@ static const char *const ocelot_function_names[] = { [FUNC_FC_SHRD18] = "fc_shrd18", [FUNC_FC_SHRD19] = "fc_shrd19", [FUNC_FC_SHRD20] = "fc_shrd20", + [FUNC_FUSA] = "fusa", [FUNC_GPIO] = "gpio", [FUNC_IB_TRG_a] = "ib_trig_a", [FUNC_IB_TRG_b] = "ib_trig_b", @@ -235,6 +252,8 @@ static const char *const ocelot_function_names[] = { [FUNC_IRQ1] = "irq1", [FUNC_IRQ1_IN] = "irq1_in", [FUNC_IRQ1_OUT] = "irq1_out", + [FUNC_IRQ3] = "irq3", + [FUNC_IRQ4] = "irq4", [FUNC_EXT_IRQ] = "ext_irq", [FUNC_MIIM] = "miim", [FUNC_MIIM_a] = "miim_a",
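Both the aw9523 and cy8c95x0 conversions earlier in this section replace open-coded regulator get/enable/disable handling with the managed helpers, which is what lets their probe paths drop the err_disable_vregs unwind labels and their remove callbacks shrink or disappear. A minimal sketch of the pattern both drivers converge on (hypothetical example_probe(), shown only for reference and not part of either driver):

	#include <linux/device.h>
	#include <linux/i2c.h>
	#include <linux/regulator/consumer.h>

	static int example_probe(struct i2c_client *client)
	{
		struct device *dev = &client->dev;
		int ret;

		/*
		 * Gets "vdd", enables it, and registers an unwind action
		 * that disables it again on unbind: no struct regulator
		 * pointer to keep and nothing to undo in .remove.
		 */
		ret = devm_regulator_get_enable(dev, "vdd");
		if (ret)
			return dev_err_probe(dev, ret,
					     "failed to enable regulator vdd\n");

		return 0;
	}

The _optional variant used by aw9523 follows the same int-return convention but yields -ENODEV when no supply is described; the driver keeps that value in its (now int) vio_vreg field so the remove callback knows whether it still needs to reset the pins to hardware defaults.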
@@ -242,8 +261,10 @@ static const char *const ocelot_function_names[] = { [FUNC_MIIM_c] = "miim_c", [FUNC_MIIM_Sa] = "miim_slave_a", [FUNC_MIIM_Sb] = "miim_slave_b", + [FUNC_MIIM_IRQ] = "miim_irq", [FUNC_PHY_LED] = "phy_led", [FUNC_PCI_WAKE] = "pci_wake", + [FUNC_PCIE_PERST] = "pcie_perst", [FUNC_MD] = "md", [FUNC_OB_TRG] = "ob_trig", [FUNC_OB_TRG_a] = "ob_trig_a", @@ -279,6 +300,7 @@ static const char *const ocelot_function_names[] = { [FUNC_SGPIO_b] = "sgpio_b", [FUNC_SI] = "si", [FUNC_SI2] = "si2", + [FUNC_SYNCE] = "synce", [FUNC_TACHO] = "tacho", [FUNC_TACHO_a] = "tacho_a", [FUNC_TACHO_b] = "tacho_b", @@ -294,6 +316,10 @@ static const char *const ocelot_function_names[] = { [FUNC_USB_S_a] = "usb_slave_a", [FUNC_USB_S_b] = "usb_slave_b", [FUNC_USB_S_c] = "usb_slave_c", + [FUNC_USB_POWER] = "usb_power", + [FUNC_USB2PHY_RST] = "usb2phy_rst", + [FUNC_USB_OVER_DETECT] = "usb_over_detect", + [FUNC_USB_ULPI] = "usb_ulpi", [FUNC_UART] = "uart", [FUNC_UART2] = "uart2", [FUNC_UART3] = "uart3", @@ -1136,6 +1162,165 @@ static const struct pinctrl_pin_desc lan966x_pins[] = { LAN966X_PIN(77), }; +#define LAN969X_P(p, f0, f1, f2, f3, f4, f5, f6, f7) \ +static struct ocelot_pin_caps lan969x_pin_##p = { \ + .pin = p, \ + .functions = { \ + FUNC_##f0, FUNC_##f1, FUNC_##f2, \ + FUNC_##f3 \ + }, \ + .a_functions = { \ + FUNC_##f4, FUNC_##f5, FUNC_##f6, \ + FUNC_##f7 \ + }, \ +} + +/* Pinmuxing table taken from data sheet */ +/* Pin FUNC0 FUNC1 FUNC2 FUNC3 FUNC4 FUNC5 FUNC6 FUNC7 */ +LAN969X_P(0, GPIO, IRQ0, FC_SHRD, PCIE_PERST, NONE, NONE, NONE, R); +LAN969X_P(1, GPIO, IRQ1, FC_SHRD, USB_POWER, NONE, NONE, NONE, R); +LAN969X_P(2, GPIO, FC, NONE, NONE, NONE, NONE, NONE, R); +LAN969X_P(3, GPIO, FC, NONE, NONE, NONE, NONE, NONE, R); +LAN969X_P(4, GPIO, FC, NONE, NONE, NONE, NONE, NONE, R); +LAN969X_P(5, GPIO, SGPIO_a, NONE, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(6, GPIO, SGPIO_a, NONE, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(7, GPIO, SGPIO_a, NONE, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(8, GPIO, SGPIO_a, NONE, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(9, GPIO, MIIM, MIIM_Sa, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(10, GPIO, MIIM, MIIM_Sa, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(11, GPIO, MIIM_IRQ, MIIM_Sa, CLKMON, NONE, NONE, NONE, R); +LAN969X_P(12, GPIO, IRQ3, FC_SHRD, USB2PHY_RST, NONE, NONE, NONE, R); +LAN969X_P(13, GPIO, IRQ4, FC_SHRD, USB_OVER_DETECT, NONE, NONE, NONE, R); +LAN969X_P(14, GPIO, EMMC_SD, QSPI1, FC, NONE, NONE, NONE, R); +LAN969X_P(15, GPIO, EMMC_SD, QSPI1, FC, NONE, NONE, NONE, R); +LAN969X_P(16, GPIO, EMMC_SD, QSPI1, FC, NONE, NONE, NONE, R); +LAN969X_P(17, GPIO, EMMC_SD, QSPI1, PTPSYNC_0, USB_POWER, NONE, NONE, R); +LAN969X_P(18, GPIO, EMMC_SD, QSPI1, PTPSYNC_1, USB2PHY_RST, NONE, NONE, R); +LAN969X_P(19, GPIO, EMMC_SD, QSPI1, PTPSYNC_2, USB_OVER_DETECT, NONE, NONE, R); +LAN969X_P(20, GPIO, EMMC_SD, NONE, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(21, GPIO, EMMC_SD, NONE, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(22, GPIO, EMMC_SD, NONE, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(23, GPIO, EMMC_SD, NONE, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(24, GPIO, EMMC_SD, NONE, NONE, NONE, NONE, NONE, R); +LAN969X_P(25, GPIO, FAN, FUSA, CAN0_a, QSPI1, NONE, NONE, R); +LAN969X_P(26, GPIO, FAN, FUSA, CAN0_a, QSPI1, NONE, NONE, R); +LAN969X_P(27, GPIO, SYNCE, FC, MIIM, QSPI1, NONE, NONE, R); +LAN969X_P(28, GPIO, SYNCE, FC, MIIM, QSPI1, NONE, NONE, R); +LAN969X_P(29, GPIO, SYNCE, FC, MIIM_IRQ, QSPI1, NONE, NONE, R); +LAN969X_P(30, GPIO, PTPSYNC_0, USB_ULPI, FC_SHRD, QSPI1, NONE, NONE, R); 
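/*
 * Mechanical expansion of the row above, to make the macro's split
 * explicit (no new behaviour): LAN969X_P(30, GPIO, PTPSYNC_0,
 * USB_ULPI, FC_SHRD, QSPI1, NONE, NONE, R) defines
 *
 *	static struct ocelot_pin_caps lan969x_pin_30 = {
 *		.pin = 30,
 *		.functions   = { FUNC_GPIO, FUNC_PTPSYNC_0,
 *				 FUNC_USB_ULPI, FUNC_FC_SHRD },
 *		.a_functions = { FUNC_QSPI1, FUNC_NONE,
 *				 FUNC_NONE, FUNC_R },
 *	};
 *
 * i.e. the FUNC0..FUNC3 columns of the data-sheet table land in
 * .functions[] and the alternative FUNC4..FUNC7 columns in
 * .a_functions[].
 */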
+LAN969X_P(31, GPIO, PTPSYNC_1, USB_ULPI, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(32, GPIO, PTPSYNC_2, USB_ULPI, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(33, GPIO, SD, USB_ULPI, FC_SHRD, NONE, NONE, NONE, R); +LAN969X_P(34, GPIO, SD, USB_ULPI, CAN1, FC_SHRD, NONE, NONE, R); +LAN969X_P(35, GPIO, SD, USB_ULPI, CAN1, FC_SHRD, NONE, NONE, R); +LAN969X_P(36, GPIO, SD, USB_ULPI, PCIE_PERST, FC_SHRD, NONE, NONE, R); +LAN969X_P(37, GPIO, SD, USB_ULPI, CAN0_b, NONE, NONE, NONE, R); +LAN969X_P(38, GPIO, SD, USB_ULPI, CAN0_b, NONE, NONE, NONE, R); +LAN969X_P(39, GPIO, SD, USB_ULPI, MIIM, NONE, NONE, NONE, R); +LAN969X_P(40, GPIO, SD, USB_ULPI, MIIM, NONE, NONE, NONE, R); +LAN969X_P(41, GPIO, SD, USB_ULPI, MIIM_IRQ, NONE, NONE, NONE, R); +LAN969X_P(42, GPIO, PTPSYNC_3, CAN1, NONE, NONE, NONE, NONE, R); +LAN969X_P(43, GPIO, PTPSYNC_4, CAN1, NONE, NONE, NONE, NONE, R); +LAN969X_P(44, GPIO, PTPSYNC_5, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(45, GPIO, PTPSYNC_6, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(46, GPIO, PTPSYNC_7, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(47, GPIO, NONE, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(48, GPIO, NONE, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(49, GPIO, NONE, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(50, GPIO, NONE, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(51, GPIO, NONE, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(52, GPIO, FAN, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(53, GPIO, FAN, SFP_SD, NONE, NONE, NONE, NONE, R); +LAN969X_P(54, GPIO, SYNCE, FC, NONE, NONE, NONE, NONE, R); +LAN969X_P(55, GPIO, SYNCE, FC, NONE, NONE, NONE, NONE, R); +LAN969X_P(56, GPIO, SYNCE, FC, NONE, NONE, NONE, NONE, R); +LAN969X_P(57, GPIO, SFP_SD, FC_SHRD, TWI, PTPSYNC_3, NONE, NONE, R); +LAN969X_P(58, GPIO, SFP_SD, FC_SHRD, TWI, PTPSYNC_4, NONE, NONE, R); +LAN969X_P(59, GPIO, SFP_SD, FC_SHRD, TWI, PTPSYNC_5, NONE, NONE, R); +LAN969X_P(60, GPIO, SFP_SD, FC_SHRD, TWI, PTPSYNC_6, NONE, NONE, R); +LAN969X_P(61, GPIO, MIIM, FC_SHRD, TWI, NONE, NONE, NONE, R); +LAN969X_P(62, GPIO, MIIM, FC_SHRD, TWI, NONE, NONE, NONE, R); +LAN969X_P(63, GPIO, MIIM_IRQ, FC_SHRD, TWI, NONE, NONE, NONE, R); +LAN969X_P(64, GPIO, FC, FC_SHRD, TWI, NONE, NONE, NONE, R); +LAN969X_P(65, GPIO, FC, FC_SHRD, TWI, NONE, NONE, NONE, R); +LAN969X_P(66, GPIO, FC, FC_SHRD, TWI, NONE, NONE, NONE, R); + +#define LAN969X_PIN(n) { \ + .number = n, \ + .name = "GPIO_"#n, \ + .drv_data = &lan969x_pin_##n \ +} + +static const struct pinctrl_pin_desc lan969x_pins[] = { + LAN969X_PIN(0), + LAN969X_PIN(1), + LAN969X_PIN(2), + LAN969X_PIN(3), + LAN969X_PIN(4), + LAN969X_PIN(5), + LAN969X_PIN(6), + LAN969X_PIN(7), + LAN969X_PIN(8), + LAN969X_PIN(9), + LAN969X_PIN(10), + LAN969X_PIN(11), + LAN969X_PIN(12), + LAN969X_PIN(13), + LAN969X_PIN(14), + LAN969X_PIN(15), + LAN969X_PIN(16), + LAN969X_PIN(17), + LAN969X_PIN(18), + LAN969X_PIN(19), + LAN969X_PIN(20), + LAN969X_PIN(21), + LAN969X_PIN(22), + LAN969X_PIN(23), + LAN969X_PIN(24), + LAN969X_PIN(25), + LAN969X_PIN(26), + LAN969X_PIN(27), + LAN969X_PIN(28), + LAN969X_PIN(29), + LAN969X_PIN(30), + LAN969X_PIN(31), + LAN969X_PIN(32), + LAN969X_PIN(33), + LAN969X_PIN(34), + LAN969X_PIN(35), + LAN969X_PIN(36), + LAN969X_PIN(37), + LAN969X_PIN(38), + LAN969X_PIN(39), + LAN969X_PIN(40), + LAN969X_PIN(41), + LAN969X_PIN(42), + LAN969X_PIN(43), + LAN969X_PIN(44), + LAN969X_PIN(45), + LAN969X_PIN(46), + LAN969X_PIN(47), + LAN969X_PIN(48), + LAN969X_PIN(49), + LAN969X_PIN(50), + LAN969X_PIN(51), + LAN969X_PIN(52), + LAN969X_PIN(53), + LAN969X_PIN(54), + 
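	/*
	 * Mechanical expansion of the LAN969X_PIN() macro above, shown
	 * for one entry only: LAN969X_PIN(55) becomes { .number = 55,
	 * .name = "GPIO_55", .drv_data = &lan969x_pin_55 }, linking the
	 * pin descriptor to the capability row generated by the
	 * corresponding LAN969X_P(55, ...) line.
	 */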
LAN969X_PIN(55), + LAN969X_PIN(56), + LAN969X_PIN(57), + LAN969X_PIN(58), + LAN969X_PIN(59), + LAN969X_PIN(60), + LAN969X_PIN(61), + LAN969X_PIN(62), + LAN969X_PIN(63), + LAN969X_PIN(64), + LAN969X_PIN(65), + LAN969X_PIN(66), +}; + static int ocelot_get_functions_count(struct pinctrl_dev *pctldev) { return ARRAY_SIZE(ocelot_function_names); @@ -1682,6 +1867,23 @@ static struct ocelot_match_data lan966x_desc = { }, }; +static struct ocelot_match_data lan969x_desc = { + .desc = { + .name = "lan969x-pinctrl", + .pins = lan969x_pins, + .npins = ARRAY_SIZE(lan969x_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &lan966x_pmx_ops, + .confops = &ocelot_confops, + .owner = THIS_MODULE, + }, + .pincfg_data = { + .pd_bit = BIT(3), + .pu_bit = BIT(2), + .drive_bits = GENMASK(1, 0), + }, +}; + static int ocelot_create_group_func_map(struct device *dev, struct ocelot_pinctrl *info) { @@ -2014,6 +2216,7 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = { { .compatible = "mscc,servalt-pinctrl", .data = &servalt_desc }, { .compatible = "microchip,sparx5-pinctrl", .data = &sparx5_desc }, { .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc }, + { .compatible = "microchip,lan9691-pinctrl", .data = &lan969x_desc }, {}, }; MODULE_DEVICE_TABLE(of, ocelot_pinctrl_of_match); diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 5c1bc4d5b662..36d4eaf0ebd1 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3227,7 +3227,9 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np, /* we do not check return since it's safe node passed down */ size /= sizeof(*list); if (!size || size % 4) - return dev_err_probe(dev, -EINVAL, "wrong pins number or pins and configs should be by 4\n"); + return dev_err_probe(dev, -EINVAL, + "%pOF: rockchip,pins: expected one or more of <bank pin mux CONFIG>, got %d args instead\n", + np, size); grp->npins = size / 4; @@ -4219,7 +4221,7 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = { static struct platform_driver rockchip_pinctrl_driver = { .probe = rockchip_pinctrl_probe, - .remove_new = rockchip_pinctrl_remove, + .remove = rockchip_pinctrl_remove, .driver = { .name = "rockchip-pinctrl", .pm = &rockchip_pinctrl_dev_pm_ops, diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 2ec599e383e4..5be14dc979e2 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1966,6 +1966,7 @@ static const struct pcs_soc_data pinconf_single = { }; static const struct of_device_id pcs_of_match[] = { + { .compatible = "marvell,pxa1908-padconf", .data = &pinconf_single }, { .compatible = "ti,am437-padconf", .data = &pinctrl_single_am437x }, { .compatible = "ti,am654-padconf", .data = &pinctrl_single_am654 }, { .compatible = "ti,dra7-padconf", .data = &pinctrl_single_dra7 }, @@ -1981,7 +1982,7 @@ MODULE_DEVICE_TABLE(of, pcs_of_match); static struct platform_driver pcs_driver = { .probe = pcs_probe, - .remove_new = pcs_remove, + .remove = pcs_remove, .driver = { .name = DRIVER_NAME, .of_match_table = pcs_of_match, diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c index d2c5321dd025..bfeb2edbeec5 100644 --- a/drivers/pinctrl/pinctrl-stmfx.c +++ b/drivers/pinctrl/pinctrl-stmfx.c @@ -855,7 +855,7 @@ static struct platform_driver stmfx_pinctrl_driver = { .pm = &stmfx_pinctrl_dev_pm_ops, }, .probe = stmfx_pinctrl_probe, - .remove_new = stmfx_pinctrl_remove, + .remove = 
stmfx_pinctrl_remove, }; module_platform_driver(stmfx_pinctrl_driver); diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index fd0331a87cda..ab87de319ad8 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1105,7 +1105,7 @@ static const struct regmap_config sx150x_regmap_config = { .reg_bits = 8, .val_bits = 32, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .reg_read = sx150x_regmap_reg_read, .reg_write = sx150x_regmap_reg_write, diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c index 4f98f72565f4..d6bb8f58978d 100644 --- a/drivers/pinctrl/pinctrl-tb10x.c +++ b/drivers/pinctrl/pinctrl-tb10x.c @@ -820,7 +820,7 @@ MODULE_DEVICE_TABLE(of, tb10x_pinctrl_dt_ids); static struct platform_driver tb10x_pinctrl_pdrv = { .probe = tb10x_pinctrl_probe, - .remove_new = tb10x_pinctrl_remove, + .remove = tb10x_pinctrl_remove, .driver = { .name = "tb10x_pinctrl", .of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids), diff --git a/drivers/pinctrl/pinctrl-th1520.c b/drivers/pinctrl/pinctrl-th1520.c new file mode 100644 index 000000000000..e641bad6728c --- /dev/null +++ b/drivers/pinctrl/pinctrl-th1520.c @@ -0,0 +1,918 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Pinctrl driver for the T-Head TH1520 SoC + * + * Copyright (C) 2023 Emil Renner Berthing <emil.renner.berthing@canonical.com> + */ + +#include <linux/array_size.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> + +#include <linux/of_address.h> +#include <linux/of_device.h> + +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> + +#include "core.h" +#include "pinmux.h" +#include "pinconf.h" + +#define TH1520_PADCFG_IE BIT(9) +#define TH1520_PADCFG_SL BIT(8) +#define TH1520_PADCFG_ST BIT(7) +#define TH1520_PADCFG_SPU BIT(6) +#define TH1520_PADCFG_PS BIT(5) +#define TH1520_PADCFG_PE BIT(4) +#define TH1520_PADCFG_BIAS (TH1520_PADCFG_SPU | TH1520_PADCFG_PS | TH1520_PADCFG_PE) +#define TH1520_PADCFG_DS GENMASK(3, 0) + +#define TH1520_PULL_DOWN_OHM 44000 /* typ. 44kOhm */ +#define TH1520_PULL_UP_OHM 48000 /* typ. 48kOhm */ +#define TH1520_PULL_STRONG_OHM 2100 /* typ. 
2.1kOhm */ + +#define TH1520_PAD_NO_PADCFG BIT(30) +#define TH1520_PAD_MUXDATA GENMASK(29, 0) + +struct th1520_pad_group { + const char *name; + const struct pinctrl_pin_desc *pins; + unsigned int npins; +}; + +struct th1520_pinctrl { + struct pinctrl_desc desc; + struct mutex mutex; /* serialize adding functions */ + raw_spinlock_t lock; /* serialize register access */ + void __iomem *base; + struct pinctrl_dev *pctl; +}; + +static void __iomem *th1520_padcfg(struct th1520_pinctrl *thp, + unsigned int pin) +{ + return thp->base + 4 * (pin / 2); +} + +static unsigned int th1520_padcfg_shift(unsigned int pin) +{ + return 16 * (pin & BIT(0)); +} + +static void __iomem *th1520_muxcfg(struct th1520_pinctrl *thp, + unsigned int pin) +{ + return thp->base + 0x400 + 4 * (pin / 8); +} + +static unsigned int th1520_muxcfg_shift(unsigned int pin) +{ + return 4 * (pin & GENMASK(2, 0)); +} + +enum th1520_muxtype { + TH1520_MUX_____, + TH1520_MUX_GPIO, + TH1520_MUX_PWM, + TH1520_MUX_UART, + TH1520_MUX_IR, + TH1520_MUX_I2C, + TH1520_MUX_SPI, + TH1520_MUX_QSPI, + TH1520_MUX_SDIO, + TH1520_MUX_AUD, + TH1520_MUX_I2S, + TH1520_MUX_MAC0, + TH1520_MUX_MAC1, + TH1520_MUX_DPU0, + TH1520_MUX_DPU1, + TH1520_MUX_ISP, + TH1520_MUX_HDMI, + TH1520_MUX_BSEL, + TH1520_MUX_DBG, + TH1520_MUX_CLK, + TH1520_MUX_JTAG, + TH1520_MUX_ISO, + TH1520_MUX_FUSE, + TH1520_MUX_RST, +}; + +static const char *const th1520_muxtype_string[] = { + [TH1520_MUX_GPIO] = "gpio", + [TH1520_MUX_PWM] = "pwm", + [TH1520_MUX_UART] = "uart", + [TH1520_MUX_IR] = "ir", + [TH1520_MUX_I2C] = "i2c", + [TH1520_MUX_SPI] = "spi", + [TH1520_MUX_QSPI] = "qspi", + [TH1520_MUX_SDIO] = "sdio", + [TH1520_MUX_AUD] = "audio", + [TH1520_MUX_I2S] = "i2s", + [TH1520_MUX_MAC0] = "gmac0", + [TH1520_MUX_MAC1] = "gmac1", + [TH1520_MUX_DPU0] = "dpu0", + [TH1520_MUX_DPU1] = "dpu1", + [TH1520_MUX_ISP] = "isp", + [TH1520_MUX_HDMI] = "hdmi", + [TH1520_MUX_BSEL] = "bootsel", + [TH1520_MUX_DBG] = "debug", + [TH1520_MUX_CLK] = "clock", + [TH1520_MUX_JTAG] = "jtag", + [TH1520_MUX_ISO] = "iso7816", + [TH1520_MUX_FUSE] = "efuse", + [TH1520_MUX_RST] = "reset", +}; + +static enum th1520_muxtype th1520_muxtype_get(const char *str) +{ + enum th1520_muxtype mt; + + for (mt = TH1520_MUX_GPIO; mt < ARRAY_SIZE(th1520_muxtype_string); mt++) { + if (!strcmp(str, th1520_muxtype_string[mt])) + return mt; + } + return TH1520_MUX_____; +} + +#define TH1520_PAD(_nr, _name, m0, m1, m2, m3, m4, m5, _flags) \ + { .number = _nr, .name = #_name, .drv_data = (void *)((_flags) | \ + (TH1520_MUX_##m0 << 0) | (TH1520_MUX_##m1 << 5) | (TH1520_MUX_##m2 << 10) | \ + (TH1520_MUX_##m3 << 15) | (TH1520_MUX_##m4 << 20) | (TH1520_MUX_##m5 << 25)) } + +static unsigned long th1520_pad_muxdata(void *drv_data) +{ + return (uintptr_t)drv_data & TH1520_PAD_MUXDATA; +} + +static bool th1520_pad_no_padcfg(void *drv_data) +{ + return (uintptr_t)drv_data & TH1520_PAD_NO_PADCFG; +} + +static const struct pinctrl_pin_desc th1520_group1_pins[] = { + TH1520_PAD(0, OSC_CLK_IN, ____, ____, ____, ____, ____, ____, TH1520_PAD_NO_PADCFG), + TH1520_PAD(1, OSC_CLK_OUT, ____, ____, ____, ____, ____, ____, TH1520_PAD_NO_PADCFG), + TH1520_PAD(2, SYS_RST_N, ____, ____, ____, ____, ____, ____, TH1520_PAD_NO_PADCFG), + TH1520_PAD(3, RTC_CLK_IN, ____, ____, ____, ____, ____, ____, TH1520_PAD_NO_PADCFG), + TH1520_PAD(4, RTC_CLK_OUT, ____, ____, ____, ____, ____, ____, TH1520_PAD_NO_PADCFG), + /* skip number 5 so we can calculate register offsets and shifts from the pin number */ + TH1520_PAD(6, TEST_MODE, ____, ____, ____, ____, ____, ____, 
TH1520_PAD_NO_PADCFG), + TH1520_PAD(7, DEBUG_MODE, DBG, ____, ____, GPIO, ____, ____, TH1520_PAD_NO_PADCFG), + TH1520_PAD(8, POR_SEL, ____, ____, ____, ____, ____, ____, TH1520_PAD_NO_PADCFG), + TH1520_PAD(9, I2C_AON_SCL, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(10, I2C_AON_SDA, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(11, CPU_JTG_TCLK, JTAG, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(12, CPU_JTG_TMS, JTAG, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(13, CPU_JTG_TDI, JTAG, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(14, CPU_JTG_TDO, JTAG, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(15, CPU_JTG_TRST, JTAG, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(16, AOGPIO_7, CLK, AUD, ____, GPIO, ____, ____, 0), + TH1520_PAD(17, AOGPIO_8, UART, AUD, IR, GPIO, ____, ____, 0), + TH1520_PAD(18, AOGPIO_9, UART, AUD, IR, GPIO, ____, ____, 0), + TH1520_PAD(19, AOGPIO_10, CLK, AUD, ____, GPIO, ____, ____, 0), + TH1520_PAD(20, AOGPIO_11, GPIO, AUD, ____, ____, ____, ____, 0), + TH1520_PAD(21, AOGPIO_12, GPIO, AUD, ____, ____, ____, ____, 0), + TH1520_PAD(22, AOGPIO_13, GPIO, AUD, ____, ____, ____, ____, 0), + TH1520_PAD(23, AOGPIO_14, GPIO, AUD, ____, ____, ____, ____, 0), + TH1520_PAD(24, AOGPIO_15, GPIO, AUD, ____, ____, ____, ____, 0), + TH1520_PAD(25, AUDIO_PA0, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(26, AUDIO_PA1, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(27, AUDIO_PA2, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(28, AUDIO_PA3, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(29, AUDIO_PA4, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(30, AUDIO_PA5, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(31, AUDIO_PA6, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(32, AUDIO_PA7, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(33, AUDIO_PA8, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(34, AUDIO_PA9, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(35, AUDIO_PA10, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(36, AUDIO_PA11, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(37, AUDIO_PA12, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(38, AUDIO_PA13, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(39, AUDIO_PA14, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(40, AUDIO_PA15, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(41, AUDIO_PA16, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(42, AUDIO_PA17, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(43, AUDIO_PA27, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(44, AUDIO_PA28, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(45, AUDIO_PA29, AUD, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(46, AUDIO_PA30, AUD, RST, ____, GPIO, ____, ____, 0), +}; + +static const struct pinctrl_pin_desc th1520_group2_pins[] = { + TH1520_PAD(0, QSPI1_SCLK, QSPI, ISO, ____, GPIO, FUSE, ____, 0), + TH1520_PAD(1, QSPI1_CSN0, QSPI, ____, I2C, GPIO, FUSE, ____, 0), + TH1520_PAD(2, QSPI1_D0_MOSI, QSPI, ISO, I2C, GPIO, FUSE, ____, 0), + TH1520_PAD(3, QSPI1_D1_MISO, QSPI, ISO, ____, GPIO, FUSE, ____, 0), + TH1520_PAD(4, QSPI1_D2_WP, QSPI, ISO, UART, GPIO, FUSE, ____, 0), + TH1520_PAD(5, QSPI1_D3_HOLD, QSPI, ISO, UART, GPIO, ____, ____, 0), + TH1520_PAD(6, I2C0_SCL, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(7, I2C0_SDA, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(8, I2C1_SCL, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(9, I2C1_SDA, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(10, UART1_TXD, UART, ____, ____, GPIO, 
____, ____, 0), + TH1520_PAD(11, UART1_RXD, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(12, UART4_TXD, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(13, UART4_RXD, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(14, UART4_CTSN, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(15, UART4_RTSN, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(16, UART3_TXD, DBG, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(17, UART3_RXD, DBG, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(18, GPIO0_18, GPIO, I2C, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(19, GPIO0_19, GPIO, I2C, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(20, GPIO0_20, GPIO, UART, IR, ____, DPU0, DPU1, 0), + TH1520_PAD(21, GPIO0_21, GPIO, UART, IR, ____, DPU0, DPU1, 0), + TH1520_PAD(22, GPIO0_22, GPIO, JTAG, I2C, ____, DPU0, DPU1, 0), + TH1520_PAD(23, GPIO0_23, GPIO, JTAG, I2C, ____, DPU0, DPU1, 0), + TH1520_PAD(24, GPIO0_24, GPIO, JTAG, QSPI, ____, DPU0, DPU1, 0), + TH1520_PAD(25, GPIO0_25, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(26, GPIO0_26, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(27, GPIO0_27, GPIO, ____, I2C, ____, DPU0, DPU1, 0), + TH1520_PAD(28, GPIO0_28, GPIO, ____, I2C, ____, DPU0, DPU1, 0), + TH1520_PAD(29, GPIO0_29, GPIO, ____, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(30, GPIO0_30, GPIO, ____, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(31, GPIO0_31, GPIO, ____, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(32, GPIO1_0, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(33, GPIO1_1, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(34, GPIO1_2, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(35, GPIO1_3, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(36, GPIO1_4, GPIO, JTAG, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(37, GPIO1_5, GPIO, ____, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(38, GPIO1_6, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(39, GPIO1_7, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(40, GPIO1_8, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(41, GPIO1_9, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(42, GPIO1_10, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(43, GPIO1_11, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(44, GPIO1_12, GPIO, QSPI, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(45, GPIO1_13, GPIO, UART, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(46, GPIO1_14, GPIO, UART, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(47, GPIO1_15, GPIO, UART, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(48, GPIO1_16, GPIO, UART, ____, ____, DPU0, DPU1, 0), + TH1520_PAD(49, CLK_OUT_0, BSEL, CLK, ____, GPIO, ____, ____, 0), + TH1520_PAD(50, CLK_OUT_1, BSEL, CLK, ____, GPIO, ____, ____, 0), + TH1520_PAD(51, CLK_OUT_2, BSEL, CLK, ____, GPIO, ____, ____, 0), + TH1520_PAD(52, CLK_OUT_3, BSEL, CLK, ____, GPIO, ____, ____, 0), + TH1520_PAD(53, GPIO1_21, JTAG, ____, ISP, GPIO, ____, ____, 0), + TH1520_PAD(54, GPIO1_22, JTAG, ____, ISP, GPIO, ____, ____, 0), + TH1520_PAD(55, GPIO1_23, JTAG, ____, ISP, GPIO, ____, ____, 0), + TH1520_PAD(56, GPIO1_24, JTAG, ____, ISP, GPIO, ____, ____, 0), + TH1520_PAD(57, GPIO1_25, JTAG, ____, ISP, GPIO, ____, ____, 0), + TH1520_PAD(58, GPIO1_26, GPIO, ____, ISP, ____, ____, ____, 0), + TH1520_PAD(59, GPIO1_27, GPIO, ____, ISP, ____, ____, ____, 0), + TH1520_PAD(60, GPIO1_28, GPIO, ____, ISP, ____, ____, ____, 0), + TH1520_PAD(61, GPIO1_29, GPIO, ____, ISP, ____, ____, ____, 0), + TH1520_PAD(62, GPIO1_30, GPIO, ____, ISP, ____, ____, ____, 0), +}; + +static const struct pinctrl_pin_desc th1520_group3_pins[] = { + 
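	/*
	 * As in the two groups above, each TH1520_PAD() row packs its
	 * six candidate mux functions into drv_data at 5-bit strides
	 * (bits 0-29, recovered via th1520_pad_muxdata()), with the
	 * optional TH1520_PAD_NO_PADCFG flag in bit 30. The UART0_TXD
	 * row below, for instance, carries TH1520_MUX_UART in bits 0-4
	 * and TH1520_MUX_GPIO in bits 15-19, with no flag set.
	 */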
TH1520_PAD(0, UART0_TXD, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(1, UART0_RXD, UART, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(2, QSPI0_SCLK, QSPI, PWM, I2S, GPIO, ____, ____, 0), + TH1520_PAD(3, QSPI0_CSN0, QSPI, PWM, I2S, GPIO, ____, ____, 0), + TH1520_PAD(4, QSPI0_CSN1, QSPI, PWM, I2S, GPIO, ____, ____, 0), + TH1520_PAD(5, QSPI0_D0_MOSI, QSPI, PWM, I2S, GPIO, ____, ____, 0), + TH1520_PAD(6, QSPI0_D1_MISO, QSPI, PWM, I2S, GPIO, ____, ____, 0), + TH1520_PAD(7, QSPI0_D2_WP, QSPI, PWM, I2S, GPIO, ____, ____, 0), + TH1520_PAD(8, QSPI1_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0), + TH1520_PAD(9, I2C2_SCL, I2C, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(10, I2C2_SDA, I2C, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(11, I2C3_SCL, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(12, I2C3_SDA, I2C, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(13, GPIO2_13, GPIO, SPI, ____, ____, ____, ____, 0), + TH1520_PAD(14, SPI_SCLK, SPI, UART, IR, GPIO, ____, ____, 0), + TH1520_PAD(15, SPI_CSN, SPI, UART, IR, GPIO, ____, ____, 0), + TH1520_PAD(16, SPI_MOSI, SPI, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(17, SPI_MISO, SPI, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(18, GPIO2_18, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(19, GPIO2_19, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(20, GPIO2_20, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(21, GPIO2_21, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(22, GPIO2_22, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(23, GPIO2_23, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(24, GPIO2_24, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(25, GPIO2_25, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(26, SDIO0_WPRTN, SDIO, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(27, SDIO0_DETN, SDIO, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(28, SDIO1_WPRTN, SDIO, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(29, SDIO1_DETN, SDIO, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(30, GPIO2_30, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(31, GPIO2_31, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(32, GPIO3_0, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(33, GPIO3_1, GPIO, MAC1, ____, ____, ____, ____, 0), + TH1520_PAD(34, GPIO3_2, GPIO, PWM, ____, ____, ____, ____, 0), + TH1520_PAD(35, GPIO3_3, GPIO, PWM, ____, ____, ____, ____, 0), + TH1520_PAD(36, HDMI_SCL, HDMI, PWM, ____, GPIO, ____, ____, 0), + TH1520_PAD(37, HDMI_SDA, HDMI, PWM, ____, GPIO, ____, ____, 0), + TH1520_PAD(38, HDMI_CEC, HDMI, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(39, GMAC0_TX_CLK, MAC0, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(40, GMAC0_RX_CLK, MAC0, ____, ____, GPIO, ____, ____, 0), + TH1520_PAD(41, GMAC0_TXEN, MAC0, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(42, GMAC0_TXD0, MAC0, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(43, GMAC0_TXD1, MAC0, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(44, GMAC0_TXD2, MAC0, UART, ____, GPIO, ____, ____, 0), + TH1520_PAD(45, GMAC0_TXD3, MAC0, I2C, ____, GPIO, ____, ____, 0), + TH1520_PAD(46, GMAC0_RXDV, MAC0, I2C, ____, GPIO, ____, ____, 0), + TH1520_PAD(47, GMAC0_RXD0, MAC0, I2C, ____, GPIO, ____, ____, 0), + TH1520_PAD(48, GMAC0_RXD1, MAC0, I2C, ____, GPIO, ____, ____, 0), + TH1520_PAD(49, GMAC0_RXD2, MAC0, SPI, ____, GPIO, ____, ____, 0), + TH1520_PAD(50, GMAC0_RXD3, MAC0, SPI, ____, GPIO, ____, ____, 0), + TH1520_PAD(51, GMAC0_MDC, MAC0, SPI, MAC1, GPIO, ____, ____, 0), + TH1520_PAD(52, GMAC0_MDIO, MAC0, SPI, MAC1, GPIO, ____, 
____, 0), + TH1520_PAD(53, GMAC0_COL, MAC0, PWM, ____, GPIO, ____, ____, 0), + TH1520_PAD(54, GMAC0_CRS, MAC0, PWM, ____, GPIO, ____, ____, 0), +}; + +static const struct th1520_pad_group th1520_group1 = { + .name = "th1520-group1", + .pins = th1520_group1_pins, + .npins = ARRAY_SIZE(th1520_group1_pins), +}; + +static const struct th1520_pad_group th1520_group2 = { + .name = "th1520-group2", + .pins = th1520_group2_pins, + .npins = ARRAY_SIZE(th1520_group2_pins), +}; + +static const struct th1520_pad_group th1520_group3 = { + .name = "th1520-group3", + .pins = th1520_group3_pins, + .npins = ARRAY_SIZE(th1520_group3_pins), +}; + +static int th1520_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + + return thp->desc.npins; +} + +static const char *th1520_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int gsel) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + + return thp->desc.pins[gsel].name; +} + +static int th1520_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int gsel, + const unsigned int **pins, + unsigned int *npins) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + + *pins = &thp->desc.pins[gsel].number; + *npins = 1; + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static void th1520_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, unsigned int pin) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + void __iomem *padcfg = th1520_padcfg(thp, pin); + void __iomem *muxcfg = th1520_muxcfg(thp, pin); + u32 pad; + u32 mux; + + scoped_guard(raw_spinlock_irqsave, &thp->lock) { + pad = readl_relaxed(padcfg); + mux = readl_relaxed(muxcfg); + } + + seq_printf(s, "[PADCFG_%03u:0x%x=0x%07x MUXCFG_%03u:0x%x=0x%08x]", + 1 + pin / 2, 0x000 + 4 * (pin / 2), pad, + 1 + pin / 8, 0x400 + 4 * (pin / 8), mux); +} +#else +#define th1520_pin_dbg_show NULL +#endif + +static void th1520_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned int nmaps) +{ + unsigned long *seen = NULL; + unsigned int i; + + for (i = 0; i < nmaps; i++) { + if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN && + map[i].data.configs.configs != seen) { + seen = map[i].data.configs.configs; + kfree(seen); + } + } + + kfree(map); +} + +static int th1520_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **maps, + unsigned int *num_maps) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + struct pinctrl_map *map; + unsigned long *configs; + unsigned int nconfigs; + unsigned int nmaps; + int ret; + + nmaps = 0; + for_each_available_child_of_node_scoped(np, child) { + int npins = of_property_count_strings(child, "pins"); + + if (npins <= 0) { + dev_err(thp->pctl->dev, "no pins selected for %pOFn.%pOFn\n", + np, child); + return -EINVAL; + } + nmaps += npins; + if (of_property_present(child, "function")) + nmaps += npins; + } + + map = kcalloc(nmaps, sizeof(*map), GFP_KERNEL); + if (!map) + return -ENOMEM; + + nmaps = 0; + guard(mutex)(&thp->mutex); + for_each_available_child_of_node_scoped(np, child) { + unsigned int rollback = nmaps; + enum th1520_muxtype muxtype; + struct property *prop; + const char *funcname; + const char **pgnames; + const char *pinname; + int npins; + + ret = pinconf_generic_parse_dt_config(child, pctldev, &configs, &nconfigs); + if (ret) { + dev_err(thp->pctl->dev, "%pOFn.%pOFn: error parsing pin config\n", + np, child); + goto free_map; + } + + if 
(!of_property_read_string(child, "function", &funcname)) { + muxtype = th1520_muxtype_get(funcname); + if (!muxtype) { + dev_err(thp->pctl->dev, "%pOFn.%pOFn: unknown function '%s'\n", + np, child, funcname); + ret = -EINVAL; + goto free_configs; + } + + funcname = devm_kasprintf(thp->pctl->dev, GFP_KERNEL, "%pOFn.%pOFn", + np, child); + if (!funcname) { + ret = -ENOMEM; + goto free_configs; + } + + npins = of_property_count_strings(child, "pins"); + pgnames = devm_kcalloc(thp->pctl->dev, npins, sizeof(*pgnames), GFP_KERNEL); + if (!pgnames) { + ret = -ENOMEM; + goto free_configs; + } + } else { + funcname = NULL; + } + + npins = 0; + of_property_for_each_string(child, "pins", prop, pinname) { + unsigned int i; + + for (i = 0; i < thp->desc.npins; i++) { + if (!strcmp(pinname, thp->desc.pins[i].name)) + break; + } + if (i == thp->desc.npins) { + nmaps = rollback; + dev_err(thp->pctl->dev, "%pOFn.%pOFn: unknown pin '%s'\n", + np, child, pinname); + ret = -EINVAL; + goto free_configs; + } + + if (nconfigs) { + map[nmaps].type = PIN_MAP_TYPE_CONFIGS_PIN; + map[nmaps].data.configs.group_or_pin = thp->desc.pins[i].name; + map[nmaps].data.configs.configs = configs; + map[nmaps].data.configs.num_configs = nconfigs; + nmaps += 1; + } + if (funcname) { + pgnames[npins++] = thp->desc.pins[i].name; + map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP; + map[nmaps].data.mux.function = funcname; + map[nmaps].data.mux.group = thp->desc.pins[i].name; + nmaps += 1; + } + } + + if (funcname) { + ret = pinmux_generic_add_function(pctldev, funcname, pgnames, + npins, (void *)muxtype); + if (ret < 0) { + dev_err(thp->pctl->dev, "error adding function %s\n", funcname); + goto free_map; + } + } + } + + *maps = map; + *num_maps = nmaps; + return 0; + +free_configs: + kfree(configs); +free_map: + th1520_pinctrl_dt_free_map(pctldev, map, nmaps); + return ret; +} + +static const struct pinctrl_ops th1520_pinctrl_ops = { + .get_groups_count = th1520_pinctrl_get_groups_count, + .get_group_name = th1520_pinctrl_get_group_name, + .get_group_pins = th1520_pinctrl_get_group_pins, + .pin_dbg_show = th1520_pin_dbg_show, + .dt_node_to_map = th1520_pinctrl_dt_node_to_map, + .dt_free_map = th1520_pinctrl_dt_free_map, +}; + +static const u8 th1520_drive_strength_in_ma[16] = { + 1, 2, 3, 5, 7, 8, 10, 12, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static u16 th1520_drive_strength_from_ma(u32 arg) +{ + u16 ds; + + for (ds = 0; ds < TH1520_PADCFG_DS; ds++) { + if (arg <= th1520_drive_strength_in_ma[ds]) + return ds; + } + return TH1520_PADCFG_DS; +} + +static int th1520_padcfg_rmw(struct th1520_pinctrl *thp, unsigned int pin, + u32 mask, u32 value) +{ + void __iomem *padcfg = th1520_padcfg(thp, pin); + unsigned int shift = th1520_padcfg_shift(pin); + u32 tmp; + + mask <<= shift; + value <<= shift; + + scoped_guard(raw_spinlock_irqsave, &thp->lock) { + tmp = readl_relaxed(padcfg); + tmp = (tmp & ~mask) | value; + writel_relaxed(tmp, padcfg); + } + return 0; +} + +static int th1520_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + const struct pin_desc *desc = pin_desc_get(pctldev, pin); + bool enabled; + int param; + u32 value; + u32 arg; + + if (th1520_pad_no_padcfg(desc->drv_data)) + return -ENOTSUPP; + + value = readl_relaxed(th1520_padcfg(thp, pin)); + value = (value >> th1520_padcfg_shift(pin)) & GENMASK(9, 0); + + param = pinconf_to_config_param(*config); + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + enabled = !(value & 
(TH1520_PADCFG_SPU | TH1520_PADCFG_PE)); + arg = 0; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + enabled = (value & TH1520_PADCFG_BIAS) == TH1520_PADCFG_PE; + arg = enabled ? TH1520_PULL_DOWN_OHM : 0; + break; + case PIN_CONFIG_BIAS_PULL_UP: + if (value & TH1520_PADCFG_SPU) { + enabled = true; + arg = TH1520_PULL_STRONG_OHM; + } else if ((value & (TH1520_PADCFG_PE | TH1520_PADCFG_PS)) == + (TH1520_PADCFG_PE | TH1520_PADCFG_PS)) { + enabled = true; + arg = TH1520_PULL_UP_OHM; + } else { + enabled = false; + arg = 0; + } + break; + case PIN_CONFIG_DRIVE_STRENGTH: + enabled = true; + arg = th1520_drive_strength_in_ma[value & TH1520_PADCFG_DS]; + break; + case PIN_CONFIG_INPUT_ENABLE: + enabled = value & TH1520_PADCFG_IE; + arg = enabled ? 1 : 0; + break; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + enabled = value & TH1520_PADCFG_ST; + arg = enabled ? 1 : 0; + break; + case PIN_CONFIG_SLEW_RATE: + enabled = value & TH1520_PADCFG_SL; + arg = enabled ? 1 : 0; + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + return enabled ? 0 : -EINVAL; +} + +static int th1520_pinconf_group_get(struct pinctrl_dev *pctldev, + unsigned int gsel, unsigned long *config) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + unsigned int pin = thp->desc.pins[gsel].number; + + return th1520_pinconf_get(pctldev, pin, config); +} + +static int th1520_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + const struct pin_desc *desc = pin_desc_get(pctldev, pin); + unsigned int i; + u16 mask, value; + + if (th1520_pad_no_padcfg(desc->drv_data)) + return -ENOTSUPP; + + mask = 0; + value = 0; + for (i = 0; i < num_configs; i++) { + int param = pinconf_to_config_param(configs[i]); + u32 arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + mask |= TH1520_PADCFG_BIAS; + value &= ~TH1520_PADCFG_BIAS; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + if (arg == 0) + return -ENOTSUPP; + mask |= TH1520_PADCFG_BIAS; + value &= ~TH1520_PADCFG_BIAS; + value |= TH1520_PADCFG_PE; + break; + case PIN_CONFIG_BIAS_PULL_UP: + if (arg == 0) + return -ENOTSUPP; + mask |= TH1520_PADCFG_BIAS; + value &= ~TH1520_PADCFG_BIAS; + if (arg == TH1520_PULL_STRONG_OHM) + value |= TH1520_PADCFG_SPU; + else + value |= TH1520_PADCFG_PE | TH1520_PADCFG_PS; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + mask |= TH1520_PADCFG_DS; + value &= ~TH1520_PADCFG_DS; + value |= th1520_drive_strength_from_ma(arg); + break; + case PIN_CONFIG_INPUT_ENABLE: + mask |= TH1520_PADCFG_IE; + if (arg) + value |= TH1520_PADCFG_IE; + else + value &= ~TH1520_PADCFG_IE; + break; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + mask |= TH1520_PADCFG_ST; + if (arg) + value |= TH1520_PADCFG_ST; + else + value &= ~TH1520_PADCFG_ST; + break; + case PIN_CONFIG_SLEW_RATE: + mask |= TH1520_PADCFG_SL; + if (arg) + value |= TH1520_PADCFG_SL; + else + value &= ~TH1520_PADCFG_SL; + break; + default: + return -ENOTSUPP; + } + } + + return th1520_padcfg_rmw(thp, pin, mask, value); +} + +static int th1520_pinconf_group_set(struct pinctrl_dev *pctldev, + unsigned int gsel, + unsigned long *configs, + unsigned int num_configs) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + unsigned int pin = thp->desc.pins[gsel].number; + + return th1520_pinconf_set(pctldev, pin, configs, num_configs); +} + +#ifdef CONFIG_DEBUG_FS +static void 
th1520_pinconf_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, unsigned int pin) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + u32 value = readl_relaxed(th1520_padcfg(thp, pin)); + + value = (value >> th1520_padcfg_shift(pin)) & GENMASK(9, 0); + + seq_printf(s, " [0x%03x]", value); +} +#else +#define th1520_pinconf_dbg_show NULL +#endif + +static const struct pinconf_ops th1520_pinconf_ops = { + .pin_config_get = th1520_pinconf_get, + .pin_config_group_get = th1520_pinconf_group_get, + .pin_config_set = th1520_pinconf_set, + .pin_config_group_set = th1520_pinconf_group_set, + .pin_config_dbg_show = th1520_pinconf_dbg_show, + .is_generic = true, +}; + +static int th1520_pinmux_set(struct th1520_pinctrl *thp, unsigned int pin, + unsigned long muxdata, enum th1520_muxtype muxtype) +{ + void __iomem *muxcfg = th1520_muxcfg(thp, pin); + unsigned int shift = th1520_muxcfg_shift(pin); + u32 mask, value, tmp; + + for (value = 0; muxdata; muxdata >>= 5, value++) { + if ((muxdata & GENMASK(4, 0)) == muxtype) + break; + } + if (!muxdata) { + dev_err(thp->pctl->dev, "invalid mux %s for pin %s\n", + th1520_muxtype_string[muxtype], pin_get_name(thp->pctl, pin)); + return -EINVAL; + } + + mask = GENMASK(3, 0) << shift; + value = value << shift; + + scoped_guard(raw_spinlock_irqsave, &thp->lock) { + tmp = readl_relaxed(muxcfg); + tmp = (tmp & ~mask) | value; + writel_relaxed(tmp, muxcfg); + } + return 0; +} + +static int th1520_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned int fsel, unsigned int gsel) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + const struct function_desc *func = pinmux_generic_get_function(pctldev, fsel); + enum th1520_muxtype muxtype; + + if (!func) + return -EINVAL; + + muxtype = (uintptr_t)func->data; + return th1520_pinmux_set(thp, thp->desc.pins[gsel].number, + th1520_pad_muxdata(thp->desc.pins[gsel].drv_data), + muxtype); +} + +static int th1520_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + const struct pin_desc *desc = pin_desc_get(pctldev, offset); + + return th1520_pinmux_set(thp, offset, + th1520_pad_muxdata(desc->drv_data), + TH1520_MUX_GPIO); +} + +static int th1520_gpio_set_direction(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset, bool input) +{ + struct th1520_pinctrl *thp = pinctrl_dev_get_drvdata(pctldev); + + return th1520_padcfg_rmw(thp, offset, TH1520_PADCFG_IE, + input ? 
TH1520_PADCFG_IE : 0); +} + +static const struct pinmux_ops th1520_pinmux_ops = { + .get_functions_count = pinmux_generic_get_function_count, + .get_function_name = pinmux_generic_get_function_name, + .get_function_groups = pinmux_generic_get_function_groups, + .set_mux = th1520_pinmux_set_mux, + .gpio_request_enable = th1520_gpio_request_enable, + .gpio_set_direction = th1520_gpio_set_direction, + .strict = true, +}; + +static int th1520_pinctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct th1520_pad_group *group; + struct device_node *np = dev->of_node; + struct th1520_pinctrl *thp; + struct clk *clk; + u32 pin_group; + int ret; + + thp = devm_kzalloc(dev, sizeof(*thp), GFP_KERNEL); + if (!thp) + return -ENOMEM; + + thp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(thp->base)) + return PTR_ERR(thp->base); + + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "error getting clock\n"); + + ret = of_property_read_u32(np, "thead,pad-group", &pin_group); + if (ret) + return dev_err_probe(dev, ret, "failed to read the thead,pad-group property\n"); + + if (pin_group == 1) + group = &th1520_group1; + else if (pin_group == 2) + group = &th1520_group2; + else if (pin_group == 3) + group = &th1520_group3; + else + return dev_err_probe(dev, -EINVAL, "unit address did not match any pad group\n"); + + thp->desc.name = group->name; + thp->desc.pins = group->pins; + thp->desc.npins = group->npins; + thp->desc.pctlops = &th1520_pinctrl_ops; + thp->desc.pmxops = &th1520_pinmux_ops; + thp->desc.confops = &th1520_pinconf_ops; + thp->desc.owner = THIS_MODULE; + mutex_init(&thp->mutex); + raw_spin_lock_init(&thp->lock); + + ret = devm_pinctrl_register_and_init(dev, &thp->desc, thp, &thp->pctl); + if (ret) + return dev_err_probe(dev, ret, "could not register pinctrl driver\n"); + + return pinctrl_enable(thp->pctl); +} + +static const struct of_device_id th1520_pinctrl_of_match[] = { + { .compatible = "thead,th1520-pinctrl"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, th1520_pinctrl_of_match); + +static struct platform_driver th1520_pinctrl_driver = { + .probe = th1520_pinctrl_probe, + .driver = { + .name = "pinctrl-th1520", + .of_match_table = th1520_pinctrl_of_match, + }, +}; +module_platform_driver(th1520_pinctrl_driver); + +MODULE_DESCRIPTION("Pinctrl driver for the T-Head TH1520 SoC"); +MODULE_AUTHOR("Emil Renner Berthing <emil.renner.berthing@canonical.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index f4256a918165..48f8aabf3bfa 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -1524,7 +1524,7 @@ static int pinmux_xway_probe(struct platform_device *pdev) * files which don't set the "gpio-ranges" property or systems that * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
*/ - if (!of_property_read_bool(pdev->dev.of_node, "gpio-ranges")) { + if (!of_property_present(pdev->dev.of_node, "gpio-ranges")) { /* finish with registering the gpio range in pinctrl */ xway_gpio_range.npins = xway_chip.ngpio; xway_gpio_range.base = xway_chip.base; diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c index 3c6d56fdb8c9..fddf0fef4b13 100644 --- a/drivers/pinctrl/pinctrl-zynqmp.c +++ b/drivers/pinctrl/pinctrl-zynqmp.c @@ -10,6 +10,7 @@ #include <dt-bindings/pinctrl/pinctrl-zynqmp.h> +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/init.h> #include <linux/module.h> @@ -44,12 +45,17 @@ #define DRIVE_STRENGTH_8MA 8 #define DRIVE_STRENGTH_12MA 12 +#define VERSAL_LPD_PIN_PREFIX "LPD_MIO" +#define VERSAL_PMC_PIN_PREFIX "PMC_MIO" + +#define VERSAL_PINCTRL_ATTR_NODETYPE_MASK GENMASK(19, 14) +#define VERSAL_PINCTRL_NODETYPE_LPD_MIO BIT(0) + /** * struct zynqmp_pmux_function - a pinmux function * @name: Name of the pin mux function * @groups: List of pin groups for this function * @ngroups: Number of entries in @groups - * @node: Firmware node matching with the function * * This structure holds information about pin control function * and function group names supporting that function. @@ -93,6 +99,8 @@ struct zynqmp_pctrl_group { }; static struct pinctrl_desc zynqmp_desc; +static u32 family_code; +static u32 sub_family_code; static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev) { @@ -596,8 +604,12 @@ static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid, if (!groups[resp[i]].name) return -ENOMEM; - for (pin = 0; pin < groups[resp[i]].npins; pin++) - __set_bit(groups[resp[i]].pins[pin], used_pins); + for (pin = 0; pin < groups[resp[i]].npins; pin++) { + if (family_code == ZYNQMP_FAMILY_CODE) + __set_bit(groups[resp[i]].pins[pin], used_pins); + else + __set_bit((u8)groups[resp[i]].pins[pin] - 1, used_pins); + } } } done: @@ -873,6 +885,70 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev, return 0; } +static int versal_pinctrl_get_attributes(u32 pin_idx, u32 *response) +{ + struct zynqmp_pm_query_data qdata = {0}; + u32 payload[PAYLOAD_ARG_CNT]; + int ret; + + qdata.qid = PM_QID_PINCTRL_GET_ATTRIBUTES; + qdata.arg1 = pin_idx; + + ret = zynqmp_pm_query_data(qdata, payload); + if (ret) + return ret; + + memcpy(response, &payload[1], sizeof(*response)); + + return 0; +} + +static int versal_pinctrl_prepare_pin_desc(struct device *dev, + const struct pinctrl_pin_desc **zynqmp_pins, + unsigned int *npins) +{ + u32 lpd_mio_pins = 0, attr, nodetype; + struct pinctrl_pin_desc *pins, *pin; + int ret, i; + + ret = zynqmp_pm_is_function_supported(PM_QUERY_DATA, PM_QID_PINCTRL_GET_ATTRIBUTES); + if (ret) + return ret; + + ret = zynqmp_pinctrl_get_num_pins(npins); + if (ret) + return ret; + + pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL); + if (!pins) + return -ENOMEM; + + for (i = 0; i < *npins; i++) { + ret = versal_pinctrl_get_attributes(i, &attr); + if (ret) + return ret; + + pin = &pins[i]; + pin->number = attr; + nodetype = FIELD_GET(VERSAL_PINCTRL_ATTR_NODETYPE_MASK, attr); + if (nodetype == VERSAL_PINCTRL_NODETYPE_LPD_MIO) { + pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", + VERSAL_LPD_PIN_PREFIX, i); + lpd_mio_pins++; + } else { + pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", + VERSAL_PMC_PIN_PREFIX, i - lpd_mio_pins); + } + + if (!pin->name) + return -ENOMEM; + } + + *zynqmp_pins = pins; + + return 0; +} + static int zynqmp_pinctrl_probe(struct platform_device 
*pdev) { struct zynqmp_pinctrl *pctrl; @@ -882,9 +958,18 @@ static int zynqmp_pinctrl_probe(struct platform_device *pdev) if (!pctrl) return -ENOMEM; - ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev, - &zynqmp_desc.pins, - &zynqmp_desc.npins); + ret = zynqmp_pm_get_family_info(&family_code, &sub_family_code); + if (ret < 0) + return ret; + + if (family_code == ZYNQMP_FAMILY_CODE) { + ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev, &zynqmp_desc.pins, + &zynqmp_desc.npins); + } else { + ret = versal_pinctrl_prepare_pin_desc(&pdev->dev, &zynqmp_desc.pins, + &zynqmp_desc.npins); + } + if (ret) { dev_err(&pdev->dev, "pin desc prepare fail with %d\n", ret); return ret; @@ -907,6 +992,7 @@ static int zynqmp_pinctrl_probe(struct platform_device *pdev) static const struct of_device_id zynqmp_pinctrl_of_match[] = { { .compatible = "xlnx,zynqmp-pinctrl" }, + { .compatible = "xlnx,versal-pinctrl" }, { } }; MODULE_DEVICE_TABLE(of, zynqmp_pinctrl_of_match); diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 02033ea1c643..0743190da59e 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -14,6 +14,7 @@ #include <linux/array_size.h> #include <linux/ctype.h> +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/err.h> @@ -93,6 +94,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin) if (!desc || !ops) return true; + guard(mutex)(&desc->mux_lock); if (ops->strict && desc->mux_usecount) return false; @@ -127,29 +129,31 @@ static int pin_request(struct pinctrl_dev *pctldev, dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n", pin, desc->name, owner); - if ((!gpio_range || ops->strict) && - desc->mux_usecount && strcmp(desc->mux_owner, owner)) { - dev_err(pctldev->dev, - "pin %s already requested by %s; cannot claim for %s\n", - desc->name, desc->mux_owner, owner); - goto out; - } + scoped_guard(mutex, &desc->mux_lock) { + if ((!gpio_range || ops->strict) && + desc->mux_usecount && strcmp(desc->mux_owner, owner)) { + dev_err(pctldev->dev, + "pin %s already requested by %s; cannot claim for %s\n", + desc->name, desc->mux_owner, owner); + goto out; + } - if ((gpio_range || ops->strict) && desc->gpio_owner) { - dev_err(pctldev->dev, - "pin %s already requested by %s; cannot claim for %s\n", - desc->name, desc->gpio_owner, owner); - goto out; - } + if ((gpio_range || ops->strict) && desc->gpio_owner) { + dev_err(pctldev->dev, + "pin %s already requested by %s; cannot claim for %s\n", + desc->name, desc->gpio_owner, owner); + goto out; + } - if (gpio_range) { - desc->gpio_owner = owner; - } else { - desc->mux_usecount++; - if (desc->mux_usecount > 1) - return 0; + if (gpio_range) { + desc->gpio_owner = owner; + } else { + desc->mux_usecount++; + if (desc->mux_usecount > 1) + return 0; - desc->mux_owner = owner; + desc->mux_owner = owner; + } } /* Let each pin increase references to this module */ @@ -178,12 +182,14 @@ static int pin_request(struct pinctrl_dev *pctldev, out_free_pin: if (status) { - if (gpio_range) { - desc->gpio_owner = NULL; - } else { - desc->mux_usecount--; - if (!desc->mux_usecount) - desc->mux_owner = NULL; + scoped_guard(mutex, &desc->mux_lock) { + if (gpio_range) { + desc->gpio_owner = NULL; + } else { + desc->mux_usecount--; + if (!desc->mux_usecount) + desc->mux_owner = NULL; + } } } out: @@ -219,15 +225,17 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, return NULL; } - if (!gpio_range) { - /* - * A pin should not be freed more times than allocated. 
- */ - if (WARN_ON(!desc->mux_usecount)) - return NULL; - desc->mux_usecount--; - if (desc->mux_usecount) - return NULL; + scoped_guard(mutex, &desc->mux_lock) { + if (!gpio_range) { + /* + * A pin should not be freed more times than allocated. + */ + if (WARN_ON(!desc->mux_usecount)) + return NULL; + desc->mux_usecount--; + if (desc->mux_usecount) + return NULL; + } } /* @@ -239,13 +247,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, else if (ops->free) ops->free(pctldev, pin); - if (gpio_range) { - owner = desc->gpio_owner; - desc->gpio_owner = NULL; - } else { - owner = desc->mux_owner; - desc->mux_owner = NULL; - desc->mux_setting = NULL; + scoped_guard(mutex, &desc->mux_lock) { + if (gpio_range) { + owner = desc->gpio_owner; + desc->gpio_owner = NULL; + } else { + owner = desc->mux_owner; + desc->mux_owner = NULL; + desc->mux_setting = NULL; + } } module_put(pctldev->owner); @@ -458,7 +468,8 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting) pins[i]); continue; } - desc->mux_setting = &(setting->data.mux); + scoped_guard(mutex, &desc->mux_lock) + desc->mux_setting = &(setting->data.mux); } ret = ops->set_mux(pctldev, setting->data.mux.func, @@ -472,8 +483,10 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting) err_set_mux: for (i = 0; i < num_pins; i++) { desc = pin_desc_get(pctldev, pins[i]); - if (desc) - desc->mux_setting = NULL; + if (desc) { + scoped_guard(mutex, &desc->mux_lock) + desc->mux_setting = NULL; + } } err_pin_request: /* On error release all taken pins */ @@ -492,6 +505,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting) unsigned int num_pins = 0; int i; struct pin_desc *desc; + bool is_equal; if (pctlops->get_group_pins) ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, @@ -517,7 +531,10 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting) pins[i]); continue; } - if (desc->mux_setting == &(setting->data.mux)) { + scoped_guard(mutex, &desc->mux_lock) + is_equal = (desc->mux_setting == &(setting->data.mux)); + + if (is_equal) { pin_free(pctldev, pins[i], NULL); } else { const char *gname; @@ -608,40 +625,42 @@ static int pinmux_pins_show(struct seq_file *s, void *what) if (desc == NULL) continue; - if (desc->mux_owner && - !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev))) - is_hog = true; - - if (pmxops->strict) { - if (desc->mux_owner) - seq_printf(s, "pin %d (%s): device %s%s", - pin, desc->name, desc->mux_owner, + scoped_guard(mutex, &desc->mux_lock) { + if (desc->mux_owner && + !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev))) + is_hog = true; + + if (pmxops->strict) { + if (desc->mux_owner) + seq_printf(s, "pin %d (%s): device %s%s", + pin, desc->name, desc->mux_owner, + is_hog ? " (HOG)" : ""); + else if (desc->gpio_owner) + seq_printf(s, "pin %d (%s): GPIO %s", + pin, desc->name, desc->gpio_owner); + else + seq_printf(s, "pin %d (%s): UNCLAIMED", + pin, desc->name); + } else { + /* For non-strict controllers */ + seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name, + desc->mux_owner ? desc->mux_owner + : "(MUX UNCLAIMED)", + desc->gpio_owner ? desc->gpio_owner + : "(GPIO UNCLAIMED)", is_hog ? 
" (HOG)" : ""); - else if (desc->gpio_owner) - seq_printf(s, "pin %d (%s): GPIO %s", - pin, desc->name, desc->gpio_owner); + } + + /* If mux: print function+group claiming the pin */ + if (desc->mux_setting) + seq_printf(s, " function %s group %s\n", + pmxops->get_function_name(pctldev, + desc->mux_setting->func), + pctlops->get_group_name(pctldev, + desc->mux_setting->group)); else - seq_printf(s, "pin %d (%s): UNCLAIMED", - pin, desc->name); - } else { - /* For non-strict controllers */ - seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name, - desc->mux_owner ? desc->mux_owner - : "(MUX UNCLAIMED)", - desc->gpio_owner ? desc->gpio_owner - : "(GPIO UNCLAIMED)", - is_hog ? " (HOG)" : ""); + seq_putc(s, '\n'); } - - /* If mux: print function+group claiming the pin */ - if (desc->mux_setting) - seq_printf(s, " function %s group %s\n", - pmxops->get_function_name(pctldev, - desc->mux_setting->func), - pctlops->get_group_name(pctldev, - desc->mux_setting->group)); - else - seq_putc(s, '\n'); } mutex_unlock(&pctldev->mutex); diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm index 8fe459d082ed..206226318e45 100644 --- a/drivers/pinctrl/qcom/Kconfig.msm +++ b/drivers/pinctrl/qcom/Kconfig.msm @@ -46,6 +46,15 @@ config PINCTRL_IPQ5332 Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc IPQ5332 platform. +config PINCTRL_IPQ5424 + tristate "Qualcomm Technologies, Inc. IPQ5424 pin controller driver" + depends on ARM64 || COMPILE_TEST + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for + the Qualcomm Technologies Inc. TLMM block found on the + Qualcomm Technologies Inc. IPQ5424 platform. Select this for + IPQ5424. + config PINCTRL_IPQ8074 tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver" depends on ARM64 || COMPILE_TEST @@ -182,6 +191,20 @@ config PINCTRL_QCS404 This is the pinctrl, pinmux, pinconf and gpiolib driver for the TLMM block found in the Qualcomm QCS404 platform. +config PINCTRL_QCS615 + tristate "Qualcomm Technologies QCS615 pin controller driver" + depends on ARM64 || COMPILE_TEST + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + TLMM block found on the Qualcomm QCS615 platform. + +config PINCTRL_QCS8300 + tristate "Qualcomm Technologies QCS8300 pin controller driver" + depends on ARM64 || COMPILE_TEST + help + This is the pinctrl, pinmux and pinconf driver for the Qualcomm + TLMM block found on the Qualcomm QCS8300 platform. + config PINCTRL_QDF2XXX tristate "Qualcomm Technologies QDF2xxx pin controller driver" depends on ACPI @@ -204,6 +227,14 @@ config PINCTRL_SA8775P This is the pinctrl, pinmux and pinconf driver for the Qualcomm TLMM block found on the Qualcomm SA8775P platforms. +config PINCTRL_SAR2130P + tristate "Qualcomm Technologies Inc SAR2130P pin controller driver" + depends on ARM64 || COMPILE_TEST + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc TLMM block found on the Qualcomm + Technologies Inc SAR2130P platform. + config PINCTRL_SC7180 tristate "Qualcomm Technologies Inc SC7180 pin controller driver" depends on ARM64 || COMPILE_TEST @@ -382,6 +413,14 @@ config PINCTRL_SM8650 Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SM8650 platform. 
+config PINCTRL_SM8750 + tristate "Qualcomm Technologies Inc SM8750 pin controller driver" + depends on ARM64 || COMPILE_TEST + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc TLMM block found on the Qualcomm + Technologies Inc SM8750 platform. + config PINCTRL_X1E80100 tristate "Qualcomm Technologies Inc X1E80100 pin controller driver" depends on ARM64 || COMPILE_TEST diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index eb04297b6388..9a23d41d801c 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o obj-$(CONFIG_PINCTRL_IPQ5018) += pinctrl-ipq5018.o obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o obj-$(CONFIG_PINCTRL_IPQ5332) += pinctrl-ipq5332.o +obj-$(CONFIG_PINCTRL_IPQ5424) += pinctrl-ipq5424.o obj-$(CONFIG_PINCTRL_IPQ8074) += pinctrl-ipq8074.o obj-$(CONFIG_PINCTRL_IPQ6018) += pinctrl-ipq6018.o obj-$(CONFIG_PINCTRL_IPQ9574) += pinctrl-ipq9574.o @@ -23,6 +24,8 @@ obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o obj-$(CONFIG_PINCTRL_QCM2290) += pinctrl-qcm2290.o obj-$(CONFIG_PINCTRL_QCS404) += pinctrl-qcs404.o +obj-$(CONFIG_PINCTRL_QCS615) += pinctrl-qcs615.o +obj-$(CONFIG_PINCTRL_QCS8300) += pinctrl-qcs8300.o obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o obj-$(CONFIG_PINCTRL_MDM9607) += pinctrl-mdm9607.o obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o @@ -32,6 +35,7 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o obj-$(CONFIG_PINCTRL_QDU1000) += pinctrl-qdu1000.o obj-$(CONFIG_PINCTRL_SA8775P) += pinctrl-sa8775p.o +obj-$(CONFIG_PINCTRL_SAR2130P) += pinctrl-sar2130p.o obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o obj-$(CONFIG_PINCTRL_SC7280) += pinctrl-sc7280.o obj-$(CONFIG_PINCTRL_SC7280_LPASS_LPI) += pinctrl-sc7280-lpass-lpi.o @@ -62,6 +66,7 @@ obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o obj-$(CONFIG_PINCTRL_SM8550_LPASS_LPI) += pinctrl-sm8550-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8650) += pinctrl-sm8650.o obj-$(CONFIG_PINCTRL_SM8650_LPASS_LPI) += pinctrl-sm8650-lpass-lpi.o +obj-$(CONFIG_PINCTRL_SM8750) += pinctrl-sm8750.o obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o obj-$(CONFIG_PINCTRL_X1E80100) += pinctrl-x1e80100.o diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c index a18df4162299..20c3b9025044 100644 --- a/drivers/pinctrl/qcom/pinctrl-apq8064.c +++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c @@ -629,7 +629,7 @@ static struct platform_driver apq8064_pinctrl_driver = { .of_match_table = apq8064_pinctrl_of_match, }, .probe = apq8064_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init apq8064_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c index afada80e52a2..3fc0a40762b6 100644 --- a/drivers/pinctrl/qcom/pinctrl-apq8084.c +++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c @@ -1207,7 +1207,7 @@ static struct platform_driver apq8084_pinctrl_driver = { .of_match_table = apq8084_pinctrl_of_match, }, .probe = apq8084_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init apq8084_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c index 
cb13576ad6cf..1f7944dd829d 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c @@ -710,7 +710,7 @@ static struct platform_driver ipq4019_pinctrl_driver = { .of_match_table = ipq4019_pinctrl_of_match, }, .probe = ipq4019_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq4019_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c index 68f65b57003e..e2951f81c3ee 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c @@ -754,7 +754,7 @@ static struct platform_driver ipq5018_pinctrl_driver = { .of_match_table = ipq5018_pinctrl_of_match, }, .probe = ipq5018_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq5018_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c index 882175118970..625f8014051f 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c @@ -834,7 +834,7 @@ static struct platform_driver ipq5332_pinctrl_driver = { .of_match_table = ipq5332_pinctrl_of_match, }, .probe = ipq5332_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq5332_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c new file mode 100644 index 000000000000..796299cd2e4e --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018,2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "pinctrl-msm.h" + +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .grp = PINCTRL_PINGROUP("gpio" #id, \ + gpio##id##_pins, \ + ARRAY_SIZE(gpio##id##_pins)), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_SIZE * id, \ + .io_reg = 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = 0x8 + REG_SIZE * id, \ + .intr_status_reg = 0xc + REG_SIZE * id, \ + .intr_target_reg = 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +static const struct pinctrl_pin_desc ipq5424_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); 
+DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); + +enum ipq5424_functions { + msm_mux_atest_char, + msm_mux_atest_char0, + msm_mux_atest_char1, + msm_mux_atest_char2, + msm_mux_atest_char3, + msm_mux_atest_tic, + msm_mux_audio_pri, + msm_mux_audio_pri0, + msm_mux_audio_pri1, + msm_mux_audio_sec, + msm_mux_audio_sec0, + msm_mux_audio_sec1, + msm_mux_core_voltage, + msm_mux_cri_trng0, + msm_mux_cri_trng1, + msm_mux_cri_trng2, + msm_mux_cri_trng3, + msm_mux_cxc_clk, + msm_mux_cxc_data, + msm_mux_dbg_out, + msm_mux_gcc_plltest, + msm_mux_gcc_tlmm, + msm_mux_gpio, + msm_mux_i2c0_scl, + msm_mux_i2c0_sda, + msm_mux_i2c1_scl, + msm_mux_i2c1_sda, + msm_mux_i2c11, + msm_mux_mac0, + msm_mux_mac1, + msm_mux_mdc_mst, + msm_mux_mdc_slv, + msm_mux_mdio_mst, + msm_mux_mdio_slv, + msm_mux_pcie0_clk, + msm_mux_pcie0_wake, + msm_mux_pcie1_clk, + msm_mux_pcie1_wake, + msm_mux_pcie2_clk, + msm_mux_pcie2_wake, + msm_mux_pcie3_clk, + msm_mux_pcie3_wake, + msm_mux_pll_test, + msm_mux_prng_rosc0, + msm_mux_prng_rosc1, + msm_mux_prng_rosc2, + msm_mux_prng_rosc3, + msm_mux_PTA0_0, + msm_mux_PTA0_1, + msm_mux_PTA0_2, + msm_mux_PTA10, + msm_mux_PTA11, + msm_mux_pwm0, + msm_mux_pwm1, + msm_mux_pwm2, + msm_mux_qdss_cti_trig_in_a0, + msm_mux_qdss_cti_trig_out_a0, + msm_mux_qdss_cti_trig_in_a1, + msm_mux_qdss_cti_trig_out_a1, + msm_mux_qdss_cti_trig_in_b0, + msm_mux_qdss_cti_trig_out_b0, + msm_mux_qdss_cti_trig_in_b1, + msm_mux_qdss_cti_trig_out_b1, + msm_mux_qdss_traceclk_a, + msm_mux_qdss_tracectl_a, + msm_mux_qdss_tracedata_a, + msm_mux_qspi_clk, + msm_mux_qspi_cs, + msm_mux_qspi_data, + msm_mux_resout, + msm_mux_rx0, + msm_mux_rx1, + msm_mux_rx2, + msm_mux_sdc_clk, + msm_mux_sdc_cmd, + msm_mux_sdc_data, + msm_mux_spi0, + msm_mux_spi1, + msm_mux_spi10, + msm_mux_spi11, + msm_mux_tsens_max, + msm_mux_uart0, + msm_mux_uart1, + msm_mux_wci_txd, + msm_mux_wci_rxd, + msm_mux_wsi_clk, + msm_mux_wsi_data, + msm_mux__, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", +}; + +static const char * const sdc_data_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; + +static const char * const qspi_data_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; + +static const char * const pwm2_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; + +static const char * const wci_txd_groups[] = { + "gpio0", "gpio1", "gpio8", "gpio10", "gpio11", "gpio40", "gpio41", +}; + +static const char * const wci_rxd_groups[] = { + "gpio0", "gpio1", "gpio8", "gpio10", "gpio11", "gpio40", "gpio41", +}; + +static const char * const sdc_cmd_groups[] 
= { + "gpio4", +}; + +static const char * const qspi_cs_groups[] = { + "gpio4", +}; + +static const char * const qdss_cti_trig_out_a1_groups[] = { + "gpio27", +}; + +static const char * const sdc_clk_groups[] = { + "gpio5", +}; + +static const char * const qspi_clk_groups[] = { + "gpio5", +}; + +static const char * const spi0_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9", +}; + +static const char * const pwm1_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9", +}; + +static const char * const cri_trng0_groups[] = { + "gpio6", +}; + +static const char * const qdss_tracedata_a_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", + "gpio13", "gpio14", "gpio15", "gpio20", "gpio21", "gpio36", "gpio37", + "gpio38", "gpio39", +}; + +static const char * const cri_trng1_groups[] = { + "gpio7", +}; + +static const char * const cri_trng2_groups[] = { + "gpio8", +}; + +static const char * const cri_trng3_groups[] = { + "gpio9", +}; + +static const char * const uart0_groups[] = { + "gpio10", "gpio11", "gpio12", "gpio13", +}; + +static const char * const pwm0_groups[] = { + "gpio10", "gpio11", "gpio12", "gpio13", +}; + +static const char * const prng_rosc0_groups[] = { + "gpio12", +}; + +static const char * const prng_rosc1_groups[] = { + "gpio13", +}; + +static const char * const i2c0_scl_groups[] = { + "gpio14", +}; + +static const char * const tsens_max_groups[] = { + "gpio14", +}; + +static const char * const prng_rosc2_groups[] = { + "gpio14", +}; + +static const char * const i2c0_sda_groups[] = { + "gpio15", +}; + +static const char * const prng_rosc3_groups[] = { + "gpio15", +}; + +static const char * const core_voltage_groups[] = { + "gpio16", "gpio17", +}; + +static const char * const i2c1_scl_groups[] = { + "gpio16", +}; + +static const char * const i2c1_sda_groups[] = { + "gpio17", +}; + +static const char * const mdc_slv_groups[] = { + "gpio20", +}; + +static const char * const atest_char0_groups[] = { + "gpio20", +}; + +static const char * const mdio_slv_groups[] = { + "gpio21", +}; + +static const char * const atest_char1_groups[] = { + "gpio21", +}; + +static const char * const mdc_mst_groups[] = { + "gpio22", +}; + +static const char * const atest_char2_groups[] = { + "gpio22", +}; + +static const char * const mdio_mst_groups[] = { + "gpio23", +}; + +static const char * const atest_char3_groups[] = { + "gpio23", +}; + +static const char * const pcie0_clk_groups[] = { + "gpio24", +}; + +static const char * const PTA10_groups[] = { + "gpio24", "gpio26", "gpio27", +}; + +static const char * const mac0_groups[] = { + "gpio24", "gpio26", +}; + +static const char * const atest_char_groups[] = { + "gpio24", +}; + +static const char * const pcie0_wake_groups[] = { + "gpio26", +}; + +static const char * const pcie1_clk_groups[] = { + "gpio27", +}; + +static const char * const i2c11_groups[] = { + "gpio27", "gpio29", +}; + +static const char * const pcie1_wake_groups[] = { + "gpio29", +}; + +static const char * const pcie2_clk_groups[] = { + "gpio30", +}; + +static const char * const mac1_groups[] = { + "gpio30", "gpio32", +}; + +static const char * const pcie2_wake_groups[] = { + "gpio32", +}; + +static const char * const PTA11_groups[] = { + "gpio30", "gpio32", "gpio33", +}; + +static const char * const audio_pri0_groups[] = { + "gpio32", "gpio32", +}; + +static const char * const pcie3_clk_groups[] = { + "gpio33", +}; + +static const char * const audio_pri1_groups[] = { + "gpio33", "gpio33", +}; + +static const char * const pcie3_wake_groups[] = { + "gpio35", +}; 
+ +static const char * const audio_sec1_groups[] = { + "gpio35", "gpio35", +}; + +static const char * const audio_pri_groups[] = { + "gpio36", "gpio37", "gpio38", "gpio39", +}; + +static const char * const spi1_groups[] = { + "gpio11", "gpio36", "gpio37", "gpio38", "gpio46", +}; + +static const char * const audio_sec0_groups[] = { + "gpio36", "gpio36", +}; + +static const char * const rx1_groups[] = { + "gpio38", "gpio46", +}; + +static const char * const pll_test_groups[] = { + "gpio38", +}; + +static const char * const dbg_out_groups[] = { + "gpio46", +}; + +static const char * const PTA0_0_groups[] = { + "gpio40", +}; + +static const char * const atest_tic_groups[] = { + "gpio40", +}; + +static const char * const PTA0_1_groups[] = { + "gpio41", +}; + +static const char * const cxc_data_groups[] = { + "gpio41", +}; + +static const char * const PTA0_2_groups[] = { + "gpio42", +}; + +static const char * const cxc_clk_groups[] = { + "gpio42", +}; + +static const char * const uart1_groups[] = { + "gpio43", "gpio44", +}; + +static const char * const audio_sec_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48", +}; + +static const char * const gcc_plltest_groups[] = { + "gpio43", "gpio45", +}; + +static const char * const gcc_tlmm_groups[] = { + "gpio44", +}; + +static const char * const qdss_cti_trig_out_b1_groups[] = { + "gpio33", +}; + +static const char * const rx0_groups[] = { + "gpio39", "gpio47", +}; + +static const char * const qdss_traceclk_a_groups[] = { + "gpio45", +}; + +static const char * const qdss_tracectl_a_groups[] = { + "gpio46", +}; + +static const char * const qdss_cti_trig_out_a0_groups[] = { + "gpio24", +}; + +static const char * const qdss_cti_trig_in_a0_groups[] = { + "gpio26", +}; + +static const char * const resout_groups[] = { + "gpio49", +}; + +static const char * const qdss_cti_trig_in_a1_groups[] = { + "gpio29", +}; + +static const char * const qdss_cti_trig_out_b0_groups[] = { + "gpio30", +}; + +static const char * const qdss_cti_trig_in_b0_groups[] = { + "gpio32", +}; + +static const char * const qdss_cti_trig_in_b1_groups[] = { + "gpio35", +}; + +static const char * const spi10_groups[] = { + "gpio45", "gpio47", "gpio48", +}; + +static const char * const spi11_groups[] = { + "gpio10", "gpio12", "gpio13", +}; + +static const char * const wsi_clk_groups[] = { + "gpio24", "gpio27", +}; + +static const char * const wsi_data_groups[] = { + "gpio26", "gpio29", +}; + +static const char * const rx2_groups[] = { + "gpio37", "gpio45", +}; + +static const struct pinfunction ipq5424_functions[] = { + MSM_PIN_FUNCTION(atest_char), + MSM_PIN_FUNCTION(atest_char0), + MSM_PIN_FUNCTION(atest_char1), + MSM_PIN_FUNCTION(atest_char2), + MSM_PIN_FUNCTION(atest_char3), + MSM_PIN_FUNCTION(atest_tic), + MSM_PIN_FUNCTION(audio_pri), + MSM_PIN_FUNCTION(audio_pri0), + MSM_PIN_FUNCTION(audio_pri1), + MSM_PIN_FUNCTION(audio_sec), + MSM_PIN_FUNCTION(audio_sec0), + MSM_PIN_FUNCTION(audio_sec1), + MSM_PIN_FUNCTION(core_voltage), + MSM_PIN_FUNCTION(cri_trng0), + MSM_PIN_FUNCTION(cri_trng1), + MSM_PIN_FUNCTION(cri_trng2), + MSM_PIN_FUNCTION(cri_trng3), + MSM_PIN_FUNCTION(cxc_clk), + MSM_PIN_FUNCTION(cxc_data), + MSM_PIN_FUNCTION(dbg_out), + MSM_PIN_FUNCTION(gcc_plltest), + MSM_PIN_FUNCTION(gcc_tlmm), + MSM_PIN_FUNCTION(gpio), + MSM_PIN_FUNCTION(i2c0_scl), + MSM_PIN_FUNCTION(i2c0_sda), + MSM_PIN_FUNCTION(i2c1_scl), + MSM_PIN_FUNCTION(i2c1_sda), + MSM_PIN_FUNCTION(i2c11), + MSM_PIN_FUNCTION(mac0), + MSM_PIN_FUNCTION(mac1), + MSM_PIN_FUNCTION(mdc_mst), + MSM_PIN_FUNCTION(mdc_slv), + 
MSM_PIN_FUNCTION(mdio_mst), + MSM_PIN_FUNCTION(mdio_slv), + MSM_PIN_FUNCTION(pcie0_clk), + MSM_PIN_FUNCTION(pcie0_wake), + MSM_PIN_FUNCTION(pcie1_clk), + MSM_PIN_FUNCTION(pcie1_wake), + MSM_PIN_FUNCTION(pcie2_clk), + MSM_PIN_FUNCTION(pcie2_wake), + MSM_PIN_FUNCTION(pcie3_clk), + MSM_PIN_FUNCTION(pcie3_wake), + MSM_PIN_FUNCTION(pll_test), + MSM_PIN_FUNCTION(prng_rosc0), + MSM_PIN_FUNCTION(prng_rosc1), + MSM_PIN_FUNCTION(prng_rosc2), + MSM_PIN_FUNCTION(prng_rosc3), + MSM_PIN_FUNCTION(PTA0_0), + MSM_PIN_FUNCTION(PTA0_1), + MSM_PIN_FUNCTION(PTA0_2), + MSM_PIN_FUNCTION(PTA10), + MSM_PIN_FUNCTION(PTA11), + MSM_PIN_FUNCTION(pwm0), + MSM_PIN_FUNCTION(pwm1), + MSM_PIN_FUNCTION(pwm2), + MSM_PIN_FUNCTION(qdss_cti_trig_in_a0), + MSM_PIN_FUNCTION(qdss_cti_trig_out_a0), + MSM_PIN_FUNCTION(qdss_cti_trig_in_a1), + MSM_PIN_FUNCTION(qdss_cti_trig_out_a1), + MSM_PIN_FUNCTION(qdss_cti_trig_in_b0), + MSM_PIN_FUNCTION(qdss_cti_trig_out_b0), + MSM_PIN_FUNCTION(qdss_cti_trig_in_b1), + MSM_PIN_FUNCTION(qdss_cti_trig_out_b1), + MSM_PIN_FUNCTION(qdss_traceclk_a), + MSM_PIN_FUNCTION(qdss_tracectl_a), + MSM_PIN_FUNCTION(qdss_tracedata_a), + MSM_PIN_FUNCTION(qspi_clk), + MSM_PIN_FUNCTION(qspi_cs), + MSM_PIN_FUNCTION(qspi_data), + MSM_PIN_FUNCTION(resout), + MSM_PIN_FUNCTION(rx0), + MSM_PIN_FUNCTION(rx1), + MSM_PIN_FUNCTION(rx2), + MSM_PIN_FUNCTION(sdc_clk), + MSM_PIN_FUNCTION(sdc_cmd), + MSM_PIN_FUNCTION(sdc_data), + MSM_PIN_FUNCTION(spi0), + MSM_PIN_FUNCTION(spi1), + MSM_PIN_FUNCTION(spi10), + MSM_PIN_FUNCTION(spi11), + MSM_PIN_FUNCTION(tsens_max), + MSM_PIN_FUNCTION(uart0), + MSM_PIN_FUNCTION(uart1), + MSM_PIN_FUNCTION(wci_txd), + MSM_PIN_FUNCTION(wci_rxd), + MSM_PIN_FUNCTION(wsi_clk), + MSM_PIN_FUNCTION(wsi_data), +}; + +static const struct msm_pingroup ipq5424_groups[] = { + PINGROUP(0, sdc_data, qspi_data, pwm2, wci_txd, wci_rxd, _, _, _, _), + PINGROUP(1, sdc_data, qspi_data, pwm2, wci_txd, wci_rxd, _, _, _, _), + PINGROUP(2, sdc_data, qspi_data, pwm2, _, _, _, _, _, _), + PINGROUP(3, sdc_data, qspi_data, pwm2, _, _, _, _, _, _), + PINGROUP(4, sdc_cmd, qspi_cs, _, _, _, _, _, _, _), + PINGROUP(5, sdc_clk, qspi_clk, _, _, _, _, _, _, _), + PINGROUP(6, spi0, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _), + PINGROUP(7, spi0, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _), + PINGROUP(8, spi0, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _), + PINGROUP(9, spi0, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _), + PINGROUP(10, uart0, pwm0, spi11, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _), + PINGROUP(11, uart0, pwm0, spi1, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _), + PINGROUP(12, uart0, pwm0, spi11, _, prng_rosc0, qdss_tracedata_a, _, _, _), + PINGROUP(13, uart0, pwm0, spi11, _, prng_rosc1, qdss_tracedata_a, _, _, _), + PINGROUP(14, i2c0_scl, tsens_max, _, prng_rosc2, qdss_tracedata_a, _, _, _, _), + PINGROUP(15, i2c0_sda, _, prng_rosc3, qdss_tracedata_a, _, _, _, _, _), + PINGROUP(16, core_voltage, i2c1_scl, _, _, _, _, _, _, _), + PINGROUP(17, core_voltage, i2c1_sda, _, _, _, _, _, _, _), + PINGROUP(18, _, _, _, _, _, _, _, _, _), + PINGROUP(19, _, _, _, _, _, _, _, _, _), + PINGROUP(20, mdc_slv, atest_char0, _, qdss_tracedata_a, _, _, _, _, _), + PINGROUP(21, mdio_slv, atest_char1, _, qdss_tracedata_a, _, _, _, _, _), + PINGROUP(22, mdc_mst, atest_char2, _, _, _, _, _, _, _), + PINGROUP(23, mdio_mst, atest_char3, _, _, _, _, _, _, _), + PINGROUP(24, pcie0_clk, PTA10, mac0, _, wsi_clk, _, atest_char, qdss_cti_trig_out_a0, _), + PINGROUP(25, _, _, _, _, _, _, _, _, _), + PINGROUP(26, 
pcie0_wake, PTA10, mac0, _, wsi_data, _, qdss_cti_trig_in_a0, _, _), + PINGROUP(27, pcie1_clk, i2c11, PTA10, wsi_clk, qdss_cti_trig_out_a1, _, _, _, _), + PINGROUP(28, _, _, _, _, _, _, _, _, _), + PINGROUP(29, pcie1_wake, i2c11, wsi_data, qdss_cti_trig_in_a1, _, _, _, _, _), + PINGROUP(30, pcie2_clk, PTA11, mac1, qdss_cti_trig_out_b0, _, _, _, _, _), + PINGROUP(31, _, _, _, _, _, _, _, _, _), + PINGROUP(32, pcie2_wake, PTA11, mac1, audio_pri0, audio_pri0, qdss_cti_trig_in_b0, _, _, _), + PINGROUP(33, pcie3_clk, PTA11, audio_pri1, audio_pri1, qdss_cti_trig_out_b1, _, _, _, _), + PINGROUP(34, _, _, _, _, _, _, _, _, _), + PINGROUP(35, pcie3_wake, audio_sec1, audio_sec1, qdss_cti_trig_in_b1, _, _, _, _, _), + PINGROUP(36, audio_pri, spi1, audio_sec0, audio_sec0, qdss_tracedata_a, _, _, _, _), + PINGROUP(37, audio_pri, spi1, rx2, qdss_tracedata_a, _, _, _, _, _), + PINGROUP(38, audio_pri, spi1, pll_test, rx1, qdss_tracedata_a, _, _, _, _), + PINGROUP(39, audio_pri, rx0, _, qdss_tracedata_a, _, _, _, _, _), + PINGROUP(40, PTA0_0, wci_txd, wci_rxd, _, atest_tic, _, _, _, _), + PINGROUP(41, PTA0_1, wci_txd, wci_rxd, cxc_data, _, _, _, _, _), + PINGROUP(42, PTA0_2, cxc_clk, _, _, _, _, _, _, _), + PINGROUP(43, uart1, gcc_plltest, _, _, _, _, _, _, _), + PINGROUP(44, uart1, gcc_tlmm, _, _, _, _, _, _, _), + PINGROUP(45, spi10, rx2, audio_sec, gcc_plltest, _, qdss_traceclk_a, _, _, _), + PINGROUP(46, spi1, rx1, audio_sec, dbg_out, qdss_tracectl_a, _, _, _, _), + PINGROUP(47, spi10, rx0, audio_sec, _, _, _, _, _, _), + PINGROUP(48, spi10, audio_sec, _, _, _, _, _, _, _), + PINGROUP(49, resout, _, _, _, _, _, _, _, _), +}; + +static const struct msm_pinctrl_soc_data ipq5424_pinctrl = { + .pins = ipq5424_pins, + .npins = ARRAY_SIZE(ipq5424_pins), + .functions = ipq5424_functions, + .nfunctions = ARRAY_SIZE(ipq5424_functions), + .groups = ipq5424_groups, + .ngroups = ARRAY_SIZE(ipq5424_groups), + .ngpios = 50, +}; + +static int ipq5424_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &ipq5424_pinctrl); +} + +static const struct of_device_id ipq5424_pinctrl_of_match[] = { + { .compatible = "qcom,ipq5424-tlmm", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ipq5424_pinctrl_of_match); + +static struct platform_driver ipq5424_pinctrl_driver = { + .driver = { + .name = "ipq5424-tlmm", + .of_match_table = ipq5424_pinctrl_of_match, + }, + .probe = ipq5424_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init ipq5424_pinctrl_init(void) +{ + return platform_driver_register(&ipq5424_pinctrl_driver); +} +arch_initcall(ipq5424_pinctrl_init); + +static void __exit ipq5424_pinctrl_exit(void) +{ + platform_driver_unregister(&ipq5424_pinctrl_driver); +} +module_exit(ipq5424_pinctrl_exit); + +MODULE_DESCRIPTION("QTI IPQ5424 TLMM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c index ac330d8712b5..0ad08647dbcd 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c @@ -1080,7 +1080,7 @@ static struct platform_driver ipq6018_pinctrl_driver = { .of_match_table = ipq6018_pinctrl_of_match, }, .probe = ipq6018_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq6018_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c index e10e1bc4c911..e2bb94e86aef 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c @@ -631,7 
+631,7 @@ static struct platform_driver ipq8064_pinctrl_driver = { .of_match_table = ipq8064_pinctrl_of_match, }, .probe = ipq8064_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq8064_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c index fee32c1d1d3e..337f3a1c92c1 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c @@ -1041,7 +1041,7 @@ static struct platform_driver ipq8074_pinctrl_driver = { .of_match_table = ipq8074_pinctrl_of_match, }, .probe = ipq8074_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq8074_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c index 20ab59cb621b..e2491617b236 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c @@ -799,7 +799,7 @@ static struct platform_driver ipq9574_pinctrl_driver = { .of_match_table = ipq9574_pinctrl_of_match, }, .probe = ipq9574_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init ipq9574_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c index 415d24e16267..e7cd3ef1cf3e 100644 --- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c +++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c @@ -1059,7 +1059,7 @@ static struct platform_driver mdm9607_pinctrl_driver = { .of_match_table = mdm9607_pinctrl_of_match, }, .probe = mdm9607_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init mdm9607_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c index 3f2eafea0b24..0a2ae383d3d5 100644 --- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c +++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c @@ -446,7 +446,7 @@ static struct platform_driver mdm9615_pinctrl_driver = { .of_match_table = mdm9615_pinctrl_of_match, }, .probe = mdm9615_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init mdm9615_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index aeaf0d1958f5..ec913c2e200f 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1457,7 +1457,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) * files which don't set the "gpio-ranges" property or systems that * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
*/ - if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) { + if (!of_property_present(pctrl->dev->of_node, "gpio-ranges")) { ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio); if (ret) { diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c index 40806c0650ef..64fee70f1772 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8226.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c @@ -654,7 +654,7 @@ static struct platform_driver msm8226_pinctrl_driver = { .of_match_table = msm8226_pinctrl_of_match, }, .probe = msm8226_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8226_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c index dba6d531b4a1..999a5f867eb5 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8660.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c @@ -981,7 +981,7 @@ static struct platform_driver msm8660_pinctrl_driver = { .of_match_table = msm8660_pinctrl_of_match, }, .probe = msm8660_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8660_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c index 14b17ba9f906..756856d20d6b 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8909.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c @@ -929,7 +929,7 @@ static struct platform_driver msm8909_pinctrl_driver = { .of_match_table = msm8909_pinctrl_of_match, }, .probe = msm8909_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8909_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c index 184dcf842273..cea5c54f92fe 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8916.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c @@ -969,7 +969,7 @@ static struct platform_driver msm8916_pinctrl_driver = { .of_match_table = msm8916_pinctrl_of_match, }, .probe = msm8916_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8916_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c index c2253821ae8d..998351bdfee1 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8953.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c @@ -1816,7 +1816,7 @@ static struct platform_driver msm8953_pinctrl_driver = { .of_match_table = msm8953_pinctrl_of_match, }, .probe = msm8953_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8953_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c index 6b9148d226e9..ebe230b3b437 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8960.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c @@ -1246,7 +1246,7 @@ static struct platform_driver msm8960_pinctrl_driver = { .of_match_table = msm8960_pinctrl_of_match, }, .probe = msm8960_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8960_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c index 9a951888e8a1..c30d80e4e98c 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8976.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c @@ -1096,7 +1096,7 @@ static struct platform_driver msm8976_pinctrl_driver = { .of_match_table = msm8976_pinctrl_of_match, 
}, .probe = msm8976_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8976_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c index 1ed1dd32d6c7..b1a6759ab4a5 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8994.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c @@ -1343,7 +1343,7 @@ static struct platform_driver msm8994_pinctrl_driver = { .of_match_table = msm8994_pinctrl_of_match, }, .probe = msm8994_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8994_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c index 777c2a74036e..1b5d80eaab83 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8996.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c @@ -1920,7 +1920,7 @@ static struct platform_driver msm8996_pinctrl_driver = { .of_match_table = msm8996_pinctrl_of_match, }, .probe = msm8996_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8996_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c index 4aaf45e54f3a..b7cbf32b3125 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8998.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c @@ -1535,7 +1535,7 @@ static struct platform_driver msm8998_pinctrl_driver = { .of_match_table = msm8998_pinctrl_of_match, }, .probe = msm8998_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8998_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c index 750a8272ded7..238c83f6ec4f 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c @@ -1083,7 +1083,7 @@ static struct platform_driver msm8x74_pinctrl_driver = { .of_match_table = msm8x74_pinctrl_of_match, }, .probe = msm8x74_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init msm8x74_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c index f5c1c427b44e..ba699eac9ee8 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c +++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c @@ -1113,7 +1113,7 @@ static struct platform_driver qcm2290_pinctrl_driver = { .of_match_table = qcm2290_pinctrl_of_match, }, .probe = qcm2290_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init qcm2290_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c index 9a875b7dc998..ae7224012f8a 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c @@ -1644,7 +1644,7 @@ static struct platform_driver qcs404_pinctrl_driver = { .of_match_table = qcs404_pinctrl_of_match, }, .probe = qcs404_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init qcs404_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c new file mode 100644 index 000000000000..23015b055f6a --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c @@ -0,0 +1,1107 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "pinctrl-msm.h" + +enum { + SOUTH, + EAST, + WEST +}; + +static const char * const qcs615_tiles[] = { + [SOUTH] = "south", + [EAST] = "east", + [WEST] = "west" +}; + +#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .grp = PINCTRL_PINGROUP("gpio" #id, \ + gpio##id##_pins, \ + ARRAY_SIZE(gpio##id##_pins)), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = 0x1000 * id, \ + .io_reg = 0x1000 * id + 0x4, \ + .intr_cfg_reg = 0x1000 * id + 0x8, \ + .intr_status_reg = 0x1000 * id + 0xc, \ + .intr_target_reg = 0x1000 * id + 0x8, \ + .tile = _tile, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .tile = _tile, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .tile = WEST, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +static const struct pinctrl_pin_desc qcs615_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + 
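Taken literally, the PINGROUP() macro above gives every qcs615 GPIO a 4 KiB register window inside its tile, indexed by pin number; the offsets below are derived from the macro, for illustration only:

/* register offsets produced by PINGROUP(10, ...) */
ctl_reg         = 0x1000 * 10;        /* 0xa000: function mux, pull, drive */
io_reg          = 0x1000 * 10 + 0x4;  /* 0xa004: input/output value       */
intr_cfg_reg    = 0x1000 * 10 + 0x8;  /* 0xa008: interrupt configuration  */
intr_status_reg = 0x1000 * 10 + 0xc;  /* 0xa00c: interrupt status         */

Note that intr_target_reg reuses the 0x8 offset: the wakeup-routing field shares the configuration register, sitting at intr_target_bit = 5.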
PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "UFS_RESET"), + PINCTRL_PIN(124, "SDC1_RCLK"), + PINCTRL_PIN(125, "SDC1_CLK"), + PINCTRL_PIN(126, "SDC1_CMD"), + PINCTRL_PIN(127, "SDC1_DATA"), + PINCTRL_PIN(128, "SDC2_CLK"), + PINCTRL_PIN(129, "SDC2_CMD"), + PINCTRL_PIN(130, "SDC2_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); 
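Each DECLARE_MSM_GPIO_PINS(n) invocation in the run below is a token-pasting shorthand; one expansion, shown for illustration:

DECLARE_MSM_GPIO_PINS(5);
/* expands to: static const unsigned int gpio5_pins[] = { 5 }; */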
+DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); + +static const unsigned int ufs_reset_pins[] = { 123 }; +static const unsigned int sdc1_rclk_pins[] = { 124 }; +static const unsigned int sdc1_clk_pins[] = { 125 }; +static const unsigned int sdc1_cmd_pins[] = { 126 }; +static const unsigned int sdc1_data_pins[] = { 127 }; +static const unsigned int sdc2_clk_pins[] = { 128 }; +static const unsigned int sdc2_cmd_pins[] = { 129 }; +static const unsigned int sdc2_data_pins[] = { 130 }; + +enum qcs615_functions { + msm_mux_gpio, + msm_mux_adsp_ext, + msm_mux_agera_pll, + msm_mux_aoss_cti, + msm_mux_atest_char, + msm_mux_atest_tsens, + msm_mux_atest_usb, + 
msm_mux_cam_mclk, + msm_mux_cci_async, + msm_mux_cci_i2c, + msm_mux_cci_timer, + msm_mux_copy_gp, + msm_mux_copy_phase, + msm_mux_cri_trng, + msm_mux_dbg_out_clk, + msm_mux_ddr_bist, + msm_mux_ddr_pxi, + msm_mux_dp_hot, + msm_mux_edp_hot, + msm_mux_edp_lcd, + msm_mux_emac_gcc, + msm_mux_emac_phy_intr, + msm_mux_forced_usb, + msm_mux_gcc_gp, + msm_mux_gp_pdm, + msm_mux_gps_tx, + msm_mux_hs0_mi2s, + msm_mux_hs1_mi2s, + msm_mux_jitter_bist, + msm_mux_ldo_en, + msm_mux_ldo_update, + msm_mux_m_voc, + msm_mux_mclk1, + msm_mux_mclk2, + msm_mux_mdp_vsync, + msm_mux_mdp_vsync0_out, + msm_mux_mdp_vsync1_out, + msm_mux_mdp_vsync2_out, + msm_mux_mdp_vsync3_out, + msm_mux_mdp_vsync4_out, + msm_mux_mdp_vsync5_out, + msm_mux_mi2s_1, + msm_mux_mss_lte, + msm_mux_nav_pps_in, + msm_mux_nav_pps_out, + msm_mux_pa_indicator_or, + msm_mux_pcie_clk_req, + msm_mux_pcie_ep_rst, + msm_mux_phase_flag, + msm_mux_pll_bist, + msm_mux_pll_bypassnl, + msm_mux_pll_reset_n, + msm_mux_prng_rosc, + msm_mux_qdss_cti, + msm_mux_qdss_gpio, + msm_mux_qlink_enable, + msm_mux_qlink_request, + msm_mux_qspi, + msm_mux_qup0, + msm_mux_qup1, + msm_mux_rgmii, + msm_mux_sd_write_protect, + msm_mux_sp_cmu, + msm_mux_ter_mi2s, + msm_mux_tgu_ch, + msm_mux_uim1, + msm_mux_uim2, + msm_mux_usb0_hs, + msm_mux_usb1_hs, + msm_mux_usb_phy_ps, + msm_mux_vfr_1, + msm_mux_vsense_trigger_mirnat, + msm_mux_wlan, + msm_mux_wsa_clk, + msm_mux_wsa_data, + msm_mux__, +}; + +static const char *const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", + "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", + "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", + "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", + "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", + "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", + "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", + "gpio54", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", + "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", + "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", + "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", + "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", + "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", + "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", + "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107", + "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113", + "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119", + "gpio120", "gpio121", "gpio122", +}; + +static const char *const adsp_ext_groups[] = { + "gpio118", +}; + +static const char *const agera_pll_groups[] = { + "gpio28", +}; + +static const char *const aoss_cti_groups[] = { + "gpio76", +}; + +static const char *const atest_char_groups[] = { + "gpio84", "gpio85", "gpio86", "gpio87", + "gpio115", "gpio117", "gpio118", "gpio119", + "gpio120", "gpio121", +}; + +static const char *const atest_tsens_groups[] = { + "gpio7", "gpio29", +}; + +static const char *const atest_usb_groups[] = { + "gpio7", "gpio10", "gpio11", "gpio54", + "gpio55", "gpio67", "gpio68", "gpio76", + "gpio75", "gpio77", +}; + +static const char *const cam_mclk_groups[] = { + "gpio28", "gpio29", "gpio30", "gpio31", +}; + +static const char *const cci_async_groups[] = { + "gpio26", "gpio41", "gpio42", +}; + 
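The qcs615_functions enum above and the *_groups[] tables around here are two halves of one mapping: every msm_mux_* enumerator has a table naming the pin groups that can carry that function, and the enumerator's position inside a group's funcs[] array (the PINGROUP rows further down) is the raw FUNC_SEL value. A rough sketch of what the shared pinctrl-msm core programs as a result, assuming the qcs615 layout above (mux_bit = 2, nfuncs = 10, hence a 4-bit field):

u32 val = readl(tile_base + 0x1000 * gpio);	/* ctl_reg */
val &= ~GENMASK(5, 2);				/* FUNC_SEL field at .mux_bit = 2 */
val |= function_index << 2;			/* index into the group's funcs[] */
writel(val, tile_base + 0x1000 * gpio);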
+static const char *const cci_i2c_groups[] = { + "gpio32", "gpio33", "gpio34", "gpio35", +}; + +static const char *const cci_timer_groups[] = { + "gpio37", "gpio38", "gpio39", "gpio41", + "gpio42", +}; + +static const char *const copy_gp_groups[] = { + "gpio86", +}; + +static const char *const copy_phase_groups[] = { + "gpio103", +}; + +static const char *const cri_trng_groups[] = { + "gpio60", "gpio61", "gpio62", +}; + +static const char *const dbg_out_clk_groups[] = { + "gpio11", +}; + +static const char *const ddr_bist_groups[] = { + "gpio7", "gpio8", "gpio9", "gpio10", +}; + +static const char *const ddr_pxi_groups[] = { + "gpio6", "gpio7", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio54", "gpio55", +}; + +static const char *const dp_hot_groups[] = { + "gpio102", "gpio103", "gpio104", +}; + +static const char *const edp_hot_groups[] = { + "gpio113", +}; + +static const char *const edp_lcd_groups[] = { + "gpio119", +}; + +static const char *const emac_gcc_groups[] = { + "gpio101", "gpio102", +}; + +static const char *const emac_phy_intr_groups[] = { + "gpio89", +}; + +static const char *const forced_usb_groups[] = { + "gpio43", +}; + +static const char *const gcc_gp_groups[] = { + "gpio21", "gpio22", "gpio57", "gpio58", + "gpio59", "gpio78", +}; + +static const char *const gp_pdm_groups[] = { + "gpio8", "gpio54", "gpio63", "gpio66", + "gpio79", "gpio95", +}; + +static const char *const gps_tx_groups[] = { + "gpio53", "gpio54", "gpio56", "gpio57", + "gpio59", "gpio60", +}; + +static const char *const hs0_mi2s_groups[] = { + "gpio36", "gpio37", "gpio38", "gpio39", +}; + +static const char *const hs1_mi2s_groups[] = { + "gpio24", "gpio25", "gpio26", "gpio27", +}; + +static const char *const jitter_bist_groups[] = { + "gpio12", "gpio26", +}; + +static const char *const ldo_en_groups[] = { + "gpio97", +}; + +static const char *const ldo_update_groups[] = { + "gpio98", +}; + +static const char *const m_voc_groups[] = { + "gpio120", +}; + +static const char *const mclk1_groups[] = { + "gpio121", +}; + +static const char *const mclk2_groups[] = { + "gpio122", +}; + +static const char *const mdp_vsync_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio90", + "gpio97", "gpio98", +}; + +static const char *const mdp_vsync0_out_groups[] = { + "gpio90", +}; + +static const char *const mdp_vsync1_out_groups[] = { + "gpio90", +}; + +static const char *const mdp_vsync2_out_groups[] = { + "gpio90", +}; + +static const char *const mdp_vsync3_out_groups[] = { + "gpio90", +}; + +static const char *const mdp_vsync4_out_groups[] = { + "gpio90", +}; + +static const char *const mdp_vsync5_out_groups[] = { + "gpio90", +}; + +static const char *const mi2s_1_groups[] = { + "gpio108", "gpio109", "gpio110", "gpio111", +}; + +static const char *const mss_lte_groups[] = { + "gpio106", "gpio107", +}; + +static const char *const nav_pps_in_groups[] = { + "gpio53", "gpio56", "gpio57", "gpio59", + "gpio60", +}; + +static const char *const nav_pps_out_groups[] = { + "gpio53", "gpio56", "gpio57", "gpio59", + "gpio60", +}; + +static const char *const pa_indicator_or_groups[] = { + "gpio53", +}; + +static const char *const pcie_clk_req_groups[] = { + "gpio90", +}; + +static const char *const pcie_ep_rst_groups[] = { + "gpio89", +}; + +static const char *const phase_flag_groups[] = { + "gpio10", "gpio18", "gpio19", "gpio20", + "gpio23", "gpio24", "gpio25", "gpio38", + "gpio40", "gpio41", "gpio42", "gpio43", + "gpio44", "gpio45", "gpio53", "gpio54", + "gpio55", "gpio67", "gpio68", "gpio75", + "gpio76", "gpio77", "gpio78", 
"gpio79", + "gpio80", "gpio82", "gpio84", "gpio92", + "gpio116", "gpio117", "gpio118", "gpio119", +}; + +static const char *const pll_bist_groups[] = { + "gpio27", +}; + +static const char *const pll_bypassnl_groups[] = { + "gpio13", +}; + +static const char *const pll_reset_n_groups[] = { + "gpio14", +}; + +static const char *const prng_rosc_groups[] = { + "gpio99", "gpio102", +}; + +static const char *const qdss_cti_groups[] = { + "gpio83", "gpio96", "gpio97", "gpio98", + "gpio103", "gpio104", "gpio112", "gpio113", +}; + +static const char *const qdss_gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", + "gpio6", "gpio7", "gpio8", "gpio9", + "gpio14", "gpio15", "gpio20", "gpio21", + "gpio28", "gpio29", "gpio30", "gpio31", + "gpio32", "gpio33", "gpio34", "gpio35", + "gpio44", "gpio45", "gpio46", "gpio47", + "gpio81", "gpio82", "gpio92", "gpio93", + "gpio94", "gpio95", "gpio108", "gpio109", + "gpio117", "gpio118", "gpio119", "gpio120", +}; + +static const char *const qlink_enable_groups[] = { + "gpio52", +}; + +static const char *const qlink_request_groups[] = { + "gpio51", +}; + +static const char *const qspi_groups[] = { + "gpio44", "gpio45", "gpio46", "gpio47", + "gpio48", "gpio49", "gpio50", +}; + +static const char *const qup0_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", + "gpio4", "gpio5", "gpio16", "gpio17", + "gpio18", "gpio19", +}; + +static const char *const qup1_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9", + "gpio10", "gpio11", "gpio12", "gpio13", + "gpio14", "gpio15", "gpio20", "gpio21", + "gpio22", "gpio23", +}; + +static const char *const rgmii_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", + "gpio96", "gpio97", "gpio102", "gpio103", + "gpio112", "gpio113", "gpio114", +}; + +static const char *const sd_write_protect_groups[] = { + "gpio24", +}; + +static const char *const sp_cmu_groups[] = { + "gpio64", +}; + +static const char *const ter_mi2s_groups[] = { + "gpio115", "gpio116", "gpio117", "gpio118", +}; + +static const char *const tgu_ch_groups[] = { + "gpio89", "gpio90", "gpio91", "gpio92", +}; + +static const char *const uim1_groups[] = { + "gpio77", "gpio78", "gpio79", "gpio80", +}; + +static const char *const uim2_groups[] = { + "gpio73", "gpio74", "gpio75", "gpio76", +}; + +static const char *const usb0_hs_groups[] = { + "gpio88", +}; + +static const char *const usb1_hs_groups[] = { + "gpio89", +}; + +static const char *const usb_phy_ps_groups[] = { + "gpio104", +}; + +static const char *const vfr_1_groups[] = { + "gpio92", +}; + +static const char *const vsense_trigger_mirnat_groups[] = { + "gpio7", +}; + +static const char *const wlan_groups[] = { + "gpio16", "gpio17", "gpio47", "gpio48", +}; + +static const char *const wsa_clk_groups[] = { + "gpio111", +}; + +static const char *const wsa_data_groups[] = { + "gpio110", +}; + +static const struct pinfunction qcs615_functions[] = { + MSM_PIN_FUNCTION(gpio), + MSM_PIN_FUNCTION(adsp_ext), + MSM_PIN_FUNCTION(agera_pll), + MSM_PIN_FUNCTION(aoss_cti), + MSM_PIN_FUNCTION(atest_char), + MSM_PIN_FUNCTION(atest_tsens), + MSM_PIN_FUNCTION(atest_usb), + MSM_PIN_FUNCTION(cam_mclk), + MSM_PIN_FUNCTION(cci_async), + MSM_PIN_FUNCTION(cci_i2c), + MSM_PIN_FUNCTION(cci_timer), + MSM_PIN_FUNCTION(copy_gp), + MSM_PIN_FUNCTION(copy_phase), + MSM_PIN_FUNCTION(cri_trng), + MSM_PIN_FUNCTION(dbg_out_clk), + MSM_PIN_FUNCTION(ddr_bist), + MSM_PIN_FUNCTION(ddr_pxi), + MSM_PIN_FUNCTION(dp_hot), + MSM_PIN_FUNCTION(edp_hot), + MSM_PIN_FUNCTION(edp_lcd), + MSM_PIN_FUNCTION(emac_gcc), + 
MSM_PIN_FUNCTION(emac_phy_intr), + MSM_PIN_FUNCTION(forced_usb), + MSM_PIN_FUNCTION(gcc_gp), + MSM_PIN_FUNCTION(gp_pdm), + MSM_PIN_FUNCTION(gps_tx), + MSM_PIN_FUNCTION(hs0_mi2s), + MSM_PIN_FUNCTION(hs1_mi2s), + MSM_PIN_FUNCTION(jitter_bist), + MSM_PIN_FUNCTION(ldo_en), + MSM_PIN_FUNCTION(ldo_update), + MSM_PIN_FUNCTION(m_voc), + MSM_PIN_FUNCTION(mclk1), + MSM_PIN_FUNCTION(mclk2), + MSM_PIN_FUNCTION(mdp_vsync), + MSM_PIN_FUNCTION(mdp_vsync0_out), + MSM_PIN_FUNCTION(mdp_vsync1_out), + MSM_PIN_FUNCTION(mdp_vsync2_out), + MSM_PIN_FUNCTION(mdp_vsync3_out), + MSM_PIN_FUNCTION(mdp_vsync4_out), + MSM_PIN_FUNCTION(mdp_vsync5_out), + MSM_PIN_FUNCTION(mi2s_1), + MSM_PIN_FUNCTION(mss_lte), + MSM_PIN_FUNCTION(nav_pps_in), + MSM_PIN_FUNCTION(nav_pps_out), + MSM_PIN_FUNCTION(pa_indicator_or), + MSM_PIN_FUNCTION(pcie_clk_req), + MSM_PIN_FUNCTION(pcie_ep_rst), + MSM_PIN_FUNCTION(phase_flag), + MSM_PIN_FUNCTION(pll_bist), + MSM_PIN_FUNCTION(pll_bypassnl), + MSM_PIN_FUNCTION(pll_reset_n), + MSM_PIN_FUNCTION(prng_rosc), + MSM_PIN_FUNCTION(qdss_cti), + MSM_PIN_FUNCTION(qdss_gpio), + MSM_PIN_FUNCTION(qlink_enable), + MSM_PIN_FUNCTION(qlink_request), + MSM_PIN_FUNCTION(qspi), + MSM_PIN_FUNCTION(qup0), + MSM_PIN_FUNCTION(qup1), + MSM_PIN_FUNCTION(rgmii), + MSM_PIN_FUNCTION(sd_write_protect), + MSM_PIN_FUNCTION(sp_cmu), + MSM_PIN_FUNCTION(ter_mi2s), + MSM_PIN_FUNCTION(tgu_ch), + MSM_PIN_FUNCTION(uim1), + MSM_PIN_FUNCTION(uim2), + MSM_PIN_FUNCTION(usb0_hs), + MSM_PIN_FUNCTION(usb1_hs), + MSM_PIN_FUNCTION(usb_phy_ps), + MSM_PIN_FUNCTION(vfr_1), + MSM_PIN_FUNCTION(vsense_trigger_mirnat), + MSM_PIN_FUNCTION(wlan), + MSM_PIN_FUNCTION(wsa_clk), + MSM_PIN_FUNCTION(wsa_data), +}; + +/* + * Every pin is maintained as a single group. Missing or non-existent pins + * are kept as dummy groups so that the pin group index stays in sync with + * the pin descriptors registered with the pinctrl core. + * Clients cannot request these dummy pin groups.
+ */ +static const struct msm_pingroup qcs615_groups[] = { + [0] = PINGROUP(0, WEST, qup0, _, qdss_gpio, _, _, _, _, _, _), + [1] = PINGROUP(1, WEST, qup0, _, qdss_gpio, _, _, _, _, _, _), + [2] = PINGROUP(2, WEST, qup0, _, qdss_gpio, _, _, _, _, _, _), + [3] = PINGROUP(3, WEST, qup0, _, qdss_gpio, _, _, _, _, _, _), + [4] = PINGROUP(4, WEST, qup0, _, _, _, _, _, _, _, _), + [5] = PINGROUP(5, WEST, qup0, _, _, _, _, _, _, _, _), + [6] = PINGROUP(6, EAST, qup1, qdss_gpio, ddr_pxi, _, _, _, _, _, _), + [7] = PINGROUP(7, EAST, qup1, ddr_bist, qdss_gpio, atest_tsens, + vsense_trigger_mirnat, atest_usb, ddr_pxi, _, _), + [8] = PINGROUP(8, EAST, qup1, gp_pdm, ddr_bist, qdss_gpio, _, _, _, _, _), + [9] = PINGROUP(9, EAST, qup1, ddr_bist, qdss_gpio, _, _, _, _, _, _), + [10] = PINGROUP(10, EAST, qup1, ddr_bist, _, phase_flag, atest_usb, ddr_pxi, _, _, _), + [11] = PINGROUP(11, EAST, qup1, dbg_out_clk, atest_usb, ddr_pxi, _, _, _, _, _), + [12] = PINGROUP(12, EAST, qup1, jitter_bist, ddr_pxi, _, _, _, _, _, _), + [13] = PINGROUP(13, EAST, qup1, pll_bypassnl, _, ddr_pxi, _, _, _, _, _), + [14] = PINGROUP(14, EAST, qup1, pll_reset_n, _, qdss_gpio, _, _, _, _, _), + [15] = PINGROUP(15, EAST, qup1, qdss_gpio, _, _, _, _, _, _, _), + [16] = PINGROUP(16, WEST, qup0, _, wlan, _, _, _, _, _, _), + [17] = PINGROUP(17, WEST, qup0, _, wlan, _, _, _, _, _, _), + [18] = PINGROUP(18, WEST, qup0, _, phase_flag, _, _, _, _, _, _), + [19] = PINGROUP(19, WEST, qup0, _, phase_flag, _, _, _, _, _, _), + [20] = PINGROUP(20, SOUTH, qup1, _, phase_flag, qdss_gpio, _, _, _, _, _), + [21] = PINGROUP(21, SOUTH, qup1, gcc_gp, _, qdss_gpio, _, _, _, _, _), + [22] = PINGROUP(22, SOUTH, qup1, gcc_gp, _, _, _, _, _, _, _), + [23] = PINGROUP(23, SOUTH, qup1, _, phase_flag, _, _, _, _, _, _), + [24] = PINGROUP(24, EAST, hs1_mi2s, sd_write_protect, _, phase_flag, _, _, _, _, _), + [25] = PINGROUP(25, EAST, hs1_mi2s, _, phase_flag, _, _, _, _, _, _), + [26] = PINGROUP(26, EAST, cci_async, hs1_mi2s, jitter_bist, _, _, _, _, _, _), + [27] = PINGROUP(27, EAST, hs1_mi2s, pll_bist, _, _, _, _, _, _, _), + [28] = PINGROUP(28, EAST, cam_mclk, agera_pll, qdss_gpio, _, _, _, _, _, _), + [29] = PINGROUP(29, EAST, cam_mclk, _, qdss_gpio, atest_tsens, _, _, _, _, _), + [30] = PINGROUP(30, EAST, cam_mclk, qdss_gpio, _, _, _, _, _, _, _), + [31] = PINGROUP(31, EAST, cam_mclk, _, qdss_gpio, _, _, _, _, _, _), + [32] = PINGROUP(32, EAST, cci_i2c, _, qdss_gpio, _, _, _, _, _, _), + [33] = PINGROUP(33, EAST, cci_i2c, _, qdss_gpio, _, _, _, _, _, _), + [34] = PINGROUP(34, EAST, cci_i2c, _, qdss_gpio, _, _, _, _, _, _), + [35] = PINGROUP(35, EAST, cci_i2c, _, qdss_gpio, _, _, _, _, _, _), + [36] = PINGROUP(36, EAST, hs0_mi2s, _, _, _, _, _, _, _, _), + [37] = PINGROUP(37, EAST, cci_timer, hs0_mi2s, _, _, _, _, _, _, _), + [38] = PINGROUP(38, EAST, cci_timer, hs0_mi2s, _, phase_flag, _, _, _, _, _), + [39] = PINGROUP(39, EAST, cci_timer, hs0_mi2s, _, _, _, _, _, _, _), + [40] = PINGROUP(40, EAST, _, phase_flag, _, _, _, _, _, _, _), + [41] = PINGROUP(41, EAST, cci_async, cci_timer, _, phase_flag, _, _, _, _, _), + [42] = PINGROUP(42, EAST, cci_async, cci_timer, _, phase_flag, _, _, _, _, _), + [43] = PINGROUP(43, SOUTH, _, phase_flag, forced_usb, _, _, _, _, _, _), + [44] = PINGROUP(44, EAST, qspi, _, phase_flag, qdss_gpio, _, _, _, _, _), + [45] = PINGROUP(45, EAST, qspi, _, phase_flag, qdss_gpio, _, _, _, _, _), + [46] = PINGROUP(46, EAST, qspi, _, qdss_gpio, _, _, _, _, _, _), + [47] = PINGROUP(47, EAST, qspi, _, qdss_gpio, wlan, _, _, _, _, _), + 
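/* reading a row: argument position is the FUNC_SEL value, so in [47] above
 * 0 = gpio, 1 = qspi, 3 = qdss_gpio and 4 = wlan, while each '_' resolves
 * to msm_mux__, a placeholder that clients can never request. */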
[48] = PINGROUP(48, EAST, qspi, _, wlan, _, _, _, _, _, _), + [49] = PINGROUP(49, EAST, qspi, _, _, _, _, _, _, _, _), + [50] = PINGROUP(50, EAST, qspi, _, _, _, _, _, _, _, _), + [51] = PINGROUP(51, SOUTH, qlink_request, _, _, _, _, _, _, _, _), + [52] = PINGROUP(52, SOUTH, qlink_enable, _, _, _, _, _, _, _, _), + [53] = PINGROUP(53, SOUTH, pa_indicator_or, nav_pps_in, nav_pps_out, gps_tx, _, + phase_flag, _, _, _), + [54] = PINGROUP(54, SOUTH, _, gps_tx, gp_pdm, _, phase_flag, atest_usb, ddr_pxi, _, _), + [55] = PINGROUP(55, SOUTH, _, _, phase_flag, atest_usb, ddr_pxi, _, _, _, _), + [56] = PINGROUP(56, SOUTH, _, nav_pps_in, nav_pps_out, gps_tx, _, _, _, _, _), + [57] = PINGROUP(57, SOUTH, _, nav_pps_in, gps_tx, nav_pps_out, gcc_gp, _, _, _, _), + [58] = PINGROUP(58, SOUTH, _, gcc_gp, _, _, _, _, _, _, _), + [59] = PINGROUP(59, SOUTH, _, nav_pps_in, nav_pps_out, gps_tx, gcc_gp, _, _, _, _), + [60] = PINGROUP(60, SOUTH, _, nav_pps_in, nav_pps_out, gps_tx, cri_trng, _, _, _, _), + [61] = PINGROUP(61, SOUTH, _, cri_trng, _, _, _, _, _, _, _), + [62] = PINGROUP(62, SOUTH, _, cri_trng, _, _, _, _, _, _, _), + [63] = PINGROUP(63, SOUTH, _, _, gp_pdm, _, _, _, _, _, _), + [64] = PINGROUP(64, SOUTH, _, sp_cmu, _, _, _, _, _, _, _), + [65] = PINGROUP(65, SOUTH, _, _, _, _, _, _, _, _, _), + [66] = PINGROUP(66, SOUTH, _, gp_pdm, _, _, _, _, _, _, _), + [67] = PINGROUP(67, SOUTH, _, _, _, phase_flag, atest_usb, _, _, _, _), + [68] = PINGROUP(68, SOUTH, _, _, _, phase_flag, atest_usb, _, _, _, _), + [69] = PINGROUP(69, SOUTH, _, _, _, _, _, _, _, _, _), + [70] = PINGROUP(70, SOUTH, _, _, _, _, _, _, _, _, _), + [71] = PINGROUP(71, SOUTH, _, _, _, _, _, _, _, _, _), + [72] = PINGROUP(72, SOUTH, _, _, _, _, _, _, _, _, _), + [73] = PINGROUP(73, SOUTH, uim2, _, _, _, _, _, _, _, _), + [74] = PINGROUP(74, SOUTH, uim2, _, _, _, _, _, _, _, _), + [75] = PINGROUP(75, SOUTH, uim2, _, phase_flag, atest_usb, _, _, _, _, _), + [76] = PINGROUP(76, SOUTH, uim2, _, phase_flag, atest_usb, aoss_cti, _, _, _, _), + [77] = PINGROUP(77, SOUTH, uim1, _, phase_flag, atest_usb, _, _, _, _, _), + [78] = PINGROUP(78, SOUTH, uim1, gcc_gp, _, phase_flag, _, _, _, _, _), + [79] = PINGROUP(79, SOUTH, uim1, gp_pdm, _, phase_flag, _, _, _, _, _), + [80] = PINGROUP(80, SOUTH, uim1, _, phase_flag, _, _, _, _, _, _), + [81] = PINGROUP(81, WEST, rgmii, mdp_vsync, _, qdss_gpio, _, _, _, _, _), + [82] = PINGROUP(82, WEST, rgmii, mdp_vsync, _, phase_flag, qdss_gpio, _, _, _, _), + [83] = PINGROUP(83, WEST, rgmii, mdp_vsync, _, qdss_cti, _, _, _, _, _), + [84] = PINGROUP(84, SOUTH, _, phase_flag, atest_char, _, _, _, _, _, _), + [85] = PINGROUP(85, SOUTH, _, atest_char, _, _, _, _, _, _, _), + [86] = PINGROUP(86, SOUTH, copy_gp, _, atest_char, _, _, _, _, _, _), + [87] = PINGROUP(87, SOUTH, _, atest_char, _, _, _, _, _, _, _), + [88] = PINGROUP(88, WEST, _, usb0_hs, _, _, _, _, _, _, _), + [89] = PINGROUP(89, WEST, emac_phy_intr, pcie_ep_rst, tgu_ch, usb1_hs, _, _, _, _, _), + [90] = PINGROUP(90, WEST, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, + mdp_vsync2_out, mdp_vsync3_out, mdp_vsync4_out, mdp_vsync5_out, + pcie_clk_req, tgu_ch), + [91] = PINGROUP(91, WEST, rgmii, tgu_ch, _, _, _, _, _, _, _), + [92] = PINGROUP(92, WEST, rgmii, vfr_1, tgu_ch, _, phase_flag, qdss_gpio, _, _, _), + [93] = PINGROUP(93, WEST, rgmii, qdss_gpio, _, _, _, _, _, _, _), + [94] = PINGROUP(94, WEST, rgmii, qdss_gpio, _, _, _, _, _, _, _), + [95] = PINGROUP(95, WEST, rgmii, gp_pdm, qdss_gpio, _, _, _, _, _, _), + [96] = PINGROUP(96, WEST, rgmii, qdss_cti, _, _, 
_, _, _, _, _), + [97] = PINGROUP(97, WEST, rgmii, mdp_vsync, ldo_en, qdss_cti, _, _, _, _, _), + [98] = PINGROUP(98, WEST, mdp_vsync, ldo_update, qdss_cti, _, _, _, _, _, _), + [99] = PINGROUP(99, EAST, prng_rosc, _, _, _, _, _, _, _, _), + [100] = PINGROUP(100, WEST, _, _, _, _, _, _, _, _, _), + [101] = PINGROUP(101, WEST, emac_gcc, _, _, _, _, _, _, _, _), + [102] = PINGROUP(102, WEST, rgmii, dp_hot, emac_gcc, prng_rosc, _, _, _, _, _), + [103] = PINGROUP(103, WEST, rgmii, dp_hot, copy_phase, qdss_cti, _, _, _, _, _), + [104] = PINGROUP(104, WEST, usb_phy_ps, _, qdss_cti, dp_hot, _, _, _, _, _), + [105] = PINGROUP(105, SOUTH, _, _, _, _, _, _, _, _, _), + [106] = PINGROUP(106, EAST, mss_lte, _, _, _, _, _, _, _, _), + [107] = PINGROUP(107, EAST, mss_lte, _, _, _, _, _, _, _, _), + [108] = PINGROUP(108, SOUTH, mi2s_1, _, qdss_gpio, _, _, _, _, _, _), + [109] = PINGROUP(109, SOUTH, mi2s_1, _, qdss_gpio, _, _, _, _, _, _), + [110] = PINGROUP(110, SOUTH, wsa_data, mi2s_1, _, _, _, _, _, _, _), + [111] = PINGROUP(111, SOUTH, wsa_clk, mi2s_1, _, _, _, _, _, _, _), + [112] = PINGROUP(112, WEST, rgmii, _, qdss_cti, _, _, _, _, _, _), + [113] = PINGROUP(113, WEST, rgmii, edp_hot, _, qdss_cti, _, _, _, _, _), + [114] = PINGROUP(114, WEST, rgmii, _, _, _, _, _, _, _, _), + [115] = PINGROUP(115, SOUTH, ter_mi2s, atest_char, _, _, _, _, _, _, _), + [116] = PINGROUP(116, SOUTH, ter_mi2s, _, phase_flag, _, _, _, _, _, _), + [117] = PINGROUP(117, SOUTH, ter_mi2s, _, phase_flag, qdss_gpio, atest_char, _, _, _, _), + [118] = PINGROUP(118, SOUTH, ter_mi2s, adsp_ext, _, phase_flag, qdss_gpio, atest_char, + _, _, _), + [119] = PINGROUP(119, SOUTH, edp_lcd, _, phase_flag, qdss_gpio, atest_char, _, _, _, _), + [120] = PINGROUP(120, SOUTH, m_voc, qdss_gpio, atest_char, _, _, _, _, _, _), + [121] = PINGROUP(121, SOUTH, mclk1, atest_char, _, _, _, _, _, _, _), + [122] = PINGROUP(122, SOUTH, mclk2, _, _, _, _, _, _, _, _), + [123] = UFS_RESET(ufs_reset, 0x9f000), + [124] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x9a000, 15, 0), + [125] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x9a000, 13, 6), + [126] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x9a000, 11, 3), + [127] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x9a000, 9, 0), + [128] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x98000, 14, 6), + [129] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x98000, 11, 3), + [130] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x98000, 9, 0), +}; + +static const struct msm_gpio_wakeirq_map qcs615_pdc_map[] = { + { 1, 45 }, { 3, 31 }, { 7, 55 }, { 9, 110 }, { 11, 34 }, + { 13, 33 }, { 14, 35 }, { 17, 46 }, { 19, 48 }, { 21, 83 }, + { 22, 36 }, { 26, 38 }, { 35, 37 }, { 39, 125 }, { 41, 47 }, + { 47, 49 }, { 48, 51 }, { 50, 52 }, { 51, 123 }, { 55, 56 }, + { 56, 57 }, { 57, 58 }, { 60, 60 }, { 71, 54 }, { 80, 73 }, + { 81, 64 }, { 82, 50 }, { 83, 65 }, { 84, 92 }, { 85, 99 }, + { 86, 67 }, { 87, 84 }, { 88, 124 }, { 89, 122 }, { 90, 69 }, + { 92, 88 }, { 93, 75 }, { 94, 91 }, { 95, 72 }, { 96, 82 }, + { 97, 74 }, { 98, 95 }, { 99, 94 }, { 100, 100 }, { 101, 40 }, + { 102, 93 }, { 103, 77 }, { 104, 78 }, { 105, 96 }, { 107, 97 }, + { 108, 111 }, { 112, 112 }, { 113, 113 }, { 117, 85 }, { 118, 102 }, + { 119, 87 }, { 120, 114 }, { 121, 89 }, { 122, 90 }, +}; + +static const struct msm_pinctrl_soc_data qcs615_tlmm = { + .pins = qcs615_pins, + .npins = ARRAY_SIZE(qcs615_pins), + .functions = qcs615_functions, + .nfunctions = ARRAY_SIZE(qcs615_functions), + .groups = qcs615_groups, + .ngroups = ARRAY_SIZE(qcs615_groups), + .ngpios = 123, + .tiles = qcs615_tiles, + .ntiles = 
ARRAY_SIZE(qcs615_tiles), + .wakeirq_map = qcs615_pdc_map, + .nwakeirq_map = ARRAY_SIZE(qcs615_pdc_map), +}; + +static const struct of_device_id qcs615_tlmm_of_match[] = { + { + .compatible = "qcom,qcs615-tlmm", + }, + {}, +}; + +static int qcs615_tlmm_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &qcs615_tlmm); +} + +static struct platform_driver qcs615_tlmm_driver = { + .driver = { + .name = "qcs615-tlmm", + .of_match_table = qcs615_tlmm_of_match, + }, + .probe = qcs615_tlmm_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init qcs615_tlmm_init(void) +{ + return platform_driver_register(&qcs615_tlmm_driver); +} +arch_initcall(qcs615_tlmm_init); + +static void __exit qcs615_tlmm_exit(void) +{ + platform_driver_unregister(&qcs615_tlmm_driver); +} +module_exit(qcs615_tlmm_exit); + +MODULE_DESCRIPTION("QTI QCS615 TLMM driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, qcs615_tlmm_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c new file mode 100644 index 000000000000..ba6de944a859 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c @@ -0,0 +1,1246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "pinctrl-msm.h" + +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11)\ + { \ + .grp = PINCTRL_PINGROUP("gpio" #id, \ + gpio##id##_pins, \ + ARRAY_SIZE(gpio##id##_pins)), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9, \ + msm_mux_##f10, \ + msm_mux_##f11 /* egpio mode */ \ + }, \ + .nfuncs = 12, \ + .ctl_reg = REG_SIZE * id, \ + .io_reg = 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = 0x8 + REG_SIZE * id, \ + .intr_status_reg = 0xc + REG_SIZE * id, \ + .intr_target_reg = 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + 
.intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define QUP_I3C(qup_mode, qup_offset) \ + { \ + .mode = qup_mode, \ + .offset = qup_offset, \ + } + +#define QUP_I3C_6_MODE_OFFSET 0xaf000 +#define QUP_I3C_7_MODE_OFFSET 0xb0000 +#define QUP_I3C_13_MODE_OFFSET 0xb1000 +#define QUP_I3C_14_MODE_OFFSET 0xb2000 + +static const struct pinctrl_pin_desc qcs8300_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + 
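Relative to qcs615, the qcs8300 PINGROUP() variant near the top of this file adds an eleventh function slot reserved for egpio plus two extra ctl_reg bits; a summary of the shared pinctrl-msm.c handling (paraphrased, not quoted from it):

/* egpio_present (bit 11) is a capability flag; selecting the "egpio"
 * function clears egpio_enable (bit 12) to hand the pad to a secondary
 * subsystem, and any other selection sets it to keep TLMM control. */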
PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "UFS_RESET"), + PINCTRL_PIN(134, "SDC1_RCLK"), + PINCTRL_PIN(135, "SDC1_CLK"), + PINCTRL_PIN(136, "SDC1_CMD"), + PINCTRL_PIN(137, "SDC1_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); 
+DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); + +static const unsigned int ufs_reset_pins[] = { 133 }; +static const unsigned int sdc1_rclk_pins[] = { 134 }; +static const unsigned int sdc1_clk_pins[] = { 135 }; +static const unsigned int sdc1_cmd_pins[] = { 136 }; +static const unsigned int sdc1_data_pins[] = { 137 }; + +enum qcs8300_functions { + msm_mux_gpio, + msm_mux_aoss_cti, + msm_mux_atest_char, + msm_mux_atest_usb2, + msm_mux_audio_ref, + msm_mux_cam_mclk, + msm_mux_cci_async, + msm_mux_cci_i2c_scl, + msm_mux_cci_i2c_sda, + msm_mux_cci_timer, + msm_mux_cri_trng, + msm_mux_dbg_out, + msm_mux_ddr_bist, + msm_mux_ddr_pxi0, + msm_mux_ddr_pxi1, + msm_mux_ddr_pxi2, + msm_mux_ddr_pxi3, + msm_mux_edp0_hot, + msm_mux_edp0_lcd, + msm_mux_edp1_lcd, + msm_mux_egpio, + msm_mux_emac0_mcg0, + msm_mux_emac0_mcg1, + msm_mux_emac0_mcg2, + msm_mux_emac0_mcg3, + msm_mux_emac0_mdc, + msm_mux_emac0_mdio, + msm_mux_emac0_ptp_aux, + msm_mux_emac0_ptp_pps, + msm_mux_gcc_gp1, + msm_mux_gcc_gp2, + msm_mux_gcc_gp3, + msm_mux_gcc_gp4, + msm_mux_gcc_gp5, + msm_mux_hs0_mi2s, + msm_mux_hs1_mi2s, + msm_mux_hs2_mi2s, + msm_mux_ibi_i3c, + msm_mux_jitter_bist, + msm_mux_mdp0_vsync0, + msm_mux_mdp0_vsync1, + msm_mux_mdp0_vsync3, + msm_mux_mdp0_vsync6, + msm_mux_mdp0_vsync7, + msm_mux_mdp_vsync, + msm_mux_mi2s1_data0, + msm_mux_mi2s1_data1, + msm_mux_mi2s1_sck, + msm_mux_mi2s1_ws, + msm_mux_mi2s2_data0, + msm_mux_mi2s2_data1, + msm_mux_mi2s2_sck, + msm_mux_mi2s2_ws, + msm_mux_mi2s_mclk0, + msm_mux_mi2s_mclk1, + msm_mux_pcie0_clkreq, + msm_mux_pcie1_clkreq, + msm_mux_phase_flag, + msm_mux_pll_bist, + msm_mux_pll_clk, + msm_mux_prng_rosc0, + msm_mux_prng_rosc1, + msm_mux_prng_rosc2, + msm_mux_prng_rosc3, + msm_mux_qdss_cti, + msm_mux_qdss_gpio, + msm_mux_qup0_se0, + msm_mux_qup0_se1, + msm_mux_qup0_se2, + msm_mux_qup0_se3, + msm_mux_qup0_se4, + msm_mux_qup0_se5, + msm_mux_qup0_se6, + msm_mux_qup0_se7, + msm_mux_qup1_se0, + msm_mux_qup1_se1, + msm_mux_qup1_se2, + msm_mux_qup1_se3, + msm_mux_qup1_se4, + msm_mux_qup1_se5, + msm_mux_qup1_se6, + msm_mux_qup1_se7, + msm_mux_qup2_se0, + msm_mux_sailss_emac0, + msm_mux_sailss_ospi, + msm_mux_sgmii_phy, + msm_mux_tb_trig, + msm_mux_tgu_ch0, + 
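/* As on qcs615, the trailing pads declared just above this enum are
 * config-only: UFS_RESET and the SDC1 pads (pins 133-137 here, 123-130 on
 * qcs615) accept pull/drive settings via the UFS_RESET() and
 * SDC_QDSD_PINGROUP() entries but, with mux_bit and oe_bit set to -1, are
 * never exposed as GPIOs; qcs615 reports .ngpios = 123 for this reason. */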
msm_mux_tgu_ch1, + msm_mux_tgu_ch2, + msm_mux_tgu_ch3, + msm_mux_tsense_pwm1, + msm_mux_tsense_pwm2, + msm_mux_tsense_pwm3, + msm_mux_tsense_pwm4, + msm_mux_usb2phy_ac, + msm_mux_vsense_trigger, + msm_mux__, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", +}; + +static const char *const aoss_cti_groups[] = { + "gpio37", "gpio38", "gpio39", "gpio40", +}; + +static const char * const atest_char_groups[] = { + "gpio66", "gpio70", "gpio71", "gpio72", "gpio93", +}; + +static const char * const atest_usb2_groups[] = { + "gpio63", "gpio83", "gpio92", "gpio74", "gpio84", "gpio87", "gpio67", + "gpio75", "gpio85", "gpio65", "gpio68", "gpio80", "gpio64", "gpio69", + "gpio81", +}; + +static const char * const audio_ref_groups[] = { + "gpio105", +}; + +static const char * const cam_mclk_groups[] = { + "gpio67", "gpio68", "gpio69", +}; + +static const char * const cci_async_groups[] = { + "gpio63", "gpio64", "gpio65", "gpio29", "gpio30", "gpio31", +}; + +static const char * const cci_i2c_scl_groups[] = { + "gpio58", "gpio30", "gpio60", "gpio32", "gpio62", "gpio55", +}; + +static const char * const cci_i2c_sda_groups[] = { + "gpio57", "gpio29", "gpio59", "gpio31", "gpio61", "gpio54", +}; + +static const char *const cci_timer_groups[] = { + "gpio63", "gpio64", "gpio65", "gpio49", "gpio50", "gpio19", + "gpio20", "gpio21", "gpio22", "gpio23", +}; + +static const char *const cri_trng_groups[] = { + "gpio92", "gpio90", "gpio91", +}; + +static const char *const dbg_out_groups[] = { + "gpio75", +}; + +static const char * const ddr_bist_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56", +}; + +static const char *const ddr_pxi0_groups[] = { + "gpio68", "gpio69", +}; + +static const char *const ddr_pxi1_groups[] = { + "gpio49", "gpio50", +}; + +static const char *const ddr_pxi2_groups[] = { + "gpio52", "gpio83", +}; + +static const char *const ddr_pxi3_groups[] = { + "gpio80", "gpio81", +}; + +static const char *const edp0_hot_groups[] = { + "gpio94", +}; + +static const char *const edp0_lcd_groups[] = { + "gpio48", +}; + +static const char *const edp1_lcd_groups[] = { + "gpio49", +}; + +static const char *const egpio_groups[] = { + 
"gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", + "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", + "gpio122", "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", + "gpio128", "gpio129", "gpio130", "gpio131", "gpio132", +}; + +static const char *const emac0_mcg0_groups[] = { + "gpio10", +}; + +static const char *const emac0_mcg1_groups[] = { + "gpio11", +}; + +static const char *const emac0_mcg2_groups[] = { + "gpio24", +}; + +static const char *const emac0_mcg3_groups[] = { + "gpio79", +}; + +static const char *const emac0_mdc_groups[] = { + "gpio5", +}; + +static const char *const emac0_mdio_groups[] = { + "gpio6", +}; + +static const char * const emac0_ptp_aux_groups[] = { + "gpio24", "gpio31", "gpio32", "gpio79", +}; + +static const char * const emac0_ptp_pps_groups[] = { + "gpio24", "gpio29", "gpio30", "gpio79", +}; + +static const char *const gcc_gp1_groups[] = { + "gpio35", "gpio84", +}; + +static const char *const gcc_gp2_groups[] = { + "gpio36", "gpio81", +}; + +static const char *const gcc_gp3_groups[] = { + "gpio69", "gpio82", +}; + +static const char *const gcc_gp4_groups[] = { + "gpio68", "gpio83", +}; + +static const char *const gcc_gp5_groups[] = { + "gpio76", "gpio77", +}; + +static const char * const hs0_mi2s_groups[] = { + "gpio106", "gpio107", "gpio108", "gpio109", +}; + +static const char * const hs1_mi2s_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48", +}; + +static const char * const hs2_mi2s_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", +}; + +static const char * const ibi_i3c_groups[] = { + "gpio17", "gpio18", "gpio19", "gpio20", "gpio37", "gpio38", + "gpio39", "gpio40", +}; + +static const char *const jitter_bist_groups[] = { + "gpio97", +}; + +static const char *const mdp0_vsync0_groups[] = { + "gpio89", +}; + +static const char *const mdp0_vsync1_groups[] = { + "gpio90", +}; + +static const char *const mdp0_vsync3_groups[] = { + "gpio91", +}; + +static const char *const mdp0_vsync6_groups[] = { + "gpio80", +}; + +static const char *const mdp0_vsync7_groups[] = { + "gpio81", +}; + +static const char *const mdp_vsync_groups[] = { + "gpio42", "gpio52", "gpio32", +}; + +static const char *const mi2s1_data0_groups[] = { + "gpio100", +}; + +static const char *const mi2s1_data1_groups[] = { + "gpio101", +}; + +static const char *const mi2s1_sck_groups[] = { + "gpio98", +}; + +static const char *const mi2s1_ws_groups[] = { + "gpio99", +}; + +static const char *const mi2s2_data0_groups[] = { + "gpio104", +}; + +static const char *const mi2s2_data1_groups[] = { + "gpio105", +}; + +static const char *const mi2s2_sck_groups[] = { + "gpio102", +}; + +static const char *const mi2s2_ws_groups[] = { + "gpio103", +}; + +static const char *const mi2s_mclk0_groups[] = { + "gpio97", +}; + +static const char *const mi2s_mclk1_groups[] = { + "gpio109", +}; + +static const char *const pcie0_clkreq_groups[] = { + "gpio1", +}; + +static const char *const pcie1_clkreq_groups[] = { + "gpio22", +}; + +static const char *const phase_flag_groups[] = { + "gpio66", "gpio56", "gpio118", "gpio117", "gpio116", + "gpio3", "gpio114", "gpio113", "gpio112", "gpio111", + "gpio110", "gpio28", "gpio55", "gpio108", "gpio107", + "gpio106", "gpio105", "gpio104", "gpio103", "gpio102", + "gpio101", "gpio100", "gpio99", "gpio125", "gpio98", + "gpio54", "gpio25", "gpio26", "gpio122", "gpio121", + "gpio120", "gpio9", +}; + +static const char *const pll_bist_groups[] = { + "gpio107", +}; + +static const char *const pll_clk_groups[] = { + "gpio74", +}; + 
+static const char *const prng_rosc0_groups[] = { + "gpio57", +}; + +static const char *const prng_rosc1_groups[] = { + "gpio58", +}; + +static const char *const prng_rosc2_groups[] = { + "gpio59", +}; + +static const char *const prng_rosc3_groups[] = { + "gpio60", +}; + +static const char *const qdss_cti_groups[] = { + "gpio4", "gpio5", "gpio23", "gpio24", "gpio49", "gpio50", + "gpio51", "gpio52", +}; + +static const char *const qdss_gpio_groups[] = { + "gpio57", "gpio58", "gpio97", "gpio106", + "gpio59", "gpio107", "gpio60", "gpio108", + "gpio36", "gpio100", "gpio61", "gpio101", + "gpio62", "gpio102", "gpio33", "gpio103", + "gpio34", "gpio104", "gpio75", "gpio105", + "gpio72", "gpio109", "gpio71", "gpio110", + "gpio70", "gpio111", "gpio63", "gpio112", + "gpio64", "gpio113", "gpio65", "gpio114", + "gpio73", "gpio98", "gpio74", "gpio99", +}; + +static const char *const qup0_se0_groups[] = { + "gpio17", "gpio18", "gpio19", "gpio20", +}; + +static const char *const qup0_se1_groups[] = { + "gpio19", "gpio20", "gpio17", "gpio18", +}; + +static const char *const qup0_se2_groups[] = { + "gpio33", "gpio34", "gpio35", "gpio36", +}; + +static const char *const qup0_se3_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28", +}; + +static const char *const qup0_se4_groups[] = { + "gpio29", "gpio30", "gpio31", "gpio32", +}; + +static const char *const qup0_se5_groups[] = { + "gpio21", "gpio22", "gpio23", "gpio24", +}; + +static const char *const qup0_se6_groups[] = { + "gpio80", "gpio81", "gpio82", "gpio83", +}; + +static const char *const qup0_se7_groups[] = { + "gpio43", "gpio44", "gpio43", "gpio44", +}; + +static const char *const qup1_se0_groups[] = { + "gpio37", "gpio38", "gpio39", "gpio40", +}; + +static const char *const qup1_se1_groups[] = { + "gpio39", "gpio40", "gpio37", "gpio38", +}; + +static const char *const qup1_se2_groups[] = { + "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", +}; + +static const char *const qup1_se3_groups[] = { + "gpio41", "gpio42", "gpio41", "gpio42", +}; + +static const char *const qup1_se4_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48", +}; + +static const char *const qup1_se5_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", +}; + +static const char *const qup1_se6_groups[] = { + "gpio89", "gpio90", "gpio91", "gpio92", +}; + +static const char *const qup1_se7_groups[] = { + "gpio91", "gpio92", "gpio89", "gpio90", +}; + +static const char *const qup2_se0_groups[] = { + "gpio10", "gpio11", "gpio12", "gpio13", + "gpio14", "gpio15", "gpio16", +}; + +static const char *const sailss_emac0_groups[] = { + "gpio15", "gpio16", +}; + +static const char *const sailss_ospi_groups[] = { + "gpio15", "gpio16", +}; + +static const char *const sgmii_phy_groups[] = { + "gpio4", +}; + +static const char *const tb_trig_groups[] = { + "gpio14", +}; + +static const char *const tgu_ch0_groups[] = { + "gpio43", +}; + +static const char *const tgu_ch1_groups[] = { + "gpio44", +}; + +static const char *const tgu_ch2_groups[] = { + "gpio29", +}; + +static const char *const tgu_ch3_groups[] = { + "gpio30", +}; + +static const char *const tsense_pwm1_groups[] = { + "gpio79", +}; + +static const char *const tsense_pwm2_groups[] = { + "gpio78", +}; + +static const char *const tsense_pwm3_groups[] = { + "gpio77", +}; + +static const char *const tsense_pwm4_groups[] = { + "gpio76", +}; + +static const char *const usb2phy_ac_groups[] = { + "gpio7", "gpio8", +}; + +static const char *const vsense_trigger_groups[] = { + "gpio67", +}; + +static const struct pinfunction 
qcs8300_functions[] = { + MSM_PIN_FUNCTION(gpio), + MSM_PIN_FUNCTION(aoss_cti), + MSM_PIN_FUNCTION(atest_char), + MSM_PIN_FUNCTION(atest_usb2), + MSM_PIN_FUNCTION(audio_ref), + MSM_PIN_FUNCTION(cam_mclk), + MSM_PIN_FUNCTION(cci_async), + MSM_PIN_FUNCTION(cci_i2c_scl), + MSM_PIN_FUNCTION(cci_i2c_sda), + MSM_PIN_FUNCTION(cci_timer), + MSM_PIN_FUNCTION(cri_trng), + MSM_PIN_FUNCTION(dbg_out), + MSM_PIN_FUNCTION(ddr_bist), + MSM_PIN_FUNCTION(ddr_pxi0), + MSM_PIN_FUNCTION(ddr_pxi1), + MSM_PIN_FUNCTION(ddr_pxi2), + MSM_PIN_FUNCTION(ddr_pxi3), + MSM_PIN_FUNCTION(edp0_hot), + MSM_PIN_FUNCTION(edp0_lcd), + MSM_PIN_FUNCTION(edp1_lcd), + MSM_PIN_FUNCTION(egpio), + MSM_PIN_FUNCTION(emac0_mcg0), + MSM_PIN_FUNCTION(emac0_mcg1), + MSM_PIN_FUNCTION(emac0_mcg2), + MSM_PIN_FUNCTION(emac0_mcg3), + MSM_PIN_FUNCTION(emac0_mdc), + MSM_PIN_FUNCTION(emac0_mdio), + MSM_PIN_FUNCTION(emac0_ptp_aux), + MSM_PIN_FUNCTION(emac0_ptp_pps), + MSM_PIN_FUNCTION(gcc_gp1), + MSM_PIN_FUNCTION(gcc_gp2), + MSM_PIN_FUNCTION(gcc_gp3), + MSM_PIN_FUNCTION(gcc_gp4), + MSM_PIN_FUNCTION(gcc_gp5), + MSM_PIN_FUNCTION(hs0_mi2s), + MSM_PIN_FUNCTION(hs1_mi2s), + MSM_PIN_FUNCTION(hs2_mi2s), + MSM_PIN_FUNCTION(ibi_i3c), + MSM_PIN_FUNCTION(jitter_bist), + MSM_PIN_FUNCTION(mdp0_vsync0), + MSM_PIN_FUNCTION(mdp0_vsync1), + MSM_PIN_FUNCTION(mdp0_vsync3), + MSM_PIN_FUNCTION(mdp0_vsync6), + MSM_PIN_FUNCTION(mdp0_vsync7), + MSM_PIN_FUNCTION(mdp_vsync), + MSM_PIN_FUNCTION(mi2s1_data0), + MSM_PIN_FUNCTION(mi2s1_data1), + MSM_PIN_FUNCTION(mi2s1_sck), + MSM_PIN_FUNCTION(mi2s1_ws), + MSM_PIN_FUNCTION(mi2s2_data0), + MSM_PIN_FUNCTION(mi2s2_data1), + MSM_PIN_FUNCTION(mi2s2_sck), + MSM_PIN_FUNCTION(mi2s2_ws), + MSM_PIN_FUNCTION(mi2s_mclk0), + MSM_PIN_FUNCTION(mi2s_mclk1), + MSM_PIN_FUNCTION(pcie0_clkreq), + MSM_PIN_FUNCTION(pcie1_clkreq), + MSM_PIN_FUNCTION(phase_flag), + MSM_PIN_FUNCTION(pll_bist), + MSM_PIN_FUNCTION(pll_clk), + MSM_PIN_FUNCTION(prng_rosc0), + MSM_PIN_FUNCTION(prng_rosc1), + MSM_PIN_FUNCTION(prng_rosc2), + MSM_PIN_FUNCTION(prng_rosc3), + MSM_PIN_FUNCTION(qdss_cti), + MSM_PIN_FUNCTION(qdss_gpio), + MSM_PIN_FUNCTION(qup0_se0), + MSM_PIN_FUNCTION(qup0_se1), + MSM_PIN_FUNCTION(qup0_se2), + MSM_PIN_FUNCTION(qup0_se3), + MSM_PIN_FUNCTION(qup0_se4), + MSM_PIN_FUNCTION(qup0_se5), + MSM_PIN_FUNCTION(qup0_se6), + MSM_PIN_FUNCTION(qup0_se7), + MSM_PIN_FUNCTION(qup1_se0), + MSM_PIN_FUNCTION(qup1_se1), + MSM_PIN_FUNCTION(qup1_se2), + MSM_PIN_FUNCTION(qup1_se3), + MSM_PIN_FUNCTION(qup1_se4), + MSM_PIN_FUNCTION(qup1_se5), + MSM_PIN_FUNCTION(qup1_se6), + MSM_PIN_FUNCTION(qup1_se7), + MSM_PIN_FUNCTION(qup2_se0), + MSM_PIN_FUNCTION(sailss_emac0), + MSM_PIN_FUNCTION(sailss_ospi), + MSM_PIN_FUNCTION(sgmii_phy), + MSM_PIN_FUNCTION(tb_trig), + MSM_PIN_FUNCTION(tgu_ch0), + MSM_PIN_FUNCTION(tgu_ch1), + MSM_PIN_FUNCTION(tgu_ch2), + MSM_PIN_FUNCTION(tgu_ch3), + MSM_PIN_FUNCTION(tsense_pwm1), + MSM_PIN_FUNCTION(tsense_pwm2), + MSM_PIN_FUNCTION(tsense_pwm3), + MSM_PIN_FUNCTION(tsense_pwm4), + MSM_PIN_FUNCTION(usb2phy_ac), + MSM_PIN_FUNCTION(vsense_trigger), +}; + +/* + * Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup qcs8300_groups[] = { + [0] = PINGROUP(0, _, _, _, _, _, _, _, _, _, _, _), + [1] = PINGROUP(1, pcie0_clkreq, _, _, _, _, _, _, _, _, _, _), + [2] = PINGROUP(2, _, _, _, _, _, _, _, _, _, _, _), + [3] = PINGROUP(3, phase_flag, _, _, _, _, _, _, _, _, _, _), + [4] = PINGROUP(4, sgmii_phy, qdss_cti, _, _, _, _, _, _, _, _, _), + [5] = PINGROUP(5, emac0_mdc, qdss_cti, _, _, _, _, _, _, _, _, _), + [6] = PINGROUP(6, emac0_mdio, _, _, _, _, _, _, _, _, _, _), + [7] = PINGROUP(7, usb2phy_ac, _, _, _, _, _, _, _, _, _, _), + [8] = PINGROUP(8, usb2phy_ac, _, _, _, _, _, _, _, _, _, _), + [9] = PINGROUP(9, phase_flag, _, _, _, _, _, _, _, _, _, _), + [10] = PINGROUP(10, qup2_se0, emac0_mcg0, _, _, _, _, _, _, _, _, _), + [11] = PINGROUP(11, qup2_se0, emac0_mcg1, _, _, _, _, _, _, _, _, _), + [12] = PINGROUP(12, qup2_se0, _, _, _, _, _, _, _, _, _, _), + [13] = PINGROUP(13, qup2_se0, _, _, _, _, _, _, _, _, _, _), + [14] = PINGROUP(14, qup2_se0, tb_trig, _, _, _, _, _, _, _, _, _), + [15] = PINGROUP(15, qup2_se0, _, sailss_ospi, sailss_emac0, _, _, _, _, _, _, _), + [16] = PINGROUP(16, qup2_se0, _, _, sailss_ospi, sailss_emac0, _, _, _, _, _, _), + [17] = PINGROUP(17, qup0_se0, qup0_se1, ibi_i3c, _, _, _, _, _, _, _, _), + [18] = PINGROUP(18, qup0_se0, qup0_se1, ibi_i3c, _, _, _, _, _, _, _, _), + [19] = PINGROUP(19, qup0_se1, qup0_se0, cci_timer, ibi_i3c, _, _, _, _, _, _, _), + [20] = PINGROUP(20, qup0_se1, qup0_se0, cci_timer, ibi_i3c, _, _, _, _, _, _, _), + [21] = PINGROUP(21, qup0_se5, cci_timer, _, _, _, _, _, _, _, _, _), + [22] = PINGROUP(22, pcie1_clkreq, qup0_se5, cci_timer, _, _, _, _, _, _, _, _), + [23] = PINGROUP(23, qup0_se5, cci_timer, qdss_cti, _, _, _, _, _, _, _, _), + [24] = PINGROUP(24, qup0_se5, emac0_ptp_aux, emac0_ptp_pps, qdss_cti, + emac0_mcg2, _, _, _, _, _, _), + [25] = PINGROUP(25, qup0_se3, phase_flag, _, _, _, _, _, _, _, _, _), + [26] = PINGROUP(26, qup0_se3, phase_flag, _, _, _, _, _, _, _, _, _), + [27] = PINGROUP(27, qup0_se3, _, _, _, _, _, _, _, _, _, _), + [28] = PINGROUP(28, qup0_se3, phase_flag, _, _, _, _, _, _, _, _, _), + [29] = PINGROUP(29, qup0_se4, cci_i2c_sda, cci_async, emac0_ptp_pps, + tgu_ch2, _, _, _, _, _, _), + [30] = PINGROUP(30, qup0_se4, cci_i2c_scl, cci_async, emac0_ptp_pps, + tgu_ch3, _, _, _, _, _, _), + [31] = PINGROUP(31, qup0_se4, cci_i2c_sda, cci_async, emac0_ptp_aux, _, _, _, _, _, _, _), + [32] = PINGROUP(32, qup0_se4, cci_i2c_scl, emac0_ptp_aux, mdp_vsync, _, _, _, _, _, _, _), + [33] = PINGROUP(33, qup0_se2, qdss_gpio, _, _, _, _, _, _, _, _, _), + [34] = PINGROUP(34, qup0_se2, qdss_gpio, _, _, _, _, _, _, _, _, _), + [35] = PINGROUP(35, qup0_se2, gcc_gp1, _, _, _, _, _, _, _, _, _), + [36] = PINGROUP(36, qup0_se2, gcc_gp2, qdss_gpio, _, _, _, _, _, _, _, _), + [37] = PINGROUP(37, qup1_se0, ibi_i3c, qup1_se1, aoss_cti, _, _, _, _, _, _, _), + [38] = PINGROUP(38, qup1_se0, ibi_i3c, qup1_se1, aoss_cti, _, _, _, _, _, _, _), + [39] = PINGROUP(39, qup1_se1, ibi_i3c, qup1_se0, aoss_cti, _, _, _, _, _, _, _), + [40] = PINGROUP(40, qup1_se1, ibi_i3c, qup1_se0, aoss_cti, _, _, _, _, _, _, _), + [41] = PINGROUP(41, qup1_se3, _, _, _, _, _, _, _, _, _, _), + [42] = PINGROUP(42, qup1_se3, _, mdp_vsync, _, _, _, _, _, _, _, _), + [43] = PINGROUP(43, qup0_se7, _, tgu_ch0, _, _, _, _, _, _, _, _), + [44] = PINGROUP(44, qup0_se7, _, tgu_ch1, _, _, _, _, _, _, _, _), + [45] = PINGROUP(45, qup1_se4, hs1_mi2s, _, _, _, _, _, _, _, _, _), + [46] = PINGROUP(46, qup1_se4, hs1_mi2s, _, _, _, _, _, _, _, _, _), + [47] 
= PINGROUP(47, qup1_se4, hs1_mi2s, _, _, _, _, _, _, _, _, _), + [48] = PINGROUP(48, qup1_se4, hs1_mi2s, edp0_lcd, _, _, _, _, _, _, _, _), + [49] = PINGROUP(49, qup1_se5, hs2_mi2s, cci_timer, qdss_cti, edp1_lcd, + ddr_pxi1, _, _, _, _, _), + [50] = PINGROUP(50, qup1_se5, hs2_mi2s, cci_timer, qdss_cti, _, ddr_pxi1, _, _, _, _, _), + [51] = PINGROUP(51, qup1_se5, hs2_mi2s, qdss_cti, _, _, _, _, _, _, _, _), + [52] = PINGROUP(52, qup1_se5, hs2_mi2s, qdss_cti, mdp_vsync, ddr_pxi2, _, _, _, _, _, _), + [53] = PINGROUP(53, ddr_bist, _, _, _, _, _, _, _, _, _, _), + [54] = PINGROUP(54, cci_i2c_sda, phase_flag, ddr_bist, _, _, _, _, _, _, _, _), + [55] = PINGROUP(55, cci_i2c_scl, phase_flag, ddr_bist, _, _, _, _, _, _, _, _), + [56] = PINGROUP(56, phase_flag, ddr_bist, _, _, _, _, _, _, _, _, _), + [57] = PINGROUP(57, cci_i2c_sda, prng_rosc0, qdss_gpio, _, _, _, _, _, _, _, _), + [58] = PINGROUP(58, cci_i2c_scl, prng_rosc1, qdss_gpio, _, _, _, _, _, _, _, _), + [59] = PINGROUP(59, cci_i2c_sda, prng_rosc2, qdss_gpio, _, _, _, _, _, _, _, _), + [60] = PINGROUP(60, cci_i2c_scl, prng_rosc3, qdss_gpio, _, _, _, _, _, _, _, _), + [61] = PINGROUP(61, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _, _, _), + [62] = PINGROUP(62, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _, _, _), + [63] = PINGROUP(63, cci_timer, cci_async, qdss_gpio, atest_usb2, _, _, _, _, _, _, _), + [64] = PINGROUP(64, cci_timer, cci_async, qdss_gpio, atest_usb2, _, _, _, _, _, _, _), + [65] = PINGROUP(65, cci_timer, cci_async, qdss_gpio, atest_usb2, _, _, _, _, _, _, _), + [66] = PINGROUP(66, phase_flag, _, atest_char, _, _, _, _, _, _, _, _), + [67] = PINGROUP(67, cam_mclk, vsense_trigger, atest_usb2, _, _, _, _, _, _, _, _), + [68] = PINGROUP(68, cam_mclk, gcc_gp4, atest_usb2, ddr_pxi0, _, _, _, _, _, _, _), + [69] = PINGROUP(69, cam_mclk, gcc_gp3, atest_usb2, ddr_pxi0, _, _, _, _, _, _, _), + [70] = PINGROUP(70, qdss_gpio, atest_char, _, _, _, _, _, _, _, _, _), + [71] = PINGROUP(71, qdss_gpio, atest_char, _, _, _, _, _, _, _, _, _), + [72] = PINGROUP(72, qdss_gpio, atest_char, _, _, _, _, _, _, _, _, _), + [73] = PINGROUP(73, _, qdss_gpio, _, _, _, _, _, _, _, _, _), + [74] = PINGROUP(74, pll_clk, qdss_gpio, atest_usb2, _, _, _, _, _, _, _, _), + [75] = PINGROUP(75, _, dbg_out, qdss_gpio, atest_usb2, _, _, _, _, _, _, _), + [76] = PINGROUP(76, gcc_gp5, tsense_pwm4, _, _, _, _, _, _, _, _, _), + [77] = PINGROUP(77, gcc_gp5, tsense_pwm3, _, _, _, _, _, _, _, _, _), + [78] = PINGROUP(78, tsense_pwm2, _, _, _, _, _, _, _, _, _, _), + [79] = PINGROUP(79, emac0_ptp_aux, emac0_ptp_pps, emac0_mcg3, _, + tsense_pwm1, _, _, _, _, _, _), + [80] = PINGROUP(80, qup0_se6, mdp0_vsync6, _, atest_usb2, ddr_pxi3, _, _, _, _, _, _), + [81] = PINGROUP(81, qup0_se6, mdp0_vsync7, gcc_gp2, _, atest_usb2, ddr_pxi3, _, _, _, _, _), + [82] = PINGROUP(82, qup0_se6, gcc_gp3, _, _, _, _, _, _, _, _, _), + [83] = PINGROUP(83, qup0_se6, gcc_gp4, _, atest_usb2, ddr_pxi2, _, _, _, _, _, _), + [84] = PINGROUP(84, qup1_se2, gcc_gp1, _, atest_usb2, _, _, _, _, _, _, _), + [85] = PINGROUP(85, qup1_se2, _, atest_usb2, _, _, _, _, _, _, _, _), + [86] = PINGROUP(86, qup1_se2, _, _, _, _, _, _, _, _, _, _), + [87] = PINGROUP(87, qup1_se2, _, atest_usb2, _, _, _, _, _, _, _, _), + [88] = PINGROUP(88, qup1_se2, _, _, _, _, _, _, _, _, _, _), + [89] = PINGROUP(89, qup1_se6, qup1_se7, mdp0_vsync0, _, _, _, _, _, _, _, _), + [90] = PINGROUP(90, qup1_se6, qup1_se7, mdp0_vsync1, cri_trng, _, _, _, _, _, _, _), + [91] = PINGROUP(91, qup1_se7, qup1_se6, mdp0_vsync3, cri_trng, _, _, 
_, _, _, _, _), + [92] = PINGROUP(92, qup1_se7, qup1_se6, cri_trng, _, atest_usb2, _, _, _, _, _, _), + [93] = PINGROUP(93, atest_char, _, _, _, _, _, _, _, _, _, _), + [94] = PINGROUP(94, edp0_hot, _, _, _, _, _, _, _, _, _, _), + [95] = PINGROUP(95, _, _, _, _, _, _, _, _, _, _, _), + [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _, _, _), + [97] = PINGROUP(97, mi2s_mclk0, jitter_bist, qdss_gpio, _, _, _, _, _, _, _, _), + [98] = PINGROUP(98, mi2s1_sck, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [99] = PINGROUP(99, mi2s1_ws, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [100] = PINGROUP(100, mi2s1_data0, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [101] = PINGROUP(101, mi2s1_data1, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [102] = PINGROUP(102, mi2s2_sck, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [103] = PINGROUP(103, mi2s2_ws, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [104] = PINGROUP(104, mi2s2_data0, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [105] = PINGROUP(105, mi2s2_data1, audio_ref, phase_flag, _, qdss_gpio, _, _, _, _, _, _), + [106] = PINGROUP(106, hs0_mi2s, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [107] = PINGROUP(107, hs0_mi2s, pll_bist, phase_flag, _, qdss_gpio, _, _, _, _, _, _), + [108] = PINGROUP(108, hs0_mi2s, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _), + [109] = PINGROUP(109, hs0_mi2s, mi2s_mclk1, qdss_gpio, _, _, _, _, _, _, _, _), + [110] = PINGROUP(110, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _, egpio), + [111] = PINGROUP(111, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _, egpio), + [112] = PINGROUP(112, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _, egpio), + [113] = PINGROUP(113, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _, egpio), + [114] = PINGROUP(114, phase_flag, _, qdss_gpio, _, _, _, _, _, _, _, egpio), + [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _, _, egpio), + [116] = PINGROUP(116, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [117] = PINGROUP(117, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [118] = PINGROUP(118, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _, _, egpio), + [120] = PINGROUP(120, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [121] = PINGROUP(121, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [122] = PINGROUP(122, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _, _, egpio), + [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _, _, egpio), + [125] = PINGROUP(125, phase_flag, _, _, _, _, _, _, _, _, _, egpio), + [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _, _, egpio), + [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _, _, egpio), + [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _, _, egpio), + [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _, _, egpio), + [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _, _, egpio), + [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _, _, egpio), + [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _, _, egpio), + [133] = UFS_RESET(ufs_reset, 0x92000), + [134] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x89000, 15, 0), + [135] = SDC_QDSD_PINGROUP(sdc1_clk, 0x89000, 13, 6), + [136] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x89000, 11, 3), + [137] = SDC_QDSD_PINGROUP(sdc1_data, 0x89000, 9, 0), +}; + +static const struct msm_gpio_wakeirq_map qcs8300_pdc_map[] = { + { 0, 169 }, { 1, 174 }, { 2, 221 }, { 3, 176 }, { 4, 171 }, + { 9, 198 }, { 10, 187 }, { 11, 188 }, { 13, 211 }, { 16, 203 }, + { 17, 213 }, { 18, 209 }, { 19, 201 }, { 20, 230 }, { 21, 231 }, + { 22, 175 }, { 
23, 170 }, { 24, 232 }, { 28, 235 }, { 29, 216 }, + { 31, 208 }, { 32, 200 }, { 36, 212 }, { 37, 177 }, { 38, 178 }, + { 39, 184 }, { 40, 185 }, { 42, 186 }, { 44, 194 }, { 45, 173 }, + { 48, 195 }, { 51, 215 }, { 52, 197 }, { 53, 192 }, { 56, 193 }, + { 66, 238 }, { 67, 172 }, { 68, 182 }, { 69, 179 }, { 70, 181 }, + { 71, 202 }, { 72, 183 }, { 73, 189 }, { 74, 196 }, { 75, 190 }, + { 76, 191 }, { 77, 204 }, { 78, 206 }, { 79, 207 }, { 83, 214 }, + { 84, 205 }, { 87, 237 }, { 89, 225 }, { 90, 217 }, { 91, 218 }, + { 92, 226 }, { 93, 227 }, { 94, 228 }, { 95, 236 }, { 97, 199 }, + { 98, 229 }, { 99, 180 }, { 100, 220 }, { 101, 239 }, { 102, 219 }, + { 103, 233 }, { 104, 234 }, { 105, 223 }, { 129, 210 }, { 130, 222 }, +}; + +static const struct msm_pinctrl_soc_data qcs8300_pinctrl = { + .pins = qcs8300_pins, + .npins = ARRAY_SIZE(qcs8300_pins), + .functions = qcs8300_functions, + .nfunctions = ARRAY_SIZE(qcs8300_functions), + .groups = qcs8300_groups, + .ngroups = ARRAY_SIZE(qcs8300_groups), + .ngpios = 133, + .wakeirq_map = qcs8300_pdc_map, + .nwakeirq_map = ARRAY_SIZE(qcs8300_pdc_map), + .egpio_func = 11, +}; + +static int qcs8300_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &qcs8300_pinctrl); +} + +static const struct of_device_id qcs8300_pinctrl_of_match[] = { + { .compatible = "qcom,qcs8300-tlmm", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcs8300_pinctrl_of_match); + +static struct platform_driver qcs8300_pinctrl_driver = { + .driver = { + .name = "qcs8300-tlmm", + .of_match_table = qcs8300_pinctrl_of_match, + }, + .probe = qcs8300_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init qcs8300_pinctrl_init(void) +{ + return platform_driver_register(&qcs8300_pinctrl_driver); +} +arch_initcall(qcs8300_pinctrl_init); + +static void __exit qcs8300_pinctrl_exit(void) +{ + platform_driver_unregister(&qcs8300_pinctrl_driver); +} +module_exit(qcs8300_pinctrl_exit); + +MODULE_DESCRIPTION("QTI QCS8300 pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c index 4d2f6f495163..b5808fcfb13c 100644 --- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c +++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c @@ -145,7 +145,7 @@ static struct platform_driver qdf2xxx_pinctrl_driver = { .acpi_match_table = qdf2xxx_acpi_ids, }, .probe = qdf2xxx_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init qdf2xxx_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c index da4f940bc8d4..47bc529ef550 100644 --- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c +++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c @@ -1248,7 +1248,7 @@ static struct platform_driver qdu1000_tlmm_driver = { .of_match_table = qdu1000_tlmm_of_match, }, .probe = qdu1000_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init qdu1000_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c index 5459c0c681a2..8fdea25d8d67 100644 --- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c +++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c @@ -1530,7 +1530,7 @@ static struct platform_driver sa8775p_pinctrl_driver = { .of_match_table = sa8775p_pinctrl_of_match, }, .probe = sa8775p_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sa8775p_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sar2130p.c 
b/drivers/pinctrl/qcom/pinctrl-sar2130p.c new file mode 100644 index 000000000000..19a2e37826c7 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sar2130p.c @@ -0,0 +1,1505 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Linaro Limited + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "pinctrl-msm.h" + +#define REG_SIZE 0x1000 + +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .grp = PINCTRL_PINGROUP("gpio" #id, \ + gpio##id##_pins, \ + ARRAY_SIZE(gpio##id##_pins)), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_SIZE * id, \ + .io_reg = 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = 0x8 + REG_SIZE * id, \ + .intr_status_reg = 0xc + REG_SIZE * id, \ + .intr_target_reg = 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 4, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +static const struct pinctrl_pin_desc sar2130p_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, 
"GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, "GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "GPIO_150"), + PINCTRL_PIN(151, "GPIO_151"), + PINCTRL_PIN(152, "GPIO_152"), + PINCTRL_PIN(153, "GPIO_153"), + PINCTRL_PIN(154, "GPIO_154"), + PINCTRL_PIN(155, "GPIO_155"), + PINCTRL_PIN(156, "SDC1_RCLK"), + PINCTRL_PIN(157, "SDC1_CLK"), + PINCTRL_PIN(158, "SDC1_CMD"), + PINCTRL_PIN(159, "SDC1_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] 
= { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); 
+DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); +DECLARE_MSM_GPIO_PINS(133); +DECLARE_MSM_GPIO_PINS(134); +DECLARE_MSM_GPIO_PINS(135); +DECLARE_MSM_GPIO_PINS(136); +DECLARE_MSM_GPIO_PINS(137); +DECLARE_MSM_GPIO_PINS(138); +DECLARE_MSM_GPIO_PINS(139); +DECLARE_MSM_GPIO_PINS(140); +DECLARE_MSM_GPIO_PINS(141); +DECLARE_MSM_GPIO_PINS(142); +DECLARE_MSM_GPIO_PINS(143); +DECLARE_MSM_GPIO_PINS(144); +DECLARE_MSM_GPIO_PINS(145); +DECLARE_MSM_GPIO_PINS(146); +DECLARE_MSM_GPIO_PINS(147); +DECLARE_MSM_GPIO_PINS(148); +DECLARE_MSM_GPIO_PINS(149); +DECLARE_MSM_GPIO_PINS(150); +DECLARE_MSM_GPIO_PINS(151); +DECLARE_MSM_GPIO_PINS(152); +DECLARE_MSM_GPIO_PINS(153); +DECLARE_MSM_GPIO_PINS(154); +DECLARE_MSM_GPIO_PINS(155); + +static const unsigned int sdc1_rclk_pins[] = { 156 }; +static const unsigned int sdc1_clk_pins[] = { 157 }; +static const unsigned int sdc1_cmd_pins[] = { 158 }; +static const unsigned int sdc1_data_pins[] = { 159 }; + +enum sar2130p_functions { + msm_mux_gpio, + msm_mux_aoss_cti, + msm_mux_atest_char, + msm_mux_atest_char0, + msm_mux_atest_char1, + msm_mux_atest_char2, + msm_mux_atest_char3, + msm_mux_atest_usb0, + msm_mux_atest_usb00, + msm_mux_atest_usb01, + msm_mux_atest_usb02, + msm_mux_atest_usb03, + msm_mux_audio_ref, + msm_mux_cam_mclk, + msm_mux_cci_async, + msm_mux_cci_i2c, + msm_mux_cci_timer0, + msm_mux_cci_timer1, + msm_mux_cci_timer2, + msm_mux_cci_timer3, + msm_mux_cci_timer4, + msm_mux_cri_trng, + msm_mux_cri_trng0, + msm_mux_cri_trng1, + msm_mux_dbg_out, + msm_mux_ddr_bist, + msm_mux_ddr_pxi0, + msm_mux_ddr_pxi1, + msm_mux_ddr_pxi2, + msm_mux_ddr_pxi3, + msm_mux_dp0_hot, + msm_mux_ext_mclk0, + msm_mux_ext_mclk1, + msm_mux_gcc_gp1, + msm_mux_gcc_gp2, + msm_mux_gcc_gp3, + msm_mux_host2wlan_sol, + msm_mux_i2s0_data0, + msm_mux_i2s0_data1, + msm_mux_i2s0_sck, + msm_mux_i2s0_ws, + msm_mux_ibi_i3c, + msm_mux_jitter_bist, + msm_mux_mdp_vsync, + msm_mux_mdp_vsync0, + msm_mux_mdp_vsync1, + msm_mux_mdp_vsync2, + msm_mux_mdp_vsync3, + msm_mux_pcie0_clkreqn, + msm_mux_pcie1_clkreqn, + msm_mux_phase_flag0, + msm_mux_phase_flag1, + msm_mux_phase_flag10, + msm_mux_phase_flag11, + msm_mux_phase_flag12, + msm_mux_phase_flag13, + msm_mux_phase_flag14, + msm_mux_phase_flag15, + msm_mux_phase_flag16, + msm_mux_phase_flag17, + msm_mux_phase_flag18, + msm_mux_phase_flag19, + msm_mux_phase_flag2, + msm_mux_phase_flag20, + msm_mux_phase_flag21, + msm_mux_phase_flag22, + msm_mux_phase_flag23, + msm_mux_phase_flag24, + msm_mux_phase_flag25, + msm_mux_phase_flag26, + msm_mux_phase_flag27, + msm_mux_phase_flag28, + msm_mux_phase_flag29, + msm_mux_phase_flag3, + msm_mux_phase_flag30, + msm_mux_phase_flag31, + msm_mux_phase_flag4, + msm_mux_phase_flag5, + msm_mux_phase_flag6, + msm_mux_phase_flag7, + msm_mux_phase_flag8, + msm_mux_phase_flag9, + msm_mux_pll_bist, + msm_mux_pll_clk, + msm_mux_prng_rosc0, + msm_mux_prng_rosc1, + msm_mux_prng_rosc2, + msm_mux_prng_rosc3, + msm_mux_qdss_cti, + msm_mux_qdss_gpio, + msm_mux_qdss_gpio0, + msm_mux_qdss_gpio1, + msm_mux_qdss_gpio10, + msm_mux_qdss_gpio11, + msm_mux_qdss_gpio12, + msm_mux_qdss_gpio13, + msm_mux_qdss_gpio14, + msm_mux_qdss_gpio15, + msm_mux_qdss_gpio2, + msm_mux_qdss_gpio3, + msm_mux_qdss_gpio4, + msm_mux_qdss_gpio5, + msm_mux_qdss_gpio6, + msm_mux_qdss_gpio7, + msm_mux_qdss_gpio8, + msm_mux_qdss_gpio9, + msm_mux_qspi0, + msm_mux_qspi1, + msm_mux_qspi2, + msm_mux_qspi3, + msm_mux_qspi_clk, 
+ msm_mux_qspi_cs0, + msm_mux_qspi_cs1, + msm_mux_qup0, + msm_mux_qup1, + msm_mux_qup2, + msm_mux_qup3, + msm_mux_qup4, + msm_mux_qup5, + msm_mux_qup6, + msm_mux_qup7, + msm_mux_qup8, + msm_mux_qup9, + msm_mux_qup10, + msm_mux_qup11, + msm_mux_tb_trig, + msm_mux_tgu_ch0, + msm_mux_tgu_ch1, + msm_mux_tgu_ch2, + msm_mux_tgu_ch3, + msm_mux_tmess_prng0, + msm_mux_tmess_prng1, + msm_mux_tmess_prng2, + msm_mux_tmess_prng3, + msm_mux_tsense_pwm1, + msm_mux_tsense_pwm2, + msm_mux_usb0_phy, + msm_mux_vsense_trigger, + msm_mux__, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146", + "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152", + "gpio153", "gpio154", "gpio155", +}; + +static const char * const aoss_cti_groups[] = { + "gpio20", "gpio21", "gpio22", "gpio23", +}; + +static const char * const atest_char_groups[] = { + "gpio45", +}; + +static const char * const atest_char0_groups[] = { + "gpio90", +}; + +static const char * const atest_char1_groups[] = { + "gpio89", +}; + +static const char * const atest_char2_groups[] = { + "gpio88", +}; + +static const char * const atest_char3_groups[] = { + "gpio87", +}; + +static const char * const atest_usb0_groups[] = { + "gpio26", +}; + +static const char * const atest_usb00_groups[] = { + "gpio110", +}; + +static const char * const atest_usb01_groups[] = { + "gpio109", +}; + +static const char * const atest_usb02_groups[] = { + "gpio27", +}; + +static const char * const atest_usb03_groups[] = { + "gpio60", +}; + +static const char * const audio_ref_groups[] = { + "gpio103", +}; + +static const char * const cam_mclk_groups[] = { + "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", + "gpio76", +}; + +static const char * const cci_async_groups[] = { + "gpio80", "gpio81", "gpio82", +}; + +static const char * const cci_i2c_groups[] = { + "gpio67", "gpio68", "gpio78", "gpio79", "gpio80", "gpio81", "gpio83", + "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", + "gpio91", "gpio92", +}; + 
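The register offsets wired into the PINGROUP() entries later in this file all derive from the REG_SIZE (0x1000) stride defined near the top of it: each GPIO owns a 4 KiB TLMM window, with the control, I/O, and interrupt registers at fixed offsets inside that window (note that .intr_target_reg aliases the 0x8 config register and is distinguished only by its bit position). A short standalone sketch of that arithmetic, with an illustrative print_gpio_regs helper that is not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define REG_SIZE 0x1000u        /* one 4 KiB TLMM window per GPIO */

/* Illustrative helper, not driver code: dump the per-pin offsets that
 * the PINGROUP() macro earlier in this file computes from the GPIO id. */
static void print_gpio_regs(unsigned int id)
{
        uint32_t ctl  = REG_SIZE * id;  /* .ctl_reg: mux/pull/drive        */
        uint32_t io   = ctl + 0x4;      /* .io_reg: input/output data      */
        uint32_t icfg = ctl + 0x8;      /* .intr_cfg_reg (also hosts the   */
                                        /*  intr_target bitfield)          */
        uint32_t ists = ctl + 0xc;      /* .intr_status_reg                */

        printf("gpio%u: ctl=%#x io=%#x intr_cfg=%#x intr_status=%#x\n",
               id, ctl, io, icfg, ists);
}

int main(void)
{
        print_gpio_regs(0);     /* ctl=0x0,     io=0x4,     ...  */
        print_gpio_regs(92);    /* ctl=0x5c000, io=0x5c004, ...  */
        return 0;
}

The SDC_QDSD_PINGROUP() entries sit outside this stride entirely: they carry absolute ctl offsets (0xa0000/0xa1000 in this file's table) and zero or -1 for the I/O and interrupt fields, since those pads only expose pull and drive-strength controls.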
+static const char * const cci_timer0_groups[] = { + "gpio77", +}; + +static const char * const cci_timer1_groups[] = { + "gpio78", +}; + +static const char * const cci_timer2_groups[] = { + "gpio79", +}; + +static const char * const cci_timer3_groups[] = { + "gpio80", +}; + +static const char * const cci_timer4_groups[] = { + "gpio81", +}; + +static const char * const cri_trng_groups[] = { + "gpio60", +}; + +static const char * const cri_trng0_groups[] = { + "gpio70", +}; + +static const char * const cri_trng1_groups[] = { + "gpio71", +}; + +static const char * const dbg_out_groups[] = { + "gpio59", +}; + +static const char * const ddr_bist_groups[] = { + "gpio4", "gpio5", "gpio100", "gpio103", +}; + +static const char * const ddr_pxi0_groups[] = { + "gpio56", "gpio57", +}; + +static const char * const ddr_pxi1_groups[] = { + "gpio41", "gpio45", +}; + +static const char * const ddr_pxi2_groups[] = { + "gpio48", "gpio55", +}; + +static const char * const ddr_pxi3_groups[] = { + "gpio46", "gpio47", +}; + +static const char * const dp0_hot_groups[] = { + "gpio35", "gpio103", +}; + +static const char * const ext_mclk0_groups[] = { + "gpio104", +}; + +static const char * const ext_mclk1_groups[] = { + "gpio103", +}; + +static const char * const gcc_gp1_groups[] = { + "gpio129", "gpio132", +}; + +static const char * const gcc_gp2_groups[] = { + "gpio130", "gpio135", +}; + +static const char * const gcc_gp3_groups[] = { + "gpio131", "gpio136", +}; + +static const char * const host2wlan_sol_groups[] = { + "gpio111", +}; + +static const char * const i2s0_data0_groups[] = { + "gpio106", +}; + +static const char * const i2s0_data1_groups[] = { + "gpio107", +}; + +static const char * const i2s0_sck_groups[] = { + "gpio105", +}; + +static const char * const i2s0_ws_groups[] = { + "gpio108", +}; + +static const char * const ibi_i3c_groups[] = { + "gpio0", "gpio1", "gpio91", "gpio92", +}; + +static const char * const jitter_bist_groups[] = { + "gpio0", +}; + +static const char * const mdp_vsync_groups[] = { + "gpio12", "gpio13", "gpio41", "gpio49", "gpio50", +}; + +static const char * const mdp_vsync0_groups[] = { + "gpio49", +}; + +static const char * const mdp_vsync1_groups[] = { + "gpio49", +}; + +static const char * const mdp_vsync2_groups[] = { + "gpio50", +}; + +static const char * const mdp_vsync3_groups[] = { + "gpio50", +}; + +static const char * const pcie0_clkreqn_groups[] = { + "gpio56", +}; + +static const char * const pcie1_clkreqn_groups[] = { + "gpio59", +}; + +static const char * const phase_flag0_groups[] = { + "gpio133", +}; + +static const char * const phase_flag1_groups[] = { + "gpio128", +}; + +static const char * const phase_flag10_groups[] = { + "gpio94", +}; + +static const char * const phase_flag11_groups[] = { + "gpio93", +}; + +static const char * const phase_flag12_groups[] = { + "gpio134", +}; + +static const char * const phase_flag13_groups[] = { + "gpio139", +}; + +static const char * const phase_flag14_groups[] = { + "gpio138", +}; + +static const char * const phase_flag15_groups[] = { + "gpio137", +}; + +static const char * const phase_flag16_groups[] = { + "gpio62", +}; + +static const char * const phase_flag17_groups[] = { + "gpio61", +}; + +static const char * const phase_flag18_groups[] = { + "gpio41", +}; + +static const char * const phase_flag19_groups[] = { + "gpio23", +}; + +static const char * const phase_flag2_groups[] = { + "gpio127", +}; + +static const char * const phase_flag20_groups[] = { + "gpio22", +}; + +static const char * const phase_flag21_groups[] 
= { + "gpio21", +}; + +static const char * const phase_flag22_groups[] = { + "gpio19", +}; + +static const char * const phase_flag23_groups[] = { + "gpio18", +}; + +static const char * const phase_flag24_groups[] = { + "gpio17", +}; + +static const char * const phase_flag25_groups[] = { + "gpio16", +}; + +static const char * const phase_flag26_groups[] = { + "gpio13", +}; + +static const char * const phase_flag27_groups[] = { + "gpio12", +}; + +static const char * const phase_flag28_groups[] = { + "gpio3", +}; + +static const char * const phase_flag29_groups[] = { + "gpio2", +}; + +static const char * const phase_flag3_groups[] = { + "gpio126", +}; + +static const char * const phase_flag30_groups[] = { + "gpio149", +}; + +static const char * const phase_flag31_groups[] = { + "gpio148", +}; + +static const char * const phase_flag4_groups[] = { + "gpio151", +}; + +static const char * const phase_flag5_groups[] = { + "gpio150", +}; + +static const char * const phase_flag6_groups[] = { + "gpio98", +}; + +static const char * const phase_flag7_groups[] = { + "gpio97", +}; + +static const char * const phase_flag8_groups[] = { + "gpio96", +}; + +static const char * const phase_flag9_groups[] = { + "gpio95", +}; + +static const char * const pll_bist_groups[] = { + "gpio8", +}; + +static const char * const pll_clk_groups[] = { + "gpio54", +}; + +static const char * const prng_rosc0_groups[] = { + "gpio72", +}; + +static const char * const prng_rosc1_groups[] = { + "gpio73", +}; + +static const char * const prng_rosc2_groups[] = { + "gpio74", +}; + +static const char * const prng_rosc3_groups[] = { + "gpio75", +}; + +static const char * const qdss_cti_groups[] = { + "gpio28", "gpio29", "gpio36", "gpio37", "gpio38", "gpio38", "gpio47", + "gpio48", "gpio53", "gpio53", "gpio105", "gpio106", "gpio154", + "gpio155", +}; + +static const char * const qdss_gpio_groups[] = { + "gpio89", "gpio90", "gpio109", "gpio110", +}; + +static const char * const qdss_gpio0_groups[] = { + "gpio24", "gpio65", +}; + +static const char * const qdss_gpio1_groups[] = { + "gpio25", "gpio66", +}; + +static const char * const qdss_gpio10_groups[] = { + "gpio63", "gpio83", +}; + +static const char * const qdss_gpio11_groups[] = { + "gpio64", "gpio84", +}; + +static const char * const qdss_gpio12_groups[] = { + "gpio39", "gpio85", +}; + +static const char * const qdss_gpio13_groups[] = { + "gpio10", "gpio86", +}; + +static const char * const qdss_gpio14_groups[] = { + "gpio45", "gpio87", +}; + +static const char * const qdss_gpio15_groups[] = { + "gpio11", "gpio88", +}; + +static const char * const qdss_gpio2_groups[] = { + "gpio26", "gpio67", +}; + +static const char * const qdss_gpio3_groups[] = { + "gpio27", "gpio68", +}; + +static const char * const qdss_gpio4_groups[] = { + "gpio30", "gpio77", +}; + +static const char * const qdss_gpio5_groups[] = { + "gpio31", "gpio78", +}; + +static const char * const qdss_gpio6_groups[] = { + "gpio4", "gpio79", +}; + +static const char * const qdss_gpio7_groups[] = { + "gpio5", "gpio80", +}; + +static const char * const qdss_gpio8_groups[] = { + "gpio6", "gpio81", +}; + +static const char * const qdss_gpio9_groups[] = { + "gpio7", "gpio82", +}; + +static const char * const qspi0_groups[] = { + "gpio32", +}; + +static const char * const qspi1_groups[] = { + "gpio33", +}; + +static const char * const qspi2_groups[] = { + "gpio36", +}; + +static const char * const qspi3_groups[] = { + "gpio37", +}; + +static const char * const qspi_clk_groups[] = { + "gpio34", +}; + +static const char * const 
qspi_cs0_groups[] = { + "gpio35", +}; + +static const char * const qspi_cs1_groups[] = { + "gpio38", +}; + +static const char * const qup0_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio93", +}; + +static const char * const qup1_groups[] = { + "gpio2", "gpio3", "gpio61", "gpio62", +}; + +static const char * const qup2_groups[] = { + "gpio12", "gpio13", "gpio22", "gpio23", +}; + +static const char * const qup3_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19", "gpio41", +}; + +static const char * const qup4_groups[] = { + "gpio20", "gpio21", "gpio22", "gpio23", "gpio94", +}; + +static const char * const qup5_groups[] = { + "gpio95", "gpio96", "gpio97", "gpio98", +}; + +static const char * const qup6_groups[] = { + "gpio63", "gpio64", "gpio91", "gpio92", +}; + +static const char * const qup7_groups[] = { + "gpio24", "gpio25", "gpio26", "gpio27", +}; + +static const char * const qup8_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; + +static const char * const qup9_groups[] = { + "gpio34", "gpio35", "gpio109", "gpio110", +}; + +static const char * const qup10_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", +}; + +static const char * const qup11_groups[] = { + "gpio14", "gpio15", "gpio28", "gpio30", +}; + +static const char * const tb_trig_groups[] = { + "gpio69", +}; + +static const char * const tgu_ch0_groups[] = { + "gpio20", +}; + +static const char * const tgu_ch1_groups[] = { + "gpio21", +}; + +static const char * const tgu_ch2_groups[] = { + "gpio22", +}; + +static const char * const tgu_ch3_groups[] = { + "gpio23", +}; + +static const char * const tmess_prng0_groups[] = { + "gpio80", +}; + +static const char * const tmess_prng1_groups[] = { + "gpio79", +}; + +static const char * const tmess_prng2_groups[] = { + "gpio83", +}; + +static const char * const tmess_prng3_groups[] = { + "gpio81", +}; + +static const char * const tsense_pwm1_groups[] = { + "gpio86", +}; + +static const char * const tsense_pwm2_groups[] = { + "gpio86", +}; + +static const char * const usb0_phy_groups[] = { + "gpio100", +}; + +static const char * const vsense_trigger_groups[] = { + "gpio36", +}; + +static const struct pinfunction sar2130p_functions[] = { + MSM_PIN_FUNCTION(gpio), + MSM_PIN_FUNCTION(qup0), + MSM_PIN_FUNCTION(ibi_i3c), + MSM_PIN_FUNCTION(jitter_bist), + MSM_PIN_FUNCTION(qup1), + MSM_PIN_FUNCTION(phase_flag29), + MSM_PIN_FUNCTION(phase_flag28), + MSM_PIN_FUNCTION(qup10), + MSM_PIN_FUNCTION(ddr_bist), + MSM_PIN_FUNCTION(qdss_gpio6), + MSM_PIN_FUNCTION(qdss_gpio7), + MSM_PIN_FUNCTION(qdss_gpio8), + MSM_PIN_FUNCTION(qdss_gpio9), + MSM_PIN_FUNCTION(qup8), + MSM_PIN_FUNCTION(pll_bist), + MSM_PIN_FUNCTION(qdss_gpio13), + MSM_PIN_FUNCTION(qdss_gpio15), + MSM_PIN_FUNCTION(qup2), + MSM_PIN_FUNCTION(mdp_vsync), + MSM_PIN_FUNCTION(phase_flag27), + MSM_PIN_FUNCTION(phase_flag26), + MSM_PIN_FUNCTION(qup11), + MSM_PIN_FUNCTION(qup3), + MSM_PIN_FUNCTION(phase_flag25), + MSM_PIN_FUNCTION(phase_flag24), + MSM_PIN_FUNCTION(phase_flag23), + MSM_PIN_FUNCTION(phase_flag22), + MSM_PIN_FUNCTION(qup4), + MSM_PIN_FUNCTION(aoss_cti), + MSM_PIN_FUNCTION(tgu_ch0), + MSM_PIN_FUNCTION(phase_flag21), + MSM_PIN_FUNCTION(tgu_ch1), + MSM_PIN_FUNCTION(phase_flag20), + MSM_PIN_FUNCTION(tgu_ch2), + MSM_PIN_FUNCTION(phase_flag19), + MSM_PIN_FUNCTION(tgu_ch3), + MSM_PIN_FUNCTION(qup7), + MSM_PIN_FUNCTION(qdss_gpio0), + MSM_PIN_FUNCTION(qdss_gpio1), + MSM_PIN_FUNCTION(qdss_gpio2), + MSM_PIN_FUNCTION(atest_usb0), + MSM_PIN_FUNCTION(qdss_gpio3), + MSM_PIN_FUNCTION(atest_usb02), + MSM_PIN_FUNCTION(qdss_cti), + 
MSM_PIN_FUNCTION(qdss_gpio4), + MSM_PIN_FUNCTION(qdss_gpio5), + MSM_PIN_FUNCTION(qspi0), + MSM_PIN_FUNCTION(qspi1), + MSM_PIN_FUNCTION(qspi_clk), + MSM_PIN_FUNCTION(qup9), + MSM_PIN_FUNCTION(qspi_cs0), + MSM_PIN_FUNCTION(dp0_hot), + MSM_PIN_FUNCTION(qspi2), + MSM_PIN_FUNCTION(vsense_trigger), + MSM_PIN_FUNCTION(qspi3), + MSM_PIN_FUNCTION(qspi_cs1), + MSM_PIN_FUNCTION(qdss_gpio12), + MSM_PIN_FUNCTION(phase_flag18), + MSM_PIN_FUNCTION(ddr_pxi1), + MSM_PIN_FUNCTION(qdss_gpio14), + MSM_PIN_FUNCTION(atest_char), + MSM_PIN_FUNCTION(ddr_pxi3), + MSM_PIN_FUNCTION(ddr_pxi2), + MSM_PIN_FUNCTION(mdp_vsync0), + MSM_PIN_FUNCTION(mdp_vsync1), + MSM_PIN_FUNCTION(mdp_vsync2), + MSM_PIN_FUNCTION(mdp_vsync3), + MSM_PIN_FUNCTION(pll_clk), + MSM_PIN_FUNCTION(pcie0_clkreqn), + MSM_PIN_FUNCTION(ddr_pxi0), + MSM_PIN_FUNCTION(pcie1_clkreqn), + MSM_PIN_FUNCTION(dbg_out), + MSM_PIN_FUNCTION(cri_trng), + MSM_PIN_FUNCTION(atest_usb03), + MSM_PIN_FUNCTION(phase_flag17), + MSM_PIN_FUNCTION(phase_flag16), + MSM_PIN_FUNCTION(qup6), + MSM_PIN_FUNCTION(qdss_gpio10), + MSM_PIN_FUNCTION(qdss_gpio11), + MSM_PIN_FUNCTION(cci_i2c), + MSM_PIN_FUNCTION(cam_mclk), + MSM_PIN_FUNCTION(tb_trig), + MSM_PIN_FUNCTION(cri_trng0), + MSM_PIN_FUNCTION(cri_trng1), + MSM_PIN_FUNCTION(prng_rosc0), + MSM_PIN_FUNCTION(prng_rosc1), + MSM_PIN_FUNCTION(prng_rosc2), + MSM_PIN_FUNCTION(prng_rosc3), + MSM_PIN_FUNCTION(cci_timer0), + MSM_PIN_FUNCTION(cci_timer1), + MSM_PIN_FUNCTION(cci_timer2), + MSM_PIN_FUNCTION(tmess_prng1), + MSM_PIN_FUNCTION(cci_timer3), + MSM_PIN_FUNCTION(cci_async), + MSM_PIN_FUNCTION(tmess_prng0), + MSM_PIN_FUNCTION(cci_timer4), + MSM_PIN_FUNCTION(tmess_prng3), + MSM_PIN_FUNCTION(tmess_prng2), + MSM_PIN_FUNCTION(tsense_pwm1), + MSM_PIN_FUNCTION(tsense_pwm2), + MSM_PIN_FUNCTION(atest_char3), + MSM_PIN_FUNCTION(atest_char2), + MSM_PIN_FUNCTION(qdss_gpio), + MSM_PIN_FUNCTION(atest_char1), + MSM_PIN_FUNCTION(atest_char0), + MSM_PIN_FUNCTION(phase_flag11), + MSM_PIN_FUNCTION(phase_flag10), + MSM_PIN_FUNCTION(qup5), + MSM_PIN_FUNCTION(phase_flag9), + MSM_PIN_FUNCTION(phase_flag8), + MSM_PIN_FUNCTION(phase_flag7), + MSM_PIN_FUNCTION(phase_flag6), + MSM_PIN_FUNCTION(usb0_phy), + MSM_PIN_FUNCTION(ext_mclk1), + MSM_PIN_FUNCTION(audio_ref), + MSM_PIN_FUNCTION(ext_mclk0), + MSM_PIN_FUNCTION(i2s0_sck), + MSM_PIN_FUNCTION(i2s0_data0), + MSM_PIN_FUNCTION(i2s0_data1), + MSM_PIN_FUNCTION(i2s0_ws), + MSM_PIN_FUNCTION(atest_usb01), + MSM_PIN_FUNCTION(atest_usb00), + MSM_PIN_FUNCTION(host2wlan_sol), + MSM_PIN_FUNCTION(phase_flag3), + MSM_PIN_FUNCTION(phase_flag2), + MSM_PIN_FUNCTION(phase_flag1), + MSM_PIN_FUNCTION(gcc_gp1), + MSM_PIN_FUNCTION(gcc_gp2), + MSM_PIN_FUNCTION(gcc_gp3), + MSM_PIN_FUNCTION(phase_flag0), + MSM_PIN_FUNCTION(phase_flag12), + MSM_PIN_FUNCTION(phase_flag15), + MSM_PIN_FUNCTION(phase_flag14), + MSM_PIN_FUNCTION(phase_flag13), + MSM_PIN_FUNCTION(phase_flag31), + MSM_PIN_FUNCTION(phase_flag30), + MSM_PIN_FUNCTION(phase_flag5), + MSM_PIN_FUNCTION(phase_flag4), +}; + +/* Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup sar2130p_groups[] = { + [0] = PINGROUP(0, qup0, ibi_i3c, jitter_bist, _, _, _, _, _, _), + [1] = PINGROUP(1, qup0, ibi_i3c, _, _, _, _, _, _, _), + [2] = PINGROUP(2, qup0, qup1, phase_flag29, _, _, _, _, _, _), + [3] = PINGROUP(3, qup0, qup1, phase_flag28, _, _, _, _, _, _), + [4] = PINGROUP(4, qup10, ddr_bist, qdss_gpio6, _, _, _, _, _, _), + [5] = PINGROUP(5, qup10, ddr_bist, qdss_gpio7, _, _, _, _, _, _), + [6] = PINGROUP(6, qup10, qdss_gpio8, _, _, _, _, _, _, _), + [7] = PINGROUP(7, qup10, qdss_gpio9, _, _, _, _, _, _, _), + [8] = PINGROUP(8, qup8, pll_bist, _, _, _, _, _, _, _), + [9] = PINGROUP(9, qup8, _, _, _, _, _, _, _, _), + [10] = PINGROUP(10, qup8, qdss_gpio13, _, _, _, _, _, _, _), + [11] = PINGROUP(11, qup8, qdss_gpio15, _, _, _, _, _, _, _), + [12] = PINGROUP(12, qup2, mdp_vsync, phase_flag27, _, _, _, _, _, _), + [13] = PINGROUP(13, qup2, mdp_vsync, phase_flag26, _, _, _, _, _, _), + [14] = PINGROUP(14, qup11, _, _, _, _, _, _, _, _), + [15] = PINGROUP(15, qup11, _, _, _, _, _, _, _, _), + [16] = PINGROUP(16, qup3, phase_flag25, _, _, _, _, _, _, _), + [17] = PINGROUP(17, qup3, phase_flag24, _, _, _, _, _, _, _), + [18] = PINGROUP(18, qup3, phase_flag23, _, _, _, _, _, _, _), + [19] = PINGROUP(19, qup3, phase_flag22, _, _, _, _, _, _, _), + [20] = PINGROUP(20, qup4, aoss_cti, tgu_ch0, _, _, _, _, _, _), + [21] = PINGROUP(21, qup4, aoss_cti, phase_flag21, tgu_ch1, _, _, _, _, _), + [22] = PINGROUP(22, qup4, qup2, aoss_cti, phase_flag20, tgu_ch2, _, _, _, _), + [23] = PINGROUP(23, qup4, qup2, aoss_cti, phase_flag19, tgu_ch3, _, _, _, _), + [24] = PINGROUP(24, qup7, qdss_gpio0, _, _, _, _, _, _, _), + [25] = PINGROUP(25, qup7, qdss_gpio1, _, _, _, _, _, _, _), + [26] = PINGROUP(26, qup7, qdss_gpio2, atest_usb0, _, _, _, _, _, _), + [27] = PINGROUP(27, qup7, qdss_gpio3, atest_usb02, _, _, _, _, _, _), + [28] = PINGROUP(28, qup11, qdss_cti, _, _, _, _, _, _, _), + [29] = PINGROUP(29, qdss_cti, _, _, _, _, _, _, _, _), + [30] = PINGROUP(30, qup11, qdss_gpio4, _, _, _, _, _, _, _), + [31] = PINGROUP(31, qdss_gpio5, _, _, _, _, _, _, _, _), + [32] = PINGROUP(32, qspi0, _, _, _, _, _, _, _, _), + [33] = PINGROUP(33, qspi1, _, _, _, _, _, _, _, _), + [34] = PINGROUP(34, qspi_clk, qup9, _, _, _, _, _, _, _), + [35] = PINGROUP(35, qspi_cs0, qup9, dp0_hot, _, _, _, _, _, _), + [36] = PINGROUP(36, qspi2, qdss_cti, vsense_trigger, _, _, _, _, _, _), + [37] = PINGROUP(37, qspi3, qdss_cti, _, _, _, _, _, _, _), + [38] = PINGROUP(38, qspi_cs1, qdss_cti, qdss_cti, _, _, _, _, _, _), + [39] = PINGROUP(39, qdss_gpio12, _, _, _, _, _, _, _, _), + [40] = PINGROUP(40, _, _, _, _, _, _, _, _, _), + [41] = PINGROUP(41, qup3, mdp_vsync, phase_flag18, _, ddr_pxi1, _, _, _, _), + [42] = PINGROUP(42, _, _, _, _, _, _, _, _, _), + [43] = PINGROUP(43, _, _, _, _, _, _, _, _, _), + [44] = PINGROUP(44, _, _, _, _, _, _, _, _, _), + [45] = PINGROUP(45, qdss_gpio14, ddr_pxi1, atest_char, _, _, _, _, _, _), + [46] = PINGROUP(46, ddr_pxi3, _, _, _, _, _, _, _, _), + [47] = PINGROUP(47, qdss_cti, ddr_pxi3, _, _, _, _, _, _, _), + [48] = PINGROUP(48, qdss_cti, ddr_pxi2, _, _, _, _, _, _, _), + [49] = PINGROUP(49, mdp_vsync, mdp_vsync0, mdp_vsync1, _, _, _, _, _, _), + [50] = PINGROUP(50, mdp_vsync, mdp_vsync2, mdp_vsync3, _, _, _, _, _, _), + [51] = PINGROUP(51, _, _, _, _, _, _, _, _, _), + [52] = PINGROUP(52, _, _, _, _, _, _, _, _, _), + [53] = PINGROUP(53, qdss_cti, qdss_cti, _, _, _, _, _, _, _), + [54] = PINGROUP(54, pll_clk, _, _, _, _, _, _, _, _), + [55] = 
PINGROUP(55, _, ddr_pxi2, _, _, _, _, _, _, _), + [56] = PINGROUP(56, pcie0_clkreqn, _, ddr_pxi0, _, _, _, _, _, _), + [57] = PINGROUP(57, ddr_pxi0, _, _, _, _, _, _, _, _), + [58] = PINGROUP(58, _, _, _, _, _, _, _, _, _), + [59] = PINGROUP(59, pcie1_clkreqn, dbg_out, _, _, _, _, _, _, _), + [60] = PINGROUP(60, cri_trng, atest_usb03, _, _, _, _, _, _, _), + [61] = PINGROUP(61, qup1, phase_flag17, _, _, _, _, _, _, _), + [62] = PINGROUP(62, qup1, phase_flag16, _, _, _, _, _, _, _), + [63] = PINGROUP(63, qup6, qdss_gpio10, _, _, _, _, _, _, _), + [64] = PINGROUP(64, qup6, qdss_gpio11, _, _, _, _, _, _, _), + [65] = PINGROUP(65, qdss_gpio0, _, _, _, _, _, _, _, _), + [66] = PINGROUP(66, qdss_gpio1, _, _, _, _, _, _, _, _), + [67] = PINGROUP(67, cci_i2c, qdss_gpio2, _, _, _, _, _, _, _), + [68] = PINGROUP(68, cci_i2c, qdss_gpio3, _, _, _, _, _, _, _), + [69] = PINGROUP(69, cam_mclk, tb_trig, _, _, _, _, _, _, _), + [70] = PINGROUP(70, cam_mclk, cri_trng0, _, _, _, _, _, _, _), + [71] = PINGROUP(71, cam_mclk, cri_trng1, _, _, _, _, _, _, _), + [72] = PINGROUP(72, cam_mclk, prng_rosc0, _, _, _, _, _, _, _), + [73] = PINGROUP(73, cam_mclk, prng_rosc1, _, _, _, _, _, _, _), + [74] = PINGROUP(74, cam_mclk, prng_rosc2, _, _, _, _, _, _, _), + [75] = PINGROUP(75, cam_mclk, prng_rosc3, _, _, _, _, _, _, _), + [76] = PINGROUP(76, cam_mclk, _, _, _, _, _, _, _, _), + [77] = PINGROUP(77, cci_timer0, qdss_gpio4, _, _, _, _, _, _, _), + [78] = PINGROUP(78, cci_timer1, cci_i2c, qdss_gpio5, _, _, _, _, _, _), + [79] = PINGROUP(79, cci_timer2, cci_i2c, tmess_prng1, qdss_gpio6, _, _, _, _, _), + [80] = PINGROUP(80, cci_timer3, cci_i2c, cci_async, tmess_prng0, qdss_gpio7, _, _, _, _), + [81] = PINGROUP(81, cci_timer4, cci_i2c, cci_async, tmess_prng3, qdss_gpio8, _, _, _, _), + [82] = PINGROUP(82, cci_async, qdss_gpio9, _, _, _, _, _, _, _), + [83] = PINGROUP(83, cci_i2c, tmess_prng2, qdss_gpio10, _, _, _, _, _, _), + [84] = PINGROUP(84, cci_i2c, qdss_gpio11, _, _, _, _, _, _, _), + [85] = PINGROUP(85, cci_i2c, qdss_gpio12, _, _, _, _, _, _, _), + [86] = PINGROUP(86, cci_i2c, qdss_gpio13, tsense_pwm1, tsense_pwm2, _, _, _, _, _), + [87] = PINGROUP(87, cci_i2c, qdss_gpio14, atest_char3, _, _, _, _, _, _), + [88] = PINGROUP(88, cci_i2c, qdss_gpio15, atest_char2, _, _, _, _, _, _), + [89] = PINGROUP(89, cci_i2c, qdss_gpio, atest_char1, _, _, _, _, _, _), + [90] = PINGROUP(90, cci_i2c, qdss_gpio, atest_char0, _, _, _, _, _, _), + [91] = PINGROUP(91, cci_i2c, qup6, ibi_i3c, _, _, _, _, _, _), + [92] = PINGROUP(92, cci_i2c, qup6, ibi_i3c, _, _, _, _, _, _), + [93] = PINGROUP(93, qup0, phase_flag11, _, _, _, _, _, _, _), + [94] = PINGROUP(94, qup4, phase_flag10, _, _, _, _, _, _, _), + [95] = PINGROUP(95, qup5, phase_flag9, _, _, _, _, _, _, _), + [96] = PINGROUP(96, qup5, phase_flag8, _, _, _, _, _, _, _), + [97] = PINGROUP(97, qup5, phase_flag7, _, _, _, _, _, _, _), + [98] = PINGROUP(98, qup5, phase_flag6, _, _, _, _, _, _, _), + [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _), + [100] = PINGROUP(100, usb0_phy, ddr_bist, _, _, _, _, _, _, _), + [101] = PINGROUP(101, _, _, _, _, _, _, _, _, _), + [102] = PINGROUP(102, _, _, _, _, _, _, _, _, _), + [103] = PINGROUP(103, ext_mclk1, audio_ref, dp0_hot, ddr_bist, _, _, _, _, _), + [104] = PINGROUP(104, ext_mclk0, _, _, _, _, _, _, _, _), + [105] = PINGROUP(105, i2s0_sck, _, qdss_cti, _, _, _, _, _, _), + [106] = PINGROUP(106, i2s0_data0, _, qdss_cti, _, _, _, _, _, _), + [107] = PINGROUP(107, i2s0_data1, _, _, _, _, _, _, _, _), + [108] = PINGROUP(108, i2s0_ws, _, _, _, 
_, _, _, _, _), + [109] = PINGROUP(109, qup9, qdss_gpio, atest_usb01, _, _, _, _, _, _), + [110] = PINGROUP(110, qup9, qdss_gpio, atest_usb00, _, _, _, _, _, _), + [111] = PINGROUP(111, host2wlan_sol, _, _, _, _, _, _, _, _), + [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _), + [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _), + [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _), + [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _), + [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _), + [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _), + [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _), + [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _), + [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _), + [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _), + [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _), + [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _), + [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _), + [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _), + [126] = PINGROUP(126, phase_flag3, _, _, _, _, _, _, _, _), + [127] = PINGROUP(127, phase_flag2, _, _, _, _, _, _, _, _), + [128] = PINGROUP(128, phase_flag1, _, _, _, _, _, _, _, _), + [129] = PINGROUP(129, gcc_gp1, _, _, _, _, _, _, _, _), + [130] = PINGROUP(130, gcc_gp2, _, _, _, _, _, _, _, _), + [131] = PINGROUP(131, gcc_gp3, _, _, _, _, _, _, _, _), + [132] = PINGROUP(132, gcc_gp1, _, _, _, _, _, _, _, _), + [133] = PINGROUP(133, phase_flag0, _, _, _, _, _, _, _, _), + [134] = PINGROUP(134, phase_flag12, _, _, _, _, _, _, _, _), + [135] = PINGROUP(135, gcc_gp2, _, _, _, _, _, _, _, _), + [136] = PINGROUP(136, gcc_gp3, _, _, _, _, _, _, _, _), + [137] = PINGROUP(137, phase_flag15, _, _, _, _, _, _, _, _), + [138] = PINGROUP(138, phase_flag14, _, _, _, _, _, _, _, _), + [139] = PINGROUP(139, phase_flag13, _, _, _, _, _, _, _, _), + [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _), + [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _), + [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _), + [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _), + [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _), + [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _), + [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _), + [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _), + [148] = PINGROUP(148, phase_flag31, _, _, _, _, _, _, _, _), + [149] = PINGROUP(149, phase_flag30, _, _, _, _, _, _, _, _), + [150] = PINGROUP(150, phase_flag5, _, _, _, _, _, _, _, _), + [151] = PINGROUP(151, phase_flag4, _, _, _, _, _, _, _, _), + [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _), + [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _), + [154] = PINGROUP(154, qdss_cti, _, _, _, _, _, _, _, _), + [155] = PINGROUP(155, qdss_cti, _, _, _, _, _, _, _, _), + [156] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xa1000, 0, 0), + [157] = SDC_QDSD_PINGROUP(sdc1_clk, 0xa0000, 13, 6), + [158] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xa0000, 11, 3), + [159] = SDC_QDSD_PINGROUP(sdc1_data, 0xa0000, 9, 0), +}; + +static const struct msm_gpio_wakeirq_map sar2130p_pdc_map[] = { + { 0, 50 }, { 3, 68 }, { 6, 88 }, { 7, 55 }, { 10, 66 }, { 11, 96 }, + { 12, 48 }, { 13, 49 }, { 15, 62 }, { 18, 57 }, { 19, 59 }, { 23, 51 }, + { 27, 74 }, { 28, 67 }, { 29, 84 }, { 30, 58 }, { 31, 94 }, { 32, 60 }, + { 33, 61 }, { 35, 69 }, { 37, 70 }, { 38, 64 }, { 39, 65 }, { 40, 63 }, + { 41, 92 }, { 42, 82 }, { 44, 83 }, { 45, 43 }, { 46, 72 }, { 47, 45 }, + { 48, 44 }, { 49, 71 }, { 50, 87 }, { 53, 77 }, { 54, 78 }, + { 55, 106 }, { 56, 79 }, { 57, 80 }, { 58, 107 }, { 59, 81 }, + { 60, 89 }, { 61, 54 }, { 62, 73 }, { 63, 93 }, { 64, 86 }, { 65, 75 }, + { 67, 
42 }, { 68, 76 }, { 76, 116 }, { 77, 12 }, { 83, 13 }, + { 91, 90 }, { 94, 95 }, { 95, 91 }, { 98, 47 }, { 100, 85 }, + { 101, 52 }, { 102, 53 }, { 103, 97 }, { 104, 98 }, { 105, 99 }, + { 106, 100 }, { 107, 101 }, { 108, 102 }, { 109, 103 }, { 111, 104 }, + { 113, 46 }, { 114, 56 }, { 115, 108 }, { 116, 109 }, { 117, 110 }, + { 118, 111 }, { 121, 112 }, { 122, 113 }, { 124, 114 }, { 127, 115 }, + { 132, 118 }, { 134, 119 }, { 135, 120 }, { 136, 121 }, { 139, 122 }, + { 140, 123 }, { 141, 124 }, { 143, 128 }, { 144, 129 }, { 145, 130 }, + { 146, 131 }, { 148, 132 }, { 150, 133 }, { 151, 134 }, { 153, 135 }, + { 155, 137 }, +}; + +static const struct msm_pinctrl_soc_data sar2130p_tlmm = { + .pins = sar2130p_pins, + .npins = ARRAY_SIZE(sar2130p_pins), + .functions = sar2130p_functions, + .nfunctions = ARRAY_SIZE(sar2130p_functions), + .groups = sar2130p_groups, + .ngroups = ARRAY_SIZE(sar2130p_groups), + .ngpios = 156, + .wakeirq_map = sar2130p_pdc_map, + .nwakeirq_map = ARRAY_SIZE(sar2130p_pdc_map), +}; + +static int sar2130p_tlmm_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &sar2130p_tlmm); +} + +static const struct of_device_id sar2130p_tlmm_of_match[] = { + { .compatible = "qcom,sar2130p-tlmm", .data = &sar2130p_tlmm}, + { }, +}; +MODULE_DEVICE_TABLE(of, sar2130p_tlmm_of_match); + +static struct platform_driver sar2130p_tlmm_driver = { + .driver = { + .name = "sar2130p-tlmm", + .of_match_table = sar2130p_tlmm_of_match, + }, + .probe = sar2130p_tlmm_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init sar2130p_tlmm_init(void) +{ + return platform_driver_register(&sar2130p_tlmm_driver); +} +arch_initcall(sar2130p_tlmm_init); + +static void __exit sar2130p_tlmm_exit(void) +{ + platform_driver_unregister(&sar2130p_tlmm_driver); +} +module_exit(sar2130p_tlmm_exit); + +MODULE_DESCRIPTION("QTI SAR2130P TLMM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c index c27aaa599b91..6eb0c73791c0 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7180.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c @@ -1159,7 +1159,7 @@ static struct platform_driver sc7180_pinctrl_driver = { .of_match_table = sc7180_pinctrl_of_match, }, .probe = sc7180_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sc7180_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c index 6bb39812e1d8..1161f0a91a00 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c @@ -142,7 +142,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c index c2db663e396e..0c10eeb60b55 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7280.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c @@ -1505,7 +1505,7 @@ static struct platform_driver sc7280_pinctrl_driver = { .of_match_table = sc7280_pinctrl_of_match, }, .probe = sc7280_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sc7280_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c index cfa7c8be9770..d6a79ad41a40 100644 --- 
a/drivers/pinctrl/qcom/pinctrl-sc8180x.c +++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c @@ -1720,7 +1720,7 @@ static struct platform_driver sc8180x_pinctrl_driver = { .acpi_match_table = sc8180x_pinctrl_acpi_match, }, .probe = sc8180x_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sc8180x_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c index c0369baf3398..0e839b6aaaf4 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c @@ -179,7 +179,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c index 4b1c49697698..96f4fb5a5d29 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c +++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c @@ -1926,7 +1926,7 @@ static struct platform_driver sc8280xp_pinctrl_driver = { .of_match_table = sc8280xp_pinctrl_of_match, }, .probe = sc8280xp_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sc8280xp_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c index b93837c00954..907e4ffca5e7 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdm660.c +++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c @@ -1442,7 +1442,7 @@ static struct platform_driver sdm660_pinctrl_driver = { .of_match_table = sdm660_pinctrl_of_match, }, .probe = sdm660_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sdm660_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c index 894c042cb524..c76183ba95e1 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdm670.c +++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c @@ -1337,7 +1337,7 @@ static struct platform_driver sdm670_pinctrl_driver = { .of_match_table = sdm670_pinctrl_of_match, }, .probe = sdm670_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sdm670_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c index 3f3265e0018d..cc05c415ed15 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdm845.c +++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c @@ -1351,7 +1351,7 @@ static struct platform_driver sdm845_pinctrl_driver = { .acpi_match_table = ACPI_PTR(sdm845_pinctrl_acpi_match), }, .probe = sdm845_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sdm845_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c index c88b8bfcacb6..8826db9d21d0 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdx55.c +++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c @@ -990,7 +990,7 @@ static struct platform_driver sdx55_pinctrl_driver = { .of_match_table = sdx55_pinctrl_of_match, }, .probe = sdx55_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sdx55_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c index bd44ec0fcab4..f6f319c997fc 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdx65.c +++ 
b/drivers/pinctrl/qcom/pinctrl-sdx65.c @@ -939,7 +939,7 @@ static struct platform_driver sdx65_pinctrl_driver = { .of_match_table = sdx65_pinctrl_of_match, }, .probe = sdx65_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sdx65_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c index 396f6fc779a2..3cfe8c7f04df 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdx75.c +++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c @@ -1124,7 +1124,7 @@ static struct platform_driver sdx75_pinctrl_driver = { .of_match_table = sdx75_pinctrl_of_match, }, .probe = sdx75_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sdx75_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c index 2d2c636a3e20..c0e178be9cfc 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c @@ -227,7 +227,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c index 27317b86d835..622f20e6f6f8 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm4450.c +++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c @@ -994,7 +994,7 @@ static struct platform_driver sm4450_tlmm_driver = { .of_match_table = sm4450_tlmm_of_match, }, .probe = sm4450_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; MODULE_DEVICE_TABLE(of, sm4450_tlmm_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c index 316d6fc69131..b7d9186861a2 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c @@ -147,7 +147,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c index 7ce04144b6ed..4e91c75ad952 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm6115.c +++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c @@ -907,7 +907,7 @@ static struct platform_driver sm6115_tlmm_driver = { .of_match_table = sm6115_tlmm_of_match, }, .probe = sm6115_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm6115_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c index 65de34c30759..c188842047aa 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm6125.c +++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c @@ -1266,7 +1266,7 @@ static struct platform_driver sm6125_tlmm_driver = { .of_match_table = sm6125_tlmm_of_match, }, .probe = sm6125_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm6125_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c index 4aeb1ba43ee3..f3828c07b134 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm6350.c +++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c @@ -1373,7 +1373,7 @@ static struct platform_driver sm6350_tlmm_driver = { 
.of_match_table = sm6350_tlmm_of_match, }, .probe = sm6350_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm6350_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c index d86630d7125c..c82c8516932e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm6375.c +++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c @@ -1516,7 +1516,7 @@ static struct platform_driver sm6375_tlmm_driver = { .of_match_table = sm6375_tlmm_of_match, }, .probe = sm6375_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm6375_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c index 095a1ca75849..3c7fd8af6635 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm7150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c @@ -1255,7 +1255,7 @@ static struct platform_driver sm7150_tlmm_driver = { .of_match_table = sm7150_tlmm_of_match, }, .probe = sm7150_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm7150_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c index f8f5bee74f1d..01aea9c70b7a 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c @@ -1542,7 +1542,7 @@ static struct platform_driver sm8150_pinctrl_driver = { .of_match_table = sm8150_pinctrl_of_match, }, .probe = sm8150_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm8150_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c index 9791d9ba5087..64494a86490e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c @@ -140,7 +140,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c index 54fda77bf296..e9961a49ff98 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8250.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c @@ -1351,7 +1351,7 @@ static struct platform_driver sm8250_pinctrl_driver = { .of_match_table = sm8250_pinctrl_of_match, }, .probe = sm8250_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm8250_pinctrl_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c index 5b9a2cb216bd..7b146b4acfdf 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c @@ -142,7 +142,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c index ac7f2820f2cb..9c69458bd910 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8350.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c @@ -1642,7 +1642,7 @@ static struct platform_driver sm8350_tlmm_driver = { .of_match_table = sm8350_tlmm_of_match, }, .probe = sm8350_tlmm_probe, - .remove_new = msm_pinctrl_remove, + 
.remove = msm_pinctrl_remove, }; static int __init sm8350_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c index a028cbb49947..439f6541622e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c @@ -208,7 +208,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c index 617286711695..d11bb1ee9e3d 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8450.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c @@ -1677,7 +1677,7 @@ static struct platform_driver sm8450_tlmm_driver = { .of_match_table = sm8450_tlmm_of_match, }, .probe = sm8450_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm8450_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c index 852192b044e1..73065919c8c2 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c @@ -216,7 +216,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c index 9184e0183755..3c847d9cb5d9 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8550.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c @@ -1762,7 +1762,7 @@ static struct platform_driver sm8550_tlmm_driver = { .of_match_table = sm8550_tlmm_of_match, }, .probe = sm8550_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm8550_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c index 04400c832327..f9fcedf5a65d 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c @@ -223,7 +223,7 @@ static struct platform_driver lpi_pinctrl_driver = { .of_match_table = lpi_pinctrl_of_match, }, .probe = lpi_pinctrl_probe, - .remove_new = lpi_pinctrl_remove, + .remove = lpi_pinctrl_remove, }; module_platform_driver(lpi_pinctrl_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c index adaddd728662..104708252d12 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8650.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c @@ -1742,7 +1742,7 @@ static struct platform_driver sm8650_tlmm_driver = { .of_match_table = sm8650_tlmm_of_match, }, .probe = sm8650_tlmm_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init sm8650_tlmm_init(void) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c new file mode 100644 index 000000000000..1af11cd95fb0 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c @@ -0,0 +1,1729 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "pinctrl-msm.h" + +#define REG_SIZE 0x1000 + +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \ + { \ + .grp = PINCTRL_PINGROUP("gpio" #id, \ + gpio##id##_pins, \ + ARRAY_SIZE(gpio##id##_pins)), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9, \ + msm_mux_##f10, \ + msm_mux_##f11 /* egpio mode */ \ + }, \ + .nfuncs = 12, \ + .ctl_reg = REG_SIZE * id, \ + .io_reg = 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = 0x8 + REG_SIZE * id, \ + .intr_status_reg = 0xc + REG_SIZE * id, \ + .intr_target_reg = 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, ctl, io) \ + { \ + .grp = PINCTRL_PINGROUP(#pg_name, \ + pg_name##_pins, \ + ARRAY_SIZE(pg_name##_pins)), \ + .ctl_reg = ctl, \ + .io_reg = io, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +static const struct pinctrl_pin_desc sm8750_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, 
"GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, "GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "GPIO_150"), + PINCTRL_PIN(151, "GPIO_151"), + PINCTRL_PIN(152, "GPIO_152"), + 
PINCTRL_PIN(153, "GPIO_153"), + PINCTRL_PIN(154, "GPIO_154"), + PINCTRL_PIN(155, "GPIO_155"), + PINCTRL_PIN(156, "GPIO_156"), + PINCTRL_PIN(157, "GPIO_157"), + PINCTRL_PIN(158, "GPIO_158"), + PINCTRL_PIN(159, "GPIO_159"), + PINCTRL_PIN(160, "GPIO_160"), + PINCTRL_PIN(161, "GPIO_161"), + PINCTRL_PIN(162, "GPIO_162"), + PINCTRL_PIN(163, "GPIO_163"), + PINCTRL_PIN(164, "GPIO_164"), + PINCTRL_PIN(165, "GPIO_165"), + PINCTRL_PIN(166, "GPIO_166"), + PINCTRL_PIN(167, "GPIO_167"), + PINCTRL_PIN(168, "GPIO_168"), + PINCTRL_PIN(169, "GPIO_169"), + PINCTRL_PIN(170, "GPIO_170"), + PINCTRL_PIN(171, "GPIO_171"), + PINCTRL_PIN(172, "GPIO_172"), + PINCTRL_PIN(173, "GPIO_173"), + PINCTRL_PIN(174, "GPIO_174"), + PINCTRL_PIN(175, "GPIO_175"), + PINCTRL_PIN(176, "GPIO_176"), + PINCTRL_PIN(177, "GPIO_177"), + PINCTRL_PIN(178, "GPIO_178"), + PINCTRL_PIN(179, "GPIO_179"), + PINCTRL_PIN(180, "GPIO_180"), + PINCTRL_PIN(181, "GPIO_181"), + PINCTRL_PIN(182, "GPIO_182"), + PINCTRL_PIN(183, "GPIO_183"), + PINCTRL_PIN(184, "GPIO_184"), + PINCTRL_PIN(185, "GPIO_185"), + PINCTRL_PIN(186, "GPIO_186"), + PINCTRL_PIN(187, "GPIO_187"), + PINCTRL_PIN(188, "GPIO_188"), + PINCTRL_PIN(189, "GPIO_189"), + PINCTRL_PIN(190, "GPIO_190"), + PINCTRL_PIN(191, "GPIO_191"), + PINCTRL_PIN(192, "GPIO_192"), + PINCTRL_PIN(193, "GPIO_193"), + PINCTRL_PIN(194, "GPIO_194"), + PINCTRL_PIN(195, "GPIO_195"), + PINCTRL_PIN(196, "GPIO_196"), + PINCTRL_PIN(197, "GPIO_197"), + PINCTRL_PIN(198, "GPIO_198"), + PINCTRL_PIN(199, "GPIO_199"), + PINCTRL_PIN(200, "GPIO_200"), + PINCTRL_PIN(201, "GPIO_201"), + PINCTRL_PIN(202, "GPIO_202"), + PINCTRL_PIN(203, "GPIO_203"), + PINCTRL_PIN(204, "GPIO_204"), + PINCTRL_PIN(205, "GPIO_205"), + PINCTRL_PIN(206, "GPIO_206"), + PINCTRL_PIN(207, "GPIO_207"), + PINCTRL_PIN(208, "GPIO_208"), + PINCTRL_PIN(209, "GPIO_209"), + PINCTRL_PIN(210, "GPIO_210"), + PINCTRL_PIN(211, "GPIO_211"), + PINCTRL_PIN(212, "GPIO_212"), + PINCTRL_PIN(213, "GPIO_213"), + PINCTRL_PIN(214, "GPIO_214"), + PINCTRL_PIN(215, "UFS_RESET"), + PINCTRL_PIN(216, "SDC2_CLK"), + PINCTRL_PIN(217, "SDC2_CMD"), + PINCTRL_PIN(218, "SDC2_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); 
+DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); +DECLARE_MSM_GPIO_PINS(133); +DECLARE_MSM_GPIO_PINS(134); +DECLARE_MSM_GPIO_PINS(135); +DECLARE_MSM_GPIO_PINS(136); +DECLARE_MSM_GPIO_PINS(137); +DECLARE_MSM_GPIO_PINS(138); +DECLARE_MSM_GPIO_PINS(139); +DECLARE_MSM_GPIO_PINS(140); +DECLARE_MSM_GPIO_PINS(141); +DECLARE_MSM_GPIO_PINS(142); +DECLARE_MSM_GPIO_PINS(143); +DECLARE_MSM_GPIO_PINS(144); +DECLARE_MSM_GPIO_PINS(145); +DECLARE_MSM_GPIO_PINS(146); +DECLARE_MSM_GPIO_PINS(147); +DECLARE_MSM_GPIO_PINS(148); +DECLARE_MSM_GPIO_PINS(149); +DECLARE_MSM_GPIO_PINS(150); +DECLARE_MSM_GPIO_PINS(151); +DECLARE_MSM_GPIO_PINS(152); +DECLARE_MSM_GPIO_PINS(153); +DECLARE_MSM_GPIO_PINS(154); +DECLARE_MSM_GPIO_PINS(155); +DECLARE_MSM_GPIO_PINS(156); +DECLARE_MSM_GPIO_PINS(157); +DECLARE_MSM_GPIO_PINS(158); +DECLARE_MSM_GPIO_PINS(159); +DECLARE_MSM_GPIO_PINS(160); +DECLARE_MSM_GPIO_PINS(161); +DECLARE_MSM_GPIO_PINS(162); +DECLARE_MSM_GPIO_PINS(163); +DECLARE_MSM_GPIO_PINS(164); +DECLARE_MSM_GPIO_PINS(165); +DECLARE_MSM_GPIO_PINS(166); +DECLARE_MSM_GPIO_PINS(167); +DECLARE_MSM_GPIO_PINS(168); +DECLARE_MSM_GPIO_PINS(169); +DECLARE_MSM_GPIO_PINS(170); +DECLARE_MSM_GPIO_PINS(171); 
+DECLARE_MSM_GPIO_PINS(172); +DECLARE_MSM_GPIO_PINS(173); +DECLARE_MSM_GPIO_PINS(174); +DECLARE_MSM_GPIO_PINS(175); +DECLARE_MSM_GPIO_PINS(176); +DECLARE_MSM_GPIO_PINS(177); +DECLARE_MSM_GPIO_PINS(178); +DECLARE_MSM_GPIO_PINS(179); +DECLARE_MSM_GPIO_PINS(180); +DECLARE_MSM_GPIO_PINS(181); +DECLARE_MSM_GPIO_PINS(182); +DECLARE_MSM_GPIO_PINS(183); +DECLARE_MSM_GPIO_PINS(184); +DECLARE_MSM_GPIO_PINS(185); +DECLARE_MSM_GPIO_PINS(186); +DECLARE_MSM_GPIO_PINS(187); +DECLARE_MSM_GPIO_PINS(188); +DECLARE_MSM_GPIO_PINS(189); +DECLARE_MSM_GPIO_PINS(190); +DECLARE_MSM_GPIO_PINS(191); +DECLARE_MSM_GPIO_PINS(192); +DECLARE_MSM_GPIO_PINS(193); +DECLARE_MSM_GPIO_PINS(194); +DECLARE_MSM_GPIO_PINS(195); +DECLARE_MSM_GPIO_PINS(196); +DECLARE_MSM_GPIO_PINS(197); +DECLARE_MSM_GPIO_PINS(198); +DECLARE_MSM_GPIO_PINS(199); +DECLARE_MSM_GPIO_PINS(200); +DECLARE_MSM_GPIO_PINS(201); +DECLARE_MSM_GPIO_PINS(202); +DECLARE_MSM_GPIO_PINS(203); +DECLARE_MSM_GPIO_PINS(204); +DECLARE_MSM_GPIO_PINS(205); +DECLARE_MSM_GPIO_PINS(206); +DECLARE_MSM_GPIO_PINS(207); +DECLARE_MSM_GPIO_PINS(208); +DECLARE_MSM_GPIO_PINS(209); +DECLARE_MSM_GPIO_PINS(210); +DECLARE_MSM_GPIO_PINS(211); +DECLARE_MSM_GPIO_PINS(212); +DECLARE_MSM_GPIO_PINS(213); +DECLARE_MSM_GPIO_PINS(214); + +static const unsigned int ufs_reset_pins[] = { 215 }; +static const unsigned int sdc2_clk_pins[] = { 216 }; +static const unsigned int sdc2_cmd_pins[] = { 217 }; +static const unsigned int sdc2_data_pins[] = { 218 }; + +enum sm8750_functions { + msm_mux_gpio, + msm_mux_aoss_cti, + msm_mux_atest_char, + msm_mux_atest_usb, + msm_mux_audio_ext_mclk0, + msm_mux_audio_ext_mclk1, + msm_mux_audio_ref_clk, + msm_mux_cam_aon_mclk2, + msm_mux_cam_aon_mclk4, + msm_mux_cam_mclk, + msm_mux_cci_async_in, + msm_mux_cci_i2c_scl, + msm_mux_cci_i2c_sda, + msm_mux_cci_timer, + msm_mux_cmu_rng, + msm_mux_coex_uart1_rx, + msm_mux_coex_uart1_tx, + msm_mux_coex_uart2_rx, + msm_mux_coex_uart2_tx, + msm_mux_dbg_out_clk, + msm_mux_ddr_bist_complete, + msm_mux_ddr_bist_fail, + msm_mux_ddr_bist_start, + msm_mux_ddr_bist_stop, + msm_mux_ddr_pxi0, + msm_mux_ddr_pxi1, + msm_mux_ddr_pxi2, + msm_mux_ddr_pxi3, + msm_mux_dp_hot, + msm_mux_egpio, + msm_mux_gcc_gp1, + msm_mux_gcc_gp2, + msm_mux_gcc_gp3, + msm_mux_gnss_adc0, + msm_mux_gnss_adc1, + msm_mux_i2chub0_se0, + msm_mux_i2chub0_se1, + msm_mux_i2chub0_se2, + msm_mux_i2chub0_se3, + msm_mux_i2chub0_se4, + msm_mux_i2chub0_se5, + msm_mux_i2chub0_se6, + msm_mux_i2chub0_se7, + msm_mux_i2chub0_se8, + msm_mux_i2chub0_se9, + msm_mux_i2s0_data0, + msm_mux_i2s0_data1, + msm_mux_i2s0_sck, + msm_mux_i2s0_ws, + msm_mux_i2s1_data0, + msm_mux_i2s1_data1, + msm_mux_i2s1_sck, + msm_mux_i2s1_ws, + msm_mux_ibi_i3c, + msm_mux_jitter_bist, + msm_mux_mdp_esync0_out, + msm_mux_mdp_esync1_out, + msm_mux_mdp_vsync, + msm_mux_mdp_vsync0_out, + msm_mux_mdp_vsync1_out, + msm_mux_mdp_vsync2_out, + msm_mux_mdp_vsync3_out, + msm_mux_mdp_vsync5_out, + msm_mux_mdp_vsync_e, + msm_mux_nav_gpio0, + msm_mux_nav_gpio1, + msm_mux_nav_gpio2, + msm_mux_nav_gpio3, + msm_mux_pcie0_clk_req_n, + msm_mux_phase_flag, + msm_mux_pll_bist_sync, + msm_mux_pll_clk_aux, + msm_mux_prng_rosc0, + msm_mux_prng_rosc1, + msm_mux_prng_rosc2, + msm_mux_prng_rosc3, + msm_mux_qdss_cti, + msm_mux_qlink_big_enable, + msm_mux_qlink_big_request, + msm_mux_qlink_little_enable, + msm_mux_qlink_little_request, + msm_mux_qlink_wmss, + msm_mux_qspi0, + msm_mux_qspi1, + msm_mux_qspi2, + msm_mux_qspi3, + msm_mux_qspi_clk, + msm_mux_qspi_cs, + msm_mux_qup1_se0, + msm_mux_qup1_se1, + msm_mux_qup1_se2, + 
msm_mux_qup1_se3, + msm_mux_qup1_se4, + msm_mux_qup1_se5, + msm_mux_qup1_se6, + msm_mux_qup1_se7, + msm_mux_qup2_se0, + msm_mux_qup2_se1, + msm_mux_qup2_se2, + msm_mux_qup2_se3, + msm_mux_qup2_se4, + msm_mux_qup2_se5, + msm_mux_qup2_se6, + msm_mux_qup2_se7, + msm_mux_sd_write_protect, + msm_mux_sdc40, + msm_mux_sdc41, + msm_mux_sdc42, + msm_mux_sdc43, + msm_mux_sdc4_clk, + msm_mux_sdc4_cmd, + msm_mux_tb_trig_sdc2, + msm_mux_tb_trig_sdc4, + msm_mux_tmess_prng0, + msm_mux_tmess_prng1, + msm_mux_tmess_prng2, + msm_mux_tmess_prng3, + msm_mux_tsense_pwm1, + msm_mux_tsense_pwm2, + msm_mux_tsense_pwm3, + msm_mux_tsense_pwm4, + msm_mux_uim0_clk, + msm_mux_uim0_data, + msm_mux_uim0_present, + msm_mux_uim0_reset, + msm_mux_uim1_clk, + msm_mux_uim1_data, + msm_mux_uim1_present, + msm_mux_uim1_reset, + msm_mux_usb1_hs, + msm_mux_usb_phy, + msm_mux_vfr_0, + msm_mux_vfr_1, + msm_mux_vsense_trigger_mirnat, + msm_mux_wcn_sw, + msm_mux_wcn_sw_ctrl, + msm_mux__, +}; + +static const char *const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", + "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", + "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", + "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", + "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", + "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", + "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", + "gpio54", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", + "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", + "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", + "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", + "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", + "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", + "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", + "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107", + "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113", + "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119", + "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125", + "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", + "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137", + "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143", + "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149", + "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155", + "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161", + "gpio162", "gpio163", "gpio164", "gpio165", "gpio166", "gpio167", + "gpio168", "gpio169", "gpio170", "gpio171", "gpio172", "gpio173", + "gpio174", "gpio175", "gpio176", "gpio177", "gpio178", "gpio179", + "gpio180", "gpio181", "gpio182", "gpio183", "gpio184", "gpio185", + "gpio186", "gpio187", "gpio188", "gpio189", "gpio190", "gpio191", + "gpio192", "gpio193", "gpio194", "gpio195", "gpio196", "gpio197", + "gpio198", "gpio199", "gpio200", "gpio201", "gpio202", "gpio203", + "gpio204", "gpio205", "gpio206", "gpio207", "gpio208", "gpio209", + "gpio210", "gpio211", "gpio212", "gpio213", "gpio214", +}; + +static const char *const egpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", + "gpio6", "gpio7", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio105", "gpio106", 
"gpio107", "gpio108", + "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170", + "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176", + "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182", + "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188", + "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194", + "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200", + "gpio201", "gpio202", "gpio203", "gpio204", "gpio205", "gpio206", + "gpio207", "gpio208", "gpio209", "gpio210", "gpio211", "gpio212", + "gpio213", "gpio214", +}; + +static const char *const aoss_cti_groups[] = { + "gpio50", "gpio51", "gpio60", "gpio61", +}; + +static const char *const atest_char_groups[] = { + "gpio130", "gpio131", "gpio132", "gpio133", "gpio137", +}; + +static const char *const atest_usb_groups[] = { + "gpio70", "gpio71", "gpio72", "gpio73", "gpio76", +}; + +static const char *const audio_ext_mclk0_groups[] = { + "gpio125", +}; + +static const char *const audio_ext_mclk1_groups[] = { + "gpio124", +}; + +static const char *const audio_ref_clk_groups[] = { + "gpio124", +}; + +static const char *const cam_aon_mclk2_groups[] = { + "gpio91", +}; + +static const char *const cam_aon_mclk4_groups[] = { + "gpio93", +}; + +static const char *const cam_mclk_groups[] = { + "gpio89", "gpio90", "gpio92", "gpio94", "gpio95", "gpio96", +}; + +static const char *const cci_async_in_groups[] = { + "gpio10", "gpio11", "gpio15", +}; + +static const char *const cci_i2c_scl_groups[] = { + "gpio114", "gpio116", "gpio118", "gpio120", "gpio153", "gpio164", +}; + +static const char *const cci_i2c_sda_groups[] = { + "gpio111", "gpio112", "gpio113", "gpio115", "gpio117", "gpio119", +}; + +static const char *const cci_timer_groups[] = { + "gpio109", "gpio110", "gpio111", "gpio163", "gpio164", +}; + +static const char *const cmu_rng_groups[] = { + "gpio40", "gpio41", "gpio41", "gpio43", "gpio148", "gpio149", + "gpio150", "gpio151", +}; + +static const char *const coex_uart1_rx_groups[] = { + "gpio148", +}; + +static const char *const coex_uart1_tx_groups[] = { + "gpio149", +}; + +static const char *const coex_uart2_rx_groups[] = { + "gpio150", +}; + +static const char *const coex_uart2_tx_groups[] = { + "gpio151", +}; + +static const char *const dbg_out_clk_groups[] = { + "gpio78", +}; + +static const char *const ddr_bist_complete_groups[] = { + "gpio44", +}; + +static const char *const ddr_bist_fail_groups[] = { + "gpio40", +}; + +static const char *const ddr_bist_start_groups[] = { + "gpio41", +}; + +static const char *const ddr_bist_stop_groups[] = { + "gpio45", +}; + +static const char *const ddr_pxi0_groups[] = { + "gpio54", "gpio55", +}; + +static const char *const ddr_pxi1_groups[] = { + "gpio44", "gpio45", +}; + +static const char *const ddr_pxi2_groups[] = { + "gpio43", "gpio52", +}; + +static const char *const ddr_pxi3_groups[] = { + "gpio46", "gpio53", +}; + +static const char *const dp_hot_groups[] = { + "gpio47", +}; + +static const char *const gcc_gp1_groups[] = { + "gpio86", "gpio134", +}; + +static const char *const gcc_gp2_groups[] = { + "gpio87", "gpio135", +}; + +static const char *const gcc_gp3_groups[] = { + "gpio88", "gpio136", +}; + +static const char *const gnss_adc0_groups[] = { + "gpio78", "gpio79", +}; + +static const char *const gnss_adc1_groups[] = { + "gpio77", "gpio99", +}; + +static const char *const i2chub0_se0_groups[] = { + "gpio64", "gpio65", +}; + +static const char *const i2chub0_se1_groups[] = { + "gpio66", "gpio67", +}; + +static const 
char *const i2chub0_se2_groups[] = { + "gpio68", "gpio69", +}; + +static const char *const i2chub0_se3_groups[] = { + "gpio70", "gpio71", +}; + +static const char *const i2chub0_se4_groups[] = { + "gpio72", "gpio73", +}; + +static const char *const i2chub0_se5_groups[] = { + "gpio74", "gpio75", +}; + +static const char *const i2chub0_se6_groups[] = { + "gpio76", "gpio77", +}; + +static const char *const i2chub0_se7_groups[] = { + "gpio82", "gpio83", +}; + +static const char *const i2chub0_se8_groups[] = { + "gpio206", "gpio207", +}; + +static const char *const i2chub0_se9_groups[] = { + "gpio80", "gpio81", +}; + +static const char *const i2s0_data0_groups[] = { + "gpio127", +}; + +static const char *const i2s0_data1_groups[] = { + "gpio128", +}; + +static const char *const i2s0_sck_groups[] = { + "gpio126", +}; + +static const char *const i2s0_ws_groups[] = { + "gpio129", +}; + +static const char *const i2s1_data0_groups[] = { + "gpio122", +}; + +static const char *const i2s1_data1_groups[] = { + "gpio124", +}; + +static const char *const i2s1_sck_groups[] = { + "gpio121", +}; + +static const char *const i2s1_ws_groups[] = { + "gpio123", +}; + +static const char *const ibi_i3c_groups[] = { + "gpio0", "gpio1", "gpio4", "gpio5", "gpio8", "gpio9", + "gpio12", "gpio13", "gpio28", "gpio29", "gpio32", "gpio33", + "gpio36", "gpio37", "gpio48", "gpio49", +}; + +static const char *const jitter_bist_groups[] = { + "gpio73", +}; + +static const char *const mdp_esync0_out_groups[] = { + "gpio88", +}; + +static const char *const mdp_esync1_out_groups[] = { + "gpio100", +}; + +static const char *const mdp_vsync_groups[] = { + "gpio86", "gpio87", "gpio97", "gpio98", +}; + +static const char *const mdp_vsync0_out_groups[] = { + "gpio86", +}; + +static const char *const mdp_vsync1_out_groups[] = { + "gpio86", +}; + +static const char *const mdp_vsync2_out_groups[] = { + "gpio87", +}; + +static const char *const mdp_vsync3_out_groups[] = { + "gpio87", +}; + +static const char *const mdp_vsync5_out_groups[] = { + "gpio87", +}; + +static const char *const mdp_vsync_e_groups[] = { + "gpio88", +}; + +static const char *const nav_gpio0_groups[] = { + "gpio154", +}; + +static const char *const nav_gpio1_groups[] = { + "gpio155", +}; + +static const char *const nav_gpio2_groups[] = { + "gpio152", +}; + +static const char *const nav_gpio3_groups[] = { + "gpio154", +}; + +static const char *const pcie0_clk_req_n_groups[] = { + "gpio103", +}; + +static const char *const phase_flag_groups[] = { + "gpio10", "gpio11", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", + "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", "gpio24", "gpio25", + "gpio26", "gpio27", "gpio28", "gpio29", "gpio31", "gpio64", "gpio65", + "gpio66", "gpio67", "gpio68", "gpio69", "gpio82", "gpio83", "gpio85", + "gpio101", "gpio102", "gpio103", "gpio104", +}; + +static const char *const pll_bist_sync_groups[] = { + "gpio104", +}; + +static const char *const pll_clk_aux_groups[] = { + "gpio95", +}; + +static const char *const prng_rosc0_groups[] = { + "gpio85", +}; + +static const char *const prng_rosc1_groups[] = { + "gpio64", +}; + +static const char *const prng_rosc2_groups[] = { + "gpio65", +}; + +static const char *const prng_rosc3_groups[] = { + "gpio66", +}; + +static const char *const qdss_cti_groups[] = { + "gpio27", "gpio31", "gpio72", "gpio73", "gpio82", "gpio83", "gpio159", + "gpio162", +}; + +static const char *const qlink_big_enable_groups[] = { + "gpio160", +}; + +static const char *const qlink_big_request_groups[] = { + "gpio159", +}; + 
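Each of the *_groups arrays in this file lists the pin groups on which the named function can be selected; one group may appear in several arrays when a pad muxes between functions (for instance "gpio86" shows up in both gcc_gp1_groups and mdp_vsync_groups above). The stand-alone C sketch below illustrates that containment relation using copies of those two tables; the function_has_group() helper is purely illustrative and not a kernel API.

#include <stdio.h>
#include <string.h>

/* Stand-alone copies of two tables from this file, for illustration only. */
static const char *const gcc_gp1_groups[] = { "gpio86", "gpio134" };
static const char *const mdp_vsync_groups[] = { "gpio86", "gpio87", "gpio97", "gpio98" };

/* Illustrative helper (not a kernel API): does the function's group list
 * contain the given pin group? */
static int function_has_group(const char *const *groups, size_t ngroups,
			      const char *group)
{
	for (size_t i = 0; i < ngroups; i++)
		if (strcmp(groups[i], group) == 0)
			return 1;
	return 0;
}

int main(void)
{
	/* "gpio86" can mux to either function, so both checks print 1. */
	printf("gcc_gp1 on gpio86:   %d\n",
	       function_has_group(gcc_gp1_groups, 2, "gpio86"));
	printf("mdp_vsync on gpio86: %d\n",
	       function_has_group(mdp_vsync_groups, 4, "gpio86"));
	return 0;
}

In the driver proper these arrays are wrapped into struct pinfunction entries by MSM_PIN_FUNCTION() in sm8750_functions[] further down, and the pinctrl core performs the equivalent matching when a consumer selects a function on a group.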
+static const char *const qlink_little_enable_groups[] = { + "gpio157", +}; + +static const char *const qlink_little_request_groups[] = { + "gpio156", +}; + +static const char *const qlink_wmss_groups[] = { + "gpio158", +}; + +static const char *const qspi0_groups[] = { + "gpio52", +}; + +static const char *const qspi1_groups[] = { + "gpio53", +}; + +static const char *const qspi2_groups[] = { + "gpio55", +}; + +static const char *const qspi3_groups[] = { + "gpio56", +}; + +static const char *const qspi_clk_groups[] = { + "gpio54", +}; + +static const char *const qspi_cs_groups[] = { + "gpio57", "gpio58", +}; + +static const char *const qup1_se0_groups[] = { + "gpio32", "gpio33", "gpio34", "gpio35", +}; + +static const char *const qup1_se1_groups[] = { + "gpio36", "gpio37", "gpio38", "gpio39", +}; + +static const char *const qup1_se2_groups[] = { + "gpio40", "gpio41", "gpio42", "gpio43", "gpio134", "gpio135", "gpio136", +}; + +static const char *const qup1_se3_groups[] = { + "gpio44", "gpio45", "gpio46", "gpio47", +}; + +static const char *const qup1_se4_groups[] = { + "gpio48", "gpio49", "gpio50", "gpio51", +}; + +static const char *const qup1_se5_groups[] = { + "gpio52", "gpio53", "gpio54", "gpio55", +}; + +static const char *const qup1_se6_groups[] = { + "gpio56", "gpio57", "gpio58", "gpio59", +}; + +static const char *const qup1_se7_groups[] = { + "gpio60", "gpio61", "gpio62", "gpio63", +}; + +static const char *const qup2_se0_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; + +static const char *const qup2_se1_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", +}; + +static const char *const qup2_se2_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio15", +}; + +static const char *const qup2_se3_groups[] = { + "gpio12", "gpio13", "gpio14", "gpio15", +}; + +static const char *const qup2_se4_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19", +}; + +static const char *const qup2_se5_groups[] = { + "gpio20", "gpio21", "gpio22", "gpio23", +}; + +static const char *const qup2_se6_groups[] = { + "gpio24", "gpio25", "gpio26", "gpio27", +}; + +static const char *const qup2_se7_groups[] = { + "gpio28", "gpio29", "gpio30", "gpio31", +}; + +static const char *const sd_write_protect_groups[] = { + "gpio85", +}; + +static const char *const sdc40_groups[] = { + "gpio36", "gpio49", +}; + +static const char *const sdc41_groups[] = { + "gpio37", "gpio51", +}; + +static const char *const sdc42_groups[] = { + "gpio38", "gpio60", +}; + +static const char *const sdc43_groups[] = { + "gpio39", "gpio61", +}; + +static const char *const sdc4_clk_groups[] = { + "gpio50", "gpio150", +}; + +static const char *const sdc4_cmd_groups[] = { + "gpio48", "gpio151", +}; + +static const char *const tb_trig_sdc2_groups[] = { + "gpio89", +}; + +static const char *const tb_trig_sdc4_groups[] = { + "gpio147", +}; + +static const char *const tmess_prng0_groups[] = { + "gpio85", +}; + +static const char *const tmess_prng1_groups[] = { + "gpio64", +}; + +static const char *const tmess_prng2_groups[] = { + "gpio65", +}; + +static const char *const tmess_prng3_groups[] = { + "gpio66", +}; + +static const char *const tsense_pwm1_groups[] = { + "gpio57", +}; + +static const char *const tsense_pwm2_groups[] = { + "gpio57", +}; + +static const char *const tsense_pwm3_groups[] = { + "gpio57", +}; + +static const char *const tsense_pwm4_groups[] = { + "gpio57", +}; + +static const char *const uim0_clk_groups[] = { + "gpio131", +}; + +static const char *const uim0_data_groups[] = { + "gpio130", +}; 
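The register layout behind all of these groups comes from the PINGROUP() macro at the top of this file: each GPIO owns a REG_SIZE (0x1000) byte tile, with the control register at offset 0x0, the I/O register at 0x4, the interrupt configuration (and, on this SoC, interrupt target) register at 0x8, and the interrupt status register at 0xc. A minimal stand-alone sketch of that arithmetic, with an arbitrarily chosen pin number:

#include <stdio.h>

#define REG_SIZE 0x1000	/* per-GPIO register tile, as in pinctrl-sm8750.c */

int main(void)
{
	unsigned int id = 57;	/* arbitrary example pin */

	printf("gpio%u ctl_reg         = %#x\n", id, REG_SIZE * id);
	printf("gpio%u io_reg          = %#x\n", id, 0x4 + REG_SIZE * id);
	printf("gpio%u intr_cfg_reg    = %#x\n", id, 0x8 + REG_SIZE * id);
	printf("gpio%u intr_status_reg = %#x\n", id, 0xc + REG_SIZE * id);
	return 0;
}

Note that intr_target_reg reuses offset 0x8: on this TLMM the interrupt target selection lives in the same register as the interrupt configuration, picked out by intr_target_bit (5) in the macro.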
+ +static const char *const uim0_present_groups[] = { + "gpio133", +}; + +static const char *const uim0_reset_groups[] = { + "gpio132", +}; + +static const char *const uim1_clk_groups[] = { + "gpio37", "gpio55", "gpio71", "gpio135", +}; + +static const char *const uim1_data_groups[] = { + "gpio36", "gpio54", "gpio70", "gpio134", +}; + +static const char *const uim1_present_groups[] = { + "gpio137", +}; + +static const char *const uim1_reset_groups[] = { + "gpio39", "gpio56", "gpio72", "gpio136", +}; + +static const char *const usb1_hs_groups[] = { + "gpio79", +}; + +static const char *const usb_phy_groups[] = { + "gpio59", "gpio61", +}; + +static const char *const vfr_0_groups[] = { + "gpio150", +}; + +static const char *const vfr_1_groups[] = { + "gpio155", +}; + +static const char *const vsense_trigger_mirnat_groups[] = { + "gpio59", +}; + +static const char *const wcn_sw_groups[] = { + "gpio19", +}; + +static const char *const wcn_sw_ctrl_groups[] = { + "gpio18", +}; + +static const struct pinfunction sm8750_functions[] = { + MSM_PIN_FUNCTION(gpio), + MSM_PIN_FUNCTION(aoss_cti), + MSM_PIN_FUNCTION(atest_char), + MSM_PIN_FUNCTION(atest_usb), + MSM_PIN_FUNCTION(audio_ext_mclk0), + MSM_PIN_FUNCTION(audio_ext_mclk1), + MSM_PIN_FUNCTION(audio_ref_clk), + MSM_PIN_FUNCTION(cam_aon_mclk2), + MSM_PIN_FUNCTION(cam_aon_mclk4), + MSM_PIN_FUNCTION(cam_mclk), + MSM_PIN_FUNCTION(cci_async_in), + MSM_PIN_FUNCTION(cci_i2c_scl), + MSM_PIN_FUNCTION(cci_i2c_sda), + MSM_PIN_FUNCTION(cci_timer), + MSM_PIN_FUNCTION(cmu_rng), + MSM_PIN_FUNCTION(coex_uart1_rx), + MSM_PIN_FUNCTION(coex_uart1_tx), + MSM_PIN_FUNCTION(coex_uart2_rx), + MSM_PIN_FUNCTION(coex_uart2_tx), + MSM_PIN_FUNCTION(dbg_out_clk), + MSM_PIN_FUNCTION(ddr_bist_complete), + MSM_PIN_FUNCTION(ddr_bist_fail), + MSM_PIN_FUNCTION(ddr_bist_start), + MSM_PIN_FUNCTION(ddr_bist_stop), + MSM_PIN_FUNCTION(ddr_pxi0), + MSM_PIN_FUNCTION(ddr_pxi1), + MSM_PIN_FUNCTION(ddr_pxi2), + MSM_PIN_FUNCTION(ddr_pxi3), + MSM_PIN_FUNCTION(dp_hot), + MSM_PIN_FUNCTION(egpio), + MSM_PIN_FUNCTION(gcc_gp1), + MSM_PIN_FUNCTION(gcc_gp2), + MSM_PIN_FUNCTION(gcc_gp3), + MSM_PIN_FUNCTION(gnss_adc0), + MSM_PIN_FUNCTION(gnss_adc1), + MSM_PIN_FUNCTION(i2chub0_se0), + MSM_PIN_FUNCTION(i2chub0_se1), + MSM_PIN_FUNCTION(i2chub0_se2), + MSM_PIN_FUNCTION(i2chub0_se3), + MSM_PIN_FUNCTION(i2chub0_se4), + MSM_PIN_FUNCTION(i2chub0_se5), + MSM_PIN_FUNCTION(i2chub0_se6), + MSM_PIN_FUNCTION(i2chub0_se7), + MSM_PIN_FUNCTION(i2chub0_se8), + MSM_PIN_FUNCTION(i2chub0_se9), + MSM_PIN_FUNCTION(i2s0_data0), + MSM_PIN_FUNCTION(i2s0_data1), + MSM_PIN_FUNCTION(i2s0_sck), + MSM_PIN_FUNCTION(i2s0_ws), + MSM_PIN_FUNCTION(i2s1_data0), + MSM_PIN_FUNCTION(i2s1_data1), + MSM_PIN_FUNCTION(i2s1_sck), + MSM_PIN_FUNCTION(i2s1_ws), + MSM_PIN_FUNCTION(ibi_i3c), + MSM_PIN_FUNCTION(jitter_bist), + MSM_PIN_FUNCTION(mdp_esync0_out), + MSM_PIN_FUNCTION(mdp_esync1_out), + MSM_PIN_FUNCTION(mdp_vsync), + MSM_PIN_FUNCTION(mdp_vsync0_out), + MSM_PIN_FUNCTION(mdp_vsync1_out), + MSM_PIN_FUNCTION(mdp_vsync2_out), + MSM_PIN_FUNCTION(mdp_vsync3_out), + MSM_PIN_FUNCTION(mdp_vsync5_out), + MSM_PIN_FUNCTION(mdp_vsync_e), + MSM_PIN_FUNCTION(nav_gpio0), + MSM_PIN_FUNCTION(nav_gpio1), + MSM_PIN_FUNCTION(nav_gpio2), + MSM_PIN_FUNCTION(nav_gpio3), + MSM_PIN_FUNCTION(pcie0_clk_req_n), + MSM_PIN_FUNCTION(phase_flag), + MSM_PIN_FUNCTION(pll_bist_sync), + MSM_PIN_FUNCTION(pll_clk_aux), + MSM_PIN_FUNCTION(prng_rosc0), + MSM_PIN_FUNCTION(prng_rosc1), + MSM_PIN_FUNCTION(prng_rosc2), + MSM_PIN_FUNCTION(prng_rosc3), + MSM_PIN_FUNCTION(qdss_cti), +
MSM_PIN_FUNCTION(qlink_big_enable), + MSM_PIN_FUNCTION(qlink_big_request), + MSM_PIN_FUNCTION(qlink_little_enable), + MSM_PIN_FUNCTION(qlink_little_request), + MSM_PIN_FUNCTION(qlink_wmss), + MSM_PIN_FUNCTION(qspi0), + MSM_PIN_FUNCTION(qspi1), + MSM_PIN_FUNCTION(qspi2), + MSM_PIN_FUNCTION(qspi3), + MSM_PIN_FUNCTION(qspi_clk), + MSM_PIN_FUNCTION(qspi_cs), + MSM_PIN_FUNCTION(qup1_se0), + MSM_PIN_FUNCTION(qup1_se1), + MSM_PIN_FUNCTION(qup1_se2), + MSM_PIN_FUNCTION(qup1_se3), + MSM_PIN_FUNCTION(qup1_se4), + MSM_PIN_FUNCTION(qup1_se5), + MSM_PIN_FUNCTION(qup1_se6), + MSM_PIN_FUNCTION(qup1_se7), + MSM_PIN_FUNCTION(qup2_se0), + MSM_PIN_FUNCTION(qup2_se1), + MSM_PIN_FUNCTION(qup2_se2), + MSM_PIN_FUNCTION(qup2_se3), + MSM_PIN_FUNCTION(qup2_se4), + MSM_PIN_FUNCTION(qup2_se5), + MSM_PIN_FUNCTION(qup2_se6), + MSM_PIN_FUNCTION(qup2_se7), + MSM_PIN_FUNCTION(sd_write_protect), + MSM_PIN_FUNCTION(sdc40), + MSM_PIN_FUNCTION(sdc41), + MSM_PIN_FUNCTION(sdc42), + MSM_PIN_FUNCTION(sdc43), + MSM_PIN_FUNCTION(sdc4_clk), + MSM_PIN_FUNCTION(sdc4_cmd), + MSM_PIN_FUNCTION(tb_trig_sdc2), + MSM_PIN_FUNCTION(tb_trig_sdc4), + MSM_PIN_FUNCTION(tmess_prng0), + MSM_PIN_FUNCTION(tmess_prng1), + MSM_PIN_FUNCTION(tmess_prng2), + MSM_PIN_FUNCTION(tmess_prng3), + MSM_PIN_FUNCTION(tsense_pwm1), + MSM_PIN_FUNCTION(tsense_pwm2), + MSM_PIN_FUNCTION(tsense_pwm3), + MSM_PIN_FUNCTION(tsense_pwm4), + MSM_PIN_FUNCTION(uim0_clk), + MSM_PIN_FUNCTION(uim0_data), + MSM_PIN_FUNCTION(uim0_present), + MSM_PIN_FUNCTION(uim0_reset), + MSM_PIN_FUNCTION(uim1_clk), + MSM_PIN_FUNCTION(uim1_data), + MSM_PIN_FUNCTION(uim1_present), + MSM_PIN_FUNCTION(uim1_reset), + MSM_PIN_FUNCTION(usb1_hs), + MSM_PIN_FUNCTION(usb_phy), + MSM_PIN_FUNCTION(vfr_0), + MSM_PIN_FUNCTION(vfr_1), + MSM_PIN_FUNCTION(vsense_trigger_mirnat), + MSM_PIN_FUNCTION(wcn_sw), + MSM_PIN_FUNCTION(wcn_sw_ctrl), +}; + +/* + * Every pin is maintained as a single group, and any missing or non-existent + * pin is maintained as a dummy group to keep the pin group index in sync with + * the pin descriptors registered with the pinctrl core. + * Clients cannot request these dummy pin groups.
+ */ +static const struct msm_pingroup sm8750_groups[] = { + [0] = PINGROUP(0, qup2_se0, ibi_i3c, _, _, _, _, _, _, _, _, egpio), + [1] = PINGROUP(1, qup2_se0, ibi_i3c, _, _, _, _, _, _, _, _, egpio), + [2] = PINGROUP(2, qup2_se0, _, _, _, _, _, _, _, _, _, egpio), + [3] = PINGROUP(3, qup2_se0, _, _, _, _, _, _, _, _, _, egpio), + [4] = PINGROUP(4, qup2_se1, ibi_i3c, _, _, _, _, _, _, _, _, egpio), + [5] = PINGROUP(5, qup2_se1, ibi_i3c, _, _, _, _, _, _, _, _, egpio), + [6] = PINGROUP(6, qup2_se1, _, _, _, _, _, _, _, _, _, egpio), + [7] = PINGROUP(7, qup2_se1, _, _, _, _, _, _, _, _, _, egpio), + [8] = PINGROUP(8, qup2_se2, ibi_i3c, _, _, _, _, _, _, _, _, _), + [9] = PINGROUP(9, qup2_se2, ibi_i3c, _, _, _, _, _, _, _, _, _), + [10] = PINGROUP(10, qup2_se2, cci_async_in, phase_flag, _, _, _, _, _, _, _, _), + [11] = PINGROUP(11, qup2_se2, cci_async_in, phase_flag, _, _, _, _, _, _, _, _), + [12] = PINGROUP(12, qup2_se3, ibi_i3c, qup2_se2, _, _, _, _, _, _, _, _), + [13] = PINGROUP(13, qup2_se3, ibi_i3c, qup2_se2, _, _, _, _, _, _, _, _), + [14] = PINGROUP(14, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _), + [15] = PINGROUP(15, qup2_se3, cci_async_in, qup2_se2, phase_flag, _, _, _, _, _, _, _), + [16] = PINGROUP(16, qup2_se4, phase_flag, _, _, _, _, _, _, _, _, _), + [17] = PINGROUP(17, qup2_se4, phase_flag, _, _, _, _, _, _, _, _, _), + [18] = PINGROUP(18, wcn_sw_ctrl, qup2_se4, phase_flag, _, _, _, _, _, _, _, _), + [19] = PINGROUP(19, wcn_sw, qup2_se4, phase_flag, _, _, _, _, _, _, _, _), + [20] = PINGROUP(20, qup2_se5, phase_flag, _, _, _, _, _, _, _, _, _), + [21] = PINGROUP(21, qup2_se5, phase_flag, _, _, _, _, _, _, _, _, _), + [22] = PINGROUP(22, qup2_se5, phase_flag, _, _, _, _, _, _, _, _, _), + [23] = PINGROUP(23, qup2_se5, qup2_se5, phase_flag, _, _, _, _, _, _, _, _), + [24] = PINGROUP(24, qup2_se6, phase_flag, _, _, _, _, _, _, _, _, _), + [25] = PINGROUP(25, qup2_se6, phase_flag, _, _, _, _, _, _, _, _, _), + [26] = PINGROUP(26, qup2_se6, phase_flag, _, _, _, _, _, _, _, _, _), + [27] = PINGROUP(27, qup2_se6, qdss_cti, phase_flag, _, _, _, _, _, _, _, _), + [28] = PINGROUP(28, qup2_se7, ibi_i3c, phase_flag, _, _, _, _, _, _, _, _), + [29] = PINGROUP(29, qup2_se7, ibi_i3c, phase_flag, _, _, _, _, _, _, _, _), + [30] = PINGROUP(30, qup2_se7, _, _, _, _, _, _, _, _, _, _), + [31] = PINGROUP(31, qup2_se7, qdss_cti, phase_flag, _, _, _, _, _, _, _, _), + [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _, egpio), + [33] = PINGROUP(33, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _, egpio), + [34] = PINGROUP(34, qup1_se0, _, _, _, _, _, _, _, _, _, egpio), + [35] = PINGROUP(35, qup1_se0, _, _, _, _, _, _, _, _, _, egpio), + [36] = PINGROUP(36, qup1_se1, uim1_data, ibi_i3c, sdc40, _, _, _, _, _, _, egpio), + [37] = PINGROUP(37, qup1_se1, uim1_clk, ibi_i3c, sdc41, _, _, _, _, _, _, egpio), + [38] = PINGROUP(38, qup1_se1, sdc42, _, _, _, _, _, _, _, _, _), + [39] = PINGROUP(39, qup1_se1, uim1_reset, sdc43, _, _, _, _, _, _, _, _), + [40] = PINGROUP(40, qup1_se2, cmu_rng, ddr_bist_fail, _, _, _, _, _, _, _, _), + [41] = PINGROUP(41, qup1_se2, cmu_rng, ddr_bist_start, _, _, _, _, _, _, _, _), + [42] = PINGROUP(42, qup1_se2, cmu_rng, _, _, _, _, _, _, _, _, _), + [43] = PINGROUP(43, qup1_se2, cmu_rng, _, ddr_pxi2, _, _, _, _, _, _, _), + [44] = PINGROUP(44, qup1_se3, ddr_bist_complete, ddr_pxi1, _, _, _, _, _, _, _, _), + [45] = PINGROUP(45, qup1_se3, ddr_bist_stop, ddr_pxi1, _, _, _, _, _, _, _, _), + [46] = PINGROUP(46, qup1_se3, ddr_pxi3, _, _, _, _, _, _, _, _, _), + [47] = 
PINGROUP(47, qup1_se3, dp_hot, _, _, _, _, _, _, _, _, _), + [48] = PINGROUP(48, qup1_se4, ibi_i3c, sdc4_cmd, _, _, _, _, _, _, _, _), + [49] = PINGROUP(49, qup1_se4, ibi_i3c, sdc40, _, _, _, _, _, _, _, _), + [50] = PINGROUP(50, qup1_se4, aoss_cti, sdc4_clk, _, _, _, _, _, _, _, _), + [51] = PINGROUP(51, qup1_se4, aoss_cti, sdc41, _, _, _, _, _, _, _, _), + [52] = PINGROUP(52, qup1_se5, qspi0, ddr_pxi2, _, _, _, _, _, _, _, _), + [53] = PINGROUP(53, qup1_se5, qspi1, _, ddr_pxi3, _, _, _, _, _, _, _), + [54] = PINGROUP(54, qup1_se5, qspi_clk, uim1_data, ddr_pxi0, _, _, _, _, _, _, _), + [55] = PINGROUP(55, qup1_se5, qspi2, uim1_clk, ddr_pxi0, _, _, _, _, _, _, _), + [56] = PINGROUP(56, qup1_se6, qspi3, uim1_reset, _, _, _, _, _, _, _, _), + [57] = PINGROUP(57, qup1_se6, qspi_cs, tsense_pwm1, tsense_pwm2, tsense_pwm3, tsense_pwm4, + _, _, _, _, _), + [58] = PINGROUP(58, qup1_se6, qspi_cs, _, _, _, _, _, _, _, _, _), + [59] = PINGROUP(59, qup1_se6, usb_phy, vsense_trigger_mirnat, _, _, _, _, _, _, _, _), + [60] = PINGROUP(60, qup1_se7, aoss_cti, sdc42, _, _, _, _, _, _, _, _), + [61] = PINGROUP(61, qup1_se7, usb_phy, aoss_cti, sdc43, _, _, _, _, _, _, _), + [62] = PINGROUP(62, qup1_se7, _, _, _, _, _, _, _, _, _, _), + [63] = PINGROUP(63, qup1_se7, _, _, _, _, _, _, _, _, _, _), + [64] = PINGROUP(64, i2chub0_se0, prng_rosc1, tmess_prng1, phase_flag, _, _, _, _, _, _, _), + [65] = PINGROUP(65, i2chub0_se0, prng_rosc2, tmess_prng2, phase_flag, _, _, _, _, _, _, _), + [66] = PINGROUP(66, i2chub0_se1, prng_rosc3, tmess_prng3, phase_flag, _, _, _, _, _, _, _), + [67] = PINGROUP(67, i2chub0_se1, phase_flag, _, _, _, _, _, _, _, _, _), + [68] = PINGROUP(68, i2chub0_se2, phase_flag, _, _, _, _, _, _, _, _, _), + [69] = PINGROUP(69, i2chub0_se2, phase_flag, _, _, _, _, _, _, _, _, _), + [70] = PINGROUP(70, i2chub0_se3, uim1_data, _, atest_usb, _, _, _, _, _, _, _), + [71] = PINGROUP(71, i2chub0_se3, uim1_clk, _, atest_usb, _, _, _, _, _, _, _), + [72] = PINGROUP(72, i2chub0_se4, uim1_reset, qdss_cti, _, atest_usb, _, _, _, _, _, _), + [73] = PINGROUP(73, i2chub0_se4, qdss_cti, jitter_bist, atest_usb, _, _, _, _, _, _, _), + [74] = PINGROUP(74, i2chub0_se5, _, _, _, _, _, _, _, _, _, _), + [75] = PINGROUP(75, i2chub0_se5, _, _, _, _, _, _, _, _, _, _), + [76] = PINGROUP(76, i2chub0_se6, atest_usb, _, _, _, _, _, _, _, _, _), + [77] = PINGROUP(77, i2chub0_se6, gnss_adc1, _, _, _, _, _, _, _, _, _), + [78] = PINGROUP(78, dbg_out_clk, gnss_adc0, _, _, _, _, _, _, _, _, _), + [79] = PINGROUP(79, usb1_hs, gnss_adc0, _, _, _, _, _, _, _, _, _), + [80] = PINGROUP(80, i2chub0_se9, _, _, _, _, _, _, _, _, _, _), + [81] = PINGROUP(81, i2chub0_se9, _, _, _, _, _, _, _, _, _, _), + [82] = PINGROUP(82, i2chub0_se7, qdss_cti, phase_flag, _, _, _, _, _, _, _, _), + [83] = PINGROUP(83, i2chub0_se7, qdss_cti, phase_flag, _, _, _, _, _, _, _, _), + [84] = PINGROUP(84, _, _, _, _, _, _, _, _, _, _, _), + [85] = PINGROUP(85, sd_write_protect, prng_rosc0, tmess_prng0, phase_flag, _, _, _, _, _, + _, _), + [86] = PINGROUP(86, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, gcc_gp1, _, _, _, _, _, _, + _), + [87] = PINGROUP(87, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, mdp_vsync5_out, gcc_gp2, _, + _, _, _, _, _), + [88] = PINGROUP(88, mdp_vsync_e, mdp_esync0_out, gcc_gp3, _, _, _, _, _, _, _, _), + [89] = PINGROUP(89, cam_mclk, tb_trig_sdc2, _, _, _, _, _, _, _, _, _), + [90] = PINGROUP(90, cam_mclk, _, _, _, _, _, _, _, _, _, _), + [91] = PINGROUP(91, cam_aon_mclk2, _, _, _, _, _, _, _, _, _, _), + [92] = PINGROUP(92, 
cam_mclk, _, _, _, _, _, _, _, _, _, _), + [93] = PINGROUP(93, cam_aon_mclk4, _, _, _, _, _, _, _, _, _, _), + [94] = PINGROUP(94, cam_mclk, _, _, _, _, _, _, _, _, _, _), + [95] = PINGROUP(95, cam_mclk, pll_clk_aux, _, _, _, _, _, _, _, _, _), + [96] = PINGROUP(96, cam_mclk, _, _, _, _, _, _, _, _, _, _), + [97] = PINGROUP(97, mdp_vsync, _, _, _, _, _, _, _, _, _, _), + [98] = PINGROUP(98, mdp_vsync, _, _, _, _, _, _, _, _, _, _), + [99] = PINGROUP(99, gnss_adc1, _, _, _, _, _, _, _, _, _, _), + [100] = PINGROUP(100, mdp_esync1_out, _, _, _, _, _, _, _, _, _, _), + [101] = PINGROUP(101, phase_flag, _, _, _, _, _, _, _, _, _, _), + [102] = PINGROUP(102, phase_flag, _, _, _, _, _, _, _, _, _, _), + [103] = PINGROUP(103, pcie0_clk_req_n, phase_flag, _, _, _, _, _, _, _, _, _), + [104] = PINGROUP(104, pll_bist_sync, phase_flag, _, _, _, _, _, _, _, _, _), + [105] = PINGROUP(105, _, _, _, _, _, _, _, _, _, _, egpio), + [106] = PINGROUP(106, _, _, _, _, _, _, _, _, _, _, egpio), + [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _, _, egpio), + [108] = PINGROUP(108, _, _, _, _, _, _, _, _, _, _, egpio), + [109] = PINGROUP(109, cci_timer, _, _, _, _, _, _, _, _, _, _), + [110] = PINGROUP(110, cci_timer, _, _, _, _, _, _, _, _, _, _), + [111] = PINGROUP(111, cci_timer, cci_i2c_sda, _, _, _, _, _, _, _, _, _), + [112] = PINGROUP(112, cci_i2c_sda, _, _, _, _, _, _, _, _, _, _), + [113] = PINGROUP(113, cci_i2c_sda, _, _, _, _, _, _, _, _, _, _), + [114] = PINGROUP(114, cci_i2c_scl, _, _, _, _, _, _, _, _, _, _), + [115] = PINGROUP(115, cci_i2c_sda, _, _, _, _, _, _, _, _, _, _), + [116] = PINGROUP(116, cci_i2c_scl, _, _, _, _, _, _, _, _, _, _), + [117] = PINGROUP(117, cci_i2c_sda, _, _, _, _, _, _, _, _, _, _), + [118] = PINGROUP(118, cci_i2c_scl, _, _, _, _, _, _, _, _, _, _), + [119] = PINGROUP(119, cci_i2c_sda, _, _, _, _, _, _, _, _, _, _), + [120] = PINGROUP(120, cci_i2c_scl, _, _, _, _, _, _, _, _, _, _), + [121] = PINGROUP(121, i2s1_sck, _, _, _, _, _, _, _, _, _, _), + [122] = PINGROUP(122, i2s1_data0, _, _, _, _, _, _, _, _, _, _), + [123] = PINGROUP(123, i2s1_ws, _, _, _, _, _, _, _, _, _, _), + [124] = PINGROUP(124, i2s1_data1, audio_ext_mclk1, audio_ref_clk, _, _, _, _, _, _, _, _), + [125] = PINGROUP(125, audio_ext_mclk0, _, _, _, _, _, _, _, _, _, _), + [126] = PINGROUP(126, i2s0_sck, _, _, _, _, _, _, _, _, _, _), + [127] = PINGROUP(127, i2s0_data0, _, _, _, _, _, _, _, _, _, _), + [128] = PINGROUP(128, i2s0_data1, _, _, _, _, _, _, _, _, _, _), + [129] = PINGROUP(129, i2s0_ws, _, _, _, _, _, _, _, _, _, _), + [130] = PINGROUP(130, uim0_data, atest_char, _, _, _, _, _, _, _, _, _), + [131] = PINGROUP(131, uim0_clk, atest_char, _, _, _, _, _, _, _, _, _), + [132] = PINGROUP(132, uim0_reset, atest_char, _, _, _, _, _, _, _, _, _), + [133] = PINGROUP(133, uim0_present, atest_char, _, _, _, _, _, _, _, _, _), + [134] = PINGROUP(134, uim1_data, qup1_se2, gcc_gp1, _, _, _, _, _, _, _, _), + [135] = PINGROUP(135, uim1_clk, qup1_se2, gcc_gp2, _, _, _, _, _, _, _, _), + [136] = PINGROUP(136, uim1_reset, qup1_se2, gcc_gp3, _, _, _, _, _, _, _, _), + [137] = PINGROUP(137, uim1_present, atest_char, _, _, _, _, _, _, _, _, _), + [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _, _, _), + [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _, _, _), + [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _, _, _), + [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _, _, _), + [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _, _, _), + [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _, _, _), + [144] = PINGROUP(144, _, _, 
_, _, _, _, _, _, _, _, _), + [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _, _, _), + [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _, _, _), + [147] = PINGROUP(147, _, tb_trig_sdc4, _, _, _, _, _, _, _, _, _), + [148] = PINGROUP(148, coex_uart1_rx, cmu_rng, _, _, _, _, _, _, _, _, _), + [149] = PINGROUP(149, coex_uart1_tx, cmu_rng, _, _, _, _, _, _, _, _, _), + [150] = PINGROUP(150, _, vfr_0, coex_uart2_rx, cmu_rng, sdc4_clk, _, _, _, _, _, _), + [151] = PINGROUP(151, _, coex_uart2_tx, cmu_rng, sdc4_cmd, _, _, _, _, _, _, _), + [152] = PINGROUP(152, nav_gpio2, _, _, _, _, _, _, _, _, _, _), + [153] = PINGROUP(153, cci_i2c_scl, _, _, _, _, _, _, _, _, _, _), + [154] = PINGROUP(154, nav_gpio0, nav_gpio3, _, _, _, _, _, _, _, _, _), + [155] = PINGROUP(155, nav_gpio1, vfr_1, _, _, _, _, _, _, _, _, _), + [156] = PINGROUP(156, qlink_little_request, _, _, _, _, _, _, _, _, _, _), + [157] = PINGROUP(157, qlink_little_enable, _, _, _, _, _, _, _, _, _, _), + [158] = PINGROUP(158, qlink_wmss, _, _, _, _, _, _, _, _, _, _), + [159] = PINGROUP(159, qlink_big_request, qdss_cti, _, _, _, _, _, _, _, _, _), + [160] = PINGROUP(160, qlink_big_enable, _, _, _, _, _, _, _, _, _, _), + [161] = PINGROUP(161, _, _, _, _, _, _, _, _, _, _, _), + [162] = PINGROUP(162, qdss_cti, _, _, _, _, _, _, _, _, _, _), + [163] = PINGROUP(163, cci_timer, _, _, _, _, _, _, _, _, _, _), + [164] = PINGROUP(164, cci_timer, cci_i2c_scl, _, _, _, _, _, _, _, _, _), + [165] = PINGROUP(165, _, _, _, _, _, _, _, _, _, _, egpio), + [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _, _, egpio), + [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _, _, egpio), + [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _, _, egpio), + [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _, _, egpio), + [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _, _, egpio), + [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _, _, egpio), + [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _, _, egpio), + [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _, _, egpio), + [174] = PINGROUP(174, _, _, _, _, _, _, _, _, _, _, egpio), + [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _, _, egpio), + [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _, _, egpio), + [177] = PINGROUP(177, _, _, _, _, _, _, _, _, _, _, egpio), + [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _, _, egpio), + [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _, _, egpio), + [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _, _, egpio), + [181] = PINGROUP(181, _, _, _, _, _, _, _, _, _, _, egpio), + [182] = PINGROUP(182, _, _, _, _, _, _, _, _, _, _, egpio), + [183] = PINGROUP(183, _, _, _, _, _, _, _, _, _, _, egpio), + [184] = PINGROUP(184, _, _, _, _, _, _, _, _, _, _, egpio), + [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _, _, egpio), + [186] = PINGROUP(186, _, _, _, _, _, _, _, _, _, _, egpio), + [187] = PINGROUP(187, _, _, _, _, _, _, _, _, _, _, egpio), + [188] = PINGROUP(188, _, _, _, _, _, _, _, _, _, _, egpio), + [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _, _, egpio), + [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _, _, egpio), + [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _, _, egpio), + [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _, _, egpio), + [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _, _, egpio), + [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _, _, egpio), + [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _, _, egpio), + [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _, _, egpio), + [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _, _, egpio), + [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _, _, 
egpio), + [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _, _, egpio), + [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _, _, egpio), + [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _, _, egpio), + [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _, _, egpio), + [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _, _, egpio), + [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _, _, egpio), + [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _, _, egpio), + [206] = PINGROUP(206, i2chub0_se8, _, _, _, _, _, _, _, _, _, egpio), + [207] = PINGROUP(207, i2chub0_se8, _, _, _, _, _, _, _, _, _, egpio), + [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _, _, egpio), + [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _, _, egpio), + [210] = PINGROUP(210, _, _, _, _, _, _, _, _, _, _, egpio), + [211] = PINGROUP(211, _, _, _, _, _, _, _, _, _, _, egpio), + [212] = PINGROUP(212, _, _, _, _, _, _, _, _, _, _, egpio), + [213] = PINGROUP(213, _, _, _, _, _, _, _, _, _, _, egpio), + [214] = PINGROUP(214, _, _, _, _, _, _, _, _, _, _, egpio), + [215] = UFS_RESET(ufs_reset, 0xe2004, 0xe3000), + [216] = SDC_QDSD_PINGROUP(sdc2_clk, 0xdb000, 14, 6), + [217] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xdb000, 11, 3), + [218] = SDC_QDSD_PINGROUP(sdc2_data, 0xdb000, 9, 0), +}; + +static const struct msm_gpio_wakeirq_map sm8750_pdc_map[] = { + { 0, 72 }, { 3, 80 }, { 4, 73 }, { 7, 74 }, { 8, 75 }, + { 11, 76 }, { 12, 87 }, { 15, 98 }, { 18, 110 }, { 19, 79 }, + { 23, 82 }, { 24, 83 }, { 27, 84 }, { 28, 85 }, { 31, 86 }, + { 32, 92 }, { 35, 68 }, { 36, 93 }, { 39, 94 }, { 43, 95 }, + { 46, 96 }, { 47, 121 }, { 48, 97 }, { 51, 118 }, { 54, 102 }, + { 55, 71 }, { 56, 103 }, { 57, 104 }, { 59, 105 }, { 61, 81 }, + { 63, 91 }, { 64, 77 }, { 65, 90 }, { 66, 106 }, { 67, 99 }, + { 68, 112 }, { 69, 113 }, { 75, 114 }, { 78, 115 }, { 79, 116 }, + { 80, 122 }, { 81, 123 }, { 84, 101 }, { 85, 124 }, { 86, 125 }, + { 87, 126 }, { 88, 127 }, { 95, 128 }, { 96, 129 }, { 97, 100 }, + { 98, 117 }, { 99, 78 }, { 102, 130 }, { 103, 131 }, { 104, 132 }, + { 108, 133 }, { 133, 134 }, { 137, 67 }, { 148, 135 }, { 150, 136 }, + { 152, 137 }, { 154, 138 }, { 155, 89 }, { 156, 139 }, { 159, 140 }, + { 162, 109 }, { 163, 108 }, { 166, 141 }, { 169, 142 }, { 171, 143 }, + { 172, 144 }, { 174, 145 }, { 176, 146 }, { 177, 120 }, { 181, 147 }, + { 182, 148 }, { 185, 149 }, { 188, 111 }, { 190, 88 }, { 191, 150 }, + { 192, 151 }, { 193, 152 }, { 196, 153 }, { 197, 154 }, { 198, 70 }, + { 199, 119 }, { 200, 69 }, { 201, 155 }, { 202, 156 }, { 203, 157 }, + { 204, 158 }, { 205, 107 }, { 209, 159 }, +}; + +static const struct msm_pinctrl_soc_data sm8750_tlmm = { + .pins = sm8750_pins, + .npins = ARRAY_SIZE(sm8750_pins), + .functions = sm8750_functions, + .nfunctions = ARRAY_SIZE(sm8750_functions), + .groups = sm8750_groups, + .ngroups = ARRAY_SIZE(sm8750_groups), + .ngpios = 216, + .wakeirq_map = sm8750_pdc_map, + .nwakeirq_map = ARRAY_SIZE(sm8750_pdc_map), + .egpio_func = 11, +}; + +static int sm8750_tlmm_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &sm8750_tlmm); +} + +static const struct of_device_id sm8750_tlmm_of_match[] = { + { .compatible = "qcom,sm8750-tlmm", }, + {}, +}; + +static struct platform_driver sm8750_tlmm_driver = { + .driver = { + .name = "sm8750-tlmm", + .of_match_table = sm8750_tlmm_of_match, + }, + .probe = sm8750_tlmm_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init sm8750_tlmm_init(void) +{ + return platform_driver_register(&sm8750_tlmm_driver); +} +arch_initcall(sm8750_tlmm_init); + +static void __exit 
sm8750_tlmm_exit(void) +{ + platform_driver_unregister(&sm8750_tlmm_driver); +} +module_exit(sm8750_tlmm_exit); + +MODULE_DESCRIPTION("QTI SM8750 TLMM driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, sm8750_tlmm_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index d2dd66769aa8..0c806b8128b6 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -667,7 +667,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, "push-pull", "open-drain", "open-source" }; static const char *const strengths[] = { - "no", "high", "medium", "low" + "no", "low", "medium", "high" }; pad = pctldev->desc->pins[pin].drv_data; @@ -1169,7 +1169,7 @@ static int pmic_gpio_probe(struct platform_device *pdev) * files which don't set the "gpio-ranges" property or systems that * utilize ACPI the driver has to call gpiochip_add_pin_range(). */ - if (!of_property_read_bool(dev->of_node, "gpio-ranges")) { + if (!of_property_present(dev->of_node, "gpio-ranges")) { ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins); if (ret) { @@ -1226,6 +1226,8 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm8550ve-gpio", .data = (void *) 8 }, { .compatible = "qcom,pm8550vs-gpio", .data = (void *) 6 }, { .compatible = "qcom,pm8916-gpio", .data = (void *) 4 }, + /* pm8937 has 8 GPIOs with holes on 3, 4 and 6 */ + { .compatible = "qcom,pm8937-gpio", .data = (void *) 8 }, { .compatible = "qcom,pm8941-gpio", .data = (void *) 36 }, /* pm8950 has 8 GPIOs with holes on 3 */ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 }, @@ -1268,7 +1270,7 @@ static struct platform_driver pmic_gpio_driver = { .of_match_table = pmic_gpio_of_match, }, .probe = pmic_gpio_probe, - .remove_new = pmic_gpio_remove, + .remove = pmic_gpio_remove, }; module_platform_driver(pmic_gpio_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index d16ece90d926..84de584cf7eb 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -983,6 +983,7 @@ static const struct of_device_id pmic_mpp_of_match[] = { { .compatible = "qcom,pm8226-mpp", .data = (void *) 8 }, { .compatible = "qcom,pm8841-mpp", .data = (void *) 4 }, { .compatible = "qcom,pm8916-mpp", .data = (void *) 4 }, + { .compatible = "qcom,pm8937-mpp", .data = (void *) 4 }, { .compatible = "qcom,pm8941-mpp", .data = (void *) 8 }, { .compatible = "qcom,pm8950-mpp", .data = (void *) 4 }, { .compatible = "qcom,pmi8950-mpp", .data = (void *) 4 }, @@ -1000,7 +1001,7 @@ static struct platform_driver pmic_mpp_driver = { .of_match_table = pmic_mpp_of_match, }, .probe = pmic_mpp_probe, - .remove_new = pmic_mpp_remove, + .remove = pmic_mpp_remove, }; module_platform_driver(pmic_mpp_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index 9cd5247ea574..2225dc49d477 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -832,7 +832,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) * files which don't set the "gpio-ranges" property or systems that * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
*/ - if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) { + if (!of_property_present(pctrl->dev->of_node, "gpio-ranges")) { ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, pctrl->chip.ngpio); if (ret) { @@ -866,7 +866,7 @@ static struct platform_driver pm8xxx_gpio_driver = { .of_match_table = pm8xxx_gpio_of_match, }, .probe = pm8xxx_gpio_probe, - .remove_new = pm8xxx_gpio_remove, + .remove = pm8xxx_gpio_remove, }; module_platform_driver(pm8xxx_gpio_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 3aee6835a2de..9b1039c08aa6 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -949,7 +949,7 @@ static struct platform_driver pm8xxx_mpp_driver = { .of_match_table = pm8xxx_mpp_of_match, }, .probe = pm8xxx_mpp_probe, - .remove_new = pm8xxx_mpp_remove, + .remove = pm8xxx_mpp_remove, }; module_platform_driver(pm8xxx_mpp_driver); diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c index abfcdd3da9e8..419cb8facb2f 100644 --- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c +++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c @@ -1861,7 +1861,7 @@ static struct platform_driver x1e80100_pinctrl_driver = { .of_match_table = x1e80100_pinctrl_of_match, }, .probe = x1e80100_pinctrl_probe, - .remove_new = msm_pinctrl_remove, + .remove = msm_pinctrl_remove, }; static int __init x1e80100_pinctrl_init(void) diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig index 14bd55d64731..7f3f41c7fe54 100644 --- a/drivers/pinctrl/renesas/Kconfig +++ b/drivers/pinctrl/renesas/Kconfig @@ -41,6 +41,7 @@ config PINCTRL_RENESAS select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0 select PINCTRL_RZG2L if ARCH_RZG2L select PINCTRL_RZV2M if ARCH_R9A09G011 + select PINCTRL_RZG2L if ARCH_R9A09G057 select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203 select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264 select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269 diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c index 6527872813dc..b1058504e0bb 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza1.c +++ b/drivers/pinctrl/renesas/pinctrl-rza1.c @@ -19,6 +19,7 @@ #include <linux/ioport.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/pinctrl/consumer.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> @@ -750,6 +751,11 @@ static int rza1_pin_mux_single(struct rza1_pinctrl *rza1_pctl, static int rza1_gpio_request(struct gpio_chip *chip, unsigned int gpio) { struct rza1_port *port = gpiochip_get_data(chip); + int ret; + + ret = pinctrl_gpio_request(chip, gpio); + if (ret) + return ret; rza1_pin_reset(port, gpio); @@ -771,6 +777,7 @@ static void rza1_gpio_free(struct gpio_chip *chip, unsigned int gpio) struct rza1_port *port = gpiochip_get_data(chip); rza1_pin_reset(port, gpio); + pinctrl_gpio_free(chip, gpio); } static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio) diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index af689d7c117f..dd1f8c29d3e7 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> +#include <linux/pinctrl/consumer.h> #include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> @@ -229,6 +230,8 @@ static const char * const 
rza2_gpio_names[] = { static struct gpio_chip chip = { .names = rza2_gpio_names, .base = -1, + .request = pinctrl_gpio_request, + .free = pinctrl_gpio_free, .get_direction = rza2_chip_get_direction, .direction_input = rza2_chip_direction_input, .direction_output = rza2_chip_direction_output, diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 5a403915fed2..8ffb9430a134 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -139,6 +139,8 @@ #define IEN(off) (0x1800 + (off) * 8) #define PUPD(off) (0x1C00 + (off) * 8) #define ISEL(off) (0x2C00 + (off) * 8) +#define NOD(off) (0x3000 + (off) * 8) +#define SMT(off) (0x3400 + (off) * 8) #define SD_CH(off, ch) ((off) + (ch) * 4) #define ETH_POC(off, ch) ((off) + (ch) * 4) #define QSPI (0x3008) @@ -160,6 +162,8 @@ #define IOLH_MASK 0x03 #define SR_MASK 0x01 #define PUPD_MASK 0x03 +#define NOD_MASK 0x01 +#define SMT_MASK 0x01 #define PM_INPUT 0x1 #define PM_OUTPUT 0x2 @@ -168,7 +172,6 @@ #define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT) #define RZG2L_TINT_MAX_INTERRUPT 32 -#define RZG2L_TINT_IRQ_START_INDEX 9 #define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i)) /* Custom pinconf parameters */ @@ -247,6 +250,7 @@ enum rzg2l_iolh_index { * @iolh_groupb_ua: IOLH group B uA specific values * @iolh_groupc_ua: IOLH group C uA specific values * @iolh_groupb_oi: IOLH group B output impedance specific values + * @tint_start_index: the start index for the TINT interrupts * @drive_strength_ua: drive strength in uA is supported (otherwise mA is supported) * @func_base: base number for port function (see register PFC) * @oen_max_pin: the maximum pin number supporting output enable @@ -258,6 +262,7 @@ struct rzg2l_hwcfg { u16 iolh_groupb_ua[RZG2L_IOLH_IDX_MAX]; u16 iolh_groupc_ua[RZG2L_IOLH_IDX_MAX]; u16 iolh_groupb_oi[4]; + u16 tint_start_index; bool drive_strength_ua; u8 func_base; u8 oen_max_pin; @@ -1337,6 +1342,27 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev, break; } + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + case PIN_CONFIG_DRIVE_PUSH_PULL: + if (!(cfg & PIN_CFG_NOD)) + return -EINVAL; + + arg = rzg2l_read_pin_config(pctrl, NOD(off), bit, NOD_MASK); + if (!arg && param != PIN_CONFIG_DRIVE_PUSH_PULL) + return -EINVAL; + if (arg && param != PIN_CONFIG_DRIVE_OPEN_DRAIN) + return -EINVAL; + break; + + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (!(cfg & PIN_CFG_SMT)) + return -EINVAL; + + arg = rzg2l_read_pin_config(pctrl, SMT(off), bit, SMT_MASK); + if (!arg) + return -EINVAL; + break; + case RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE: if (!(cfg & PIN_CFG_IOLH_RZV2H)) return -EINVAL; @@ -1466,6 +1492,22 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index); break; + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + case PIN_CONFIG_DRIVE_PUSH_PULL: + if (!(cfg & PIN_CFG_NOD)) + return -EINVAL; + + rzg2l_rmw_pin_config(pctrl, NOD(off), bit, NOD_MASK, + param == PIN_CONFIG_DRIVE_OPEN_DRAIN ? 
1 : 0); + break; + + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (!(cfg & PIN_CFG_SMT)) + return -EINVAL; + + rzg2l_rmw_pin_config(pctrl, SMT(off), bit, SMT_MASK, arg); + break; + case RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE: if (!(cfg & PIN_CFG_IOLH_RZV2H)) return -EINVAL; @@ -2379,7 +2421,7 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc, rzg2l_gpio_irq_endisable(pctrl, child, true); pctrl->hwirq[irq] = child; - irq += RZG2L_TINT_IRQ_START_INDEX; + irq += pctrl->data->hwcfg->tint_start_index; /* All these interrupts are level high in the CPU */ *parent_type = IRQ_TYPE_LEVEL_HIGH; @@ -2391,21 +2433,6 @@ err: return ret; } -static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip, - union gpio_irq_fwspec *gfwspec, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - struct irq_fwspec *fwspec = &gfwspec->fwspec; - - fwspec->fwnode = chip->irq.parent_domain->fwnode; - fwspec->param_count = 2; - fwspec->param[0] = parent_hwirq; - fwspec->param[1] = parent_type; - - return 0; -} - static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl) { struct irq_domain *domain = pctrl->gpio_chip.irq.domain; @@ -2607,7 +2634,7 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) girq->fwnode = dev_fwnode(pctrl->dev); girq->parent_domain = parent_domain; girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq; - girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec; + girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell; girq->child_irq_domain_ops.free = rzg2l_gpio_irq_domain_free; girq->init_valid_mask = rzg2l_init_irq_valid_mask; @@ -2710,7 +2737,7 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl) ret = pinctrl_enable(pctrl->pctl); if (ret) - dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n"); + return dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n"); ret = rzg2l_gpio_register(pctrl); if (ret) @@ -3034,6 +3061,7 @@ static const struct rzg2l_hwcfg rzg2l_hwcfg = { [RZG2L_IOLH_IDX_3V3] = 2000, 4000, 8000, 12000, }, .iolh_groupb_oi = { 100, 66, 50, 33, }, + .tint_start_index = 9, .oen_max_pin = 0, }; @@ -3063,6 +3091,7 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = { /* 3v3 power source */ [RZG2L_IOLH_IDX_3V3] = 4500, 5200, 5700, 6050, }, + .tint_start_index = 9, .drive_strength_ua = true, .func_base = 1, .oen_max_pin = 1, /* Pin 1 of P0 and P7 is the maximum OEN pin. */ @@ -3073,6 +3102,7 @@ static const struct rzg2l_hwcfg rzv2h_hwcfg = { .regs = { .pwpr = 0x3c04, }, + .tint_start_index = 17, }; static struct rzg2l_pinctrl_data r9a07g043_data = { diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c index 39af1fe79c84..d442d4f9981c 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzn1.c +++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c @@ -925,7 +925,7 @@ MODULE_DEVICE_TABLE(of, rzn1_pinctrl_match); static struct platform_driver rzn1_pinctrl_driver = { .probe = rzn1_pinctrl_probe, - .remove_new = rzn1_pinctrl_remove, + .remove = rzn1_pinctrl_remove, .driver = { .name = "rzn1-pinctrl", .of_match_table = rzn1_pinctrl_match, diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index 5480e0884abe..3ea7106ce5ea 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -58,6 +58,15 @@ static const struct samsung_pin_bank_type exynos850_bank_type_alive = { .reg_offset = { 0x00, 0x04, 0x08, 0x0c, }, }; +/* + * Bank type for non-alive type. 
Bit fields: + * CON: 4, DAT: 1, PUD: 2, DRV: 3, CONPDN: 2, PUDPDN: 2 + */ +static const struct samsung_pin_bank_type exynos8895_bank_type_off = { + .fld_width = { 4, 1, 2, 3, 2, 2, }, + .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, }, +}; + /* Pad retention control code for accessing PMU regmap */ static atomic_t exynos_shared_retention_refcnt; @@ -618,6 +627,300 @@ const struct samsung_pinctrl_of_match_data exynos850_of_data __initconst = { .num_ctrl = ARRAY_SIZE(exynos850_pin_ctrl), }; +/* pin banks of exynos990 pin-controller 0 (ALIVE) */ +static struct samsung_pin_bank_data exynos990_pin_banks0[] = { + /* Must start with EINTG banks, ordered by EINT group number. */ + EXYNOS850_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), + EXYNOS850_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), + EXYNOS850_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), + EXYNOS850_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), + EXYNOS850_PIN_BANK_EINTW(2, 0x080, "gpa4", 0x10), + EXYNOS850_PIN_BANK_EINTN(7, 0x0A0, "gpq0"), +}; + +/* pin banks of exynos990 pin-controller 1 (CMGP) */ +static struct samsung_pin_bank_data exynos990_pin_banks1[] = { + /* Must start with EINTG banks, ordered by EINT group number. */ + EXYNOS850_PIN_BANK_EINTN(1, 0x000, "gpm0"), + EXYNOS850_PIN_BANK_EINTN(1, 0x020, "gpm1"), + EXYNOS850_PIN_BANK_EINTN(1, 0x040, "gpm2"), + EXYNOS850_PIN_BANK_EINTN(1, 0x060, "gpm3"), + EXYNOS850_PIN_BANK_EINTW(1, 0x080, "gpm4", 0x00), + EXYNOS850_PIN_BANK_EINTW(1, 0x0A0, "gpm5", 0x04), + EXYNOS850_PIN_BANK_EINTW(1, 0x0C0, "gpm6", 0x08), + EXYNOS850_PIN_BANK_EINTW(1, 0x0E0, "gpm7", 0x0c), + EXYNOS850_PIN_BANK_EINTW(1, 0x100, "gpm8", 0x10), + EXYNOS850_PIN_BANK_EINTW(1, 0x120, "gpm9", 0x14), + EXYNOS850_PIN_BANK_EINTW(1, 0x140, "gpm10", 0x18), + EXYNOS850_PIN_BANK_EINTW(1, 0x160, "gpm11", 0x1c), + EXYNOS850_PIN_BANK_EINTW(1, 0x180, "gpm12", 0x20), + EXYNOS850_PIN_BANK_EINTW(1, 0x1A0, "gpm13", 0x24), + EXYNOS850_PIN_BANK_EINTW(1, 0x1C0, "gpm14", 0x28), + EXYNOS850_PIN_BANK_EINTW(1, 0x1E0, "gpm15", 0x2c), + EXYNOS850_PIN_BANK_EINTW(1, 0x200, "gpm16", 0x30), + EXYNOS850_PIN_BANK_EINTW(1, 0x220, "gpm17", 0x34), + EXYNOS850_PIN_BANK_EINTW(1, 0x240, "gpm18", 0x38), + EXYNOS850_PIN_BANK_EINTW(1, 0x260, "gpm19", 0x3c), + EXYNOS850_PIN_BANK_EINTW(1, 0x280, "gpm20", 0x40), + EXYNOS850_PIN_BANK_EINTW(1, 0x2A0, "gpm21", 0x44), + EXYNOS850_PIN_BANK_EINTW(1, 0x2C0, "gpm22", 0x48), + EXYNOS850_PIN_BANK_EINTW(1, 0x2E0, "gpm23", 0x4c), + EXYNOS850_PIN_BANK_EINTW(1, 0x300, "gpm24", 0x50), + EXYNOS850_PIN_BANK_EINTW(1, 0x320, "gpm25", 0x54), + EXYNOS850_PIN_BANK_EINTW(1, 0x340, "gpm26", 0x58), + EXYNOS850_PIN_BANK_EINTW(1, 0x360, "gpm27", 0x5c), + EXYNOS850_PIN_BANK_EINTW(1, 0x380, "gpm28", 0x60), + EXYNOS850_PIN_BANK_EINTW(1, 0x3A0, "gpm29", 0x64), + EXYNOS850_PIN_BANK_EINTW(1, 0x3C0, "gpm30", 0x68), + EXYNOS850_PIN_BANK_EINTW(1, 0x3E0, "gpm31", 0x6c), + EXYNOS850_PIN_BANK_EINTW(1, 0x400, "gpm32", 0x70), + EXYNOS850_PIN_BANK_EINTW(1, 0x420, "gpm33", 0x74), + +}; + +/* pin banks of exynos990 pin-controller 2 (HSI1) */ +static struct samsung_pin_bank_data exynos990_pin_banks2[] = { + /* Must start with EINTG banks, ordered by EINT group number. */ + EXYNOS850_PIN_BANK_EINTG(4, 0x000, "gpf0", 0x00), + EXYNOS850_PIN_BANK_EINTG(6, 0x020, "gpf1", 0x04), + EXYNOS850_PIN_BANK_EINTG(3, 0x040, "gpf2", 0x08), +}; + +/* pin banks of exynos990 pin-controller 3 (HSI2) */ +static struct samsung_pin_bank_data exynos990_pin_banks3[] = { + /* Must start with EINTG banks, ordered by EINT group number. 
*/ + EXYNOS850_PIN_BANK_EINTG(2, 0x000, "gpf3", 0x00), +}; + +/* pin banks of exynos990 pin-controller 4 (PERIC0) */ +static struct samsung_pin_bank_data exynos990_pin_banks4[] = { + /* Must start with EINTG banks, ordered by EINT group number. */ + EXYNOS850_PIN_BANK_EINTG(8, 0x000, "gpp0", 0x00), + EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpp1", 0x04), + EXYNOS850_PIN_BANK_EINTG(8, 0x040, "gpp2", 0x08), + EXYNOS850_PIN_BANK_EINTG(8, 0x060, "gpp3", 0x0C), + EXYNOS850_PIN_BANK_EINTG(8, 0x080, "gpp4", 0x10), + EXYNOS850_PIN_BANK_EINTG(2, 0x0A0, "gpg0", 0x14), +}; + +/* pin banks of exynos990 pin-controller 5 (PERIC1) */ +static struct samsung_pin_bank_data exynos990_pin_banks5[] = { + /* Must start with EINTG banks, ordered by EINT group number. */ + EXYNOS850_PIN_BANK_EINTG(8, 0x000, "gpp5", 0x00), + EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpp6", 0x04), + EXYNOS850_PIN_BANK_EINTG(8, 0x040, "gpp7", 0x08), + EXYNOS850_PIN_BANK_EINTG(8, 0x060, "gpp8", 0x0C), + EXYNOS850_PIN_BANK_EINTG(8, 0x080, "gpp9", 0x10), + EXYNOS850_PIN_BANK_EINTG(6, 0x0A0, "gpc0", 0x14), + EXYNOS850_PIN_BANK_EINTG(4, 0x0C0, "gpg1", 0x18), + EXYNOS850_PIN_BANK_EINTG(8, 0x0E0, "gpb0", 0x1C), + EXYNOS850_PIN_BANK_EINTG(8, 0x100, "gpb1", 0x20), + EXYNOS850_PIN_BANK_EINTG(8, 0x120, "gpb2", 0x24), +}; + +/* pin banks of exynos990 pin-controller 6 (VTS) */ +static struct samsung_pin_bank_data exynos990_pin_banks6[] = { + /* Must start with EINTG banks, ordered by EINT group number. */ + EXYNOS850_PIN_BANK_EINTG(7, 0x000, "gpv0", 0x00), +}; + +static const struct samsung_pin_ctrl exynos990_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 ALIVE data */ + .pin_banks = exynos990_pin_banks0, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks0), + .eint_wkup_init = exynos_eint_wkup_init, + }, { + /* pin-controller instance 1 CMGP data */ + .pin_banks = exynos990_pin_banks1, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks1), + .eint_wkup_init = exynos_eint_wkup_init, + }, { + /* pin-controller instance 2 HSI1 data */ + .pin_banks = exynos990_pin_banks2, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks2), + .eint_gpio_init = exynos_eint_gpio_init, + }, { + /* pin-controller instance 3 HSI2 data */ + .pin_banks = exynos990_pin_banks3, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks3), + .eint_gpio_init = exynos_eint_gpio_init, + }, { + /* pin-controller instance 4 PERIC0 data */ + .pin_banks = exynos990_pin_banks4, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks4), + .eint_gpio_init = exynos_eint_gpio_init, + }, { + /* pin-controller instance 5 PERIC1 data */ + .pin_banks = exynos990_pin_banks5, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks5), + .eint_gpio_init = exynos_eint_gpio_init, + }, { + /* pin-controller instance 6 VTS data */ + .pin_banks = exynos990_pin_banks6, + .nr_banks = ARRAY_SIZE(exynos990_pin_banks6), + }, +}; + +const struct samsung_pinctrl_of_match_data exynos990_of_data __initconst = { + .ctrl = exynos990_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos990_pin_ctrl), +}; + +/* pin banks of exynos9810 pin-controller 0 (ALIVE) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks0[] __initconst = { + EXYNOS850_PIN_BANK_EINTN(6, 0x000, "etc1"), + EXYNOS850_PIN_BANK_EINTW(8, 0x020, "gpa0", 0x00), + EXYNOS850_PIN_BANK_EINTW(8, 0x040, "gpa1", 0x04), + EXYNOS850_PIN_BANK_EINTW(8, 0x060, "gpa2", 0x08), + EXYNOS850_PIN_BANK_EINTW(8, 0x080, "gpa3", 0x0c), + EXYNOS850_PIN_BANK_EINTN(6, 0x0A0, "gpq0"), + EXYNOS850_PIN_BANK_EINTW(2, 0x0C0, "gpa4", 0x10), +}; + +/* pin banks of exynos9810 pin-controller 1 (AUD) */ +static const struct 
samsung_pin_bank_data exynos9810_pin_banks1[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00), + EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpb1", 0x04), + EXYNOS850_PIN_BANK_EINTG(4, 0x040, "gpb2", 0x08), +}; + +/* pin banks of exynos9810 pin-controller 2 (CHUB) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks2[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(8, 0x000, "gph0", 0x00), + EXYNOS850_PIN_BANK_EINTG(5, 0x020, "gph1", 0x04), +}; + +/* pin banks of exynos9810 pin-controller 3 (CMGP) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks3[] __initconst = { + EXYNOS850_PIN_BANK_EINTW(1, 0x000, "gpm0", 0x00), + EXYNOS850_PIN_BANK_EINTW(1, 0x020, "gpm1", 0x04), + EXYNOS850_PIN_BANK_EINTW(1, 0x040, "gpm2", 0x08), + EXYNOS850_PIN_BANK_EINTW(1, 0x060, "gpm3", 0x0C), + EXYNOS850_PIN_BANK_EINTW(1, 0x080, "gpm4", 0x10), + EXYNOS850_PIN_BANK_EINTW(1, 0x0A0, "gpm5", 0x14), + EXYNOS850_PIN_BANK_EINTW(1, 0x0C0, "gpm6", 0x18), + EXYNOS850_PIN_BANK_EINTW(1, 0x0E0, "gpm7", 0x1C), + EXYNOS850_PIN_BANK_EINTW(1, 0x100, "gpm10", 0x20), + EXYNOS850_PIN_BANK_EINTW(1, 0x120, "gpm11", 0x24), + EXYNOS850_PIN_BANK_EINTW(1, 0x140, "gpm12", 0x28), + EXYNOS850_PIN_BANK_EINTW(1, 0x160, "gpm13", 0x2C), + EXYNOS850_PIN_BANK_EINTW(1, 0x180, "gpm14", 0x30), + EXYNOS850_PIN_BANK_EINTW(1, 0x1A0, "gpm15", 0x34), + EXYNOS850_PIN_BANK_EINTW(1, 0x1C0, "gpm16", 0x38), + EXYNOS850_PIN_BANK_EINTW(1, 0x1E0, "gpm17", 0x3C), + EXYNOS850_PIN_BANK_EINTW(1, 0x200, "gpm40", 0x40), + EXYNOS850_PIN_BANK_EINTW(1, 0x220, "gpm41", 0x44), + EXYNOS850_PIN_BANK_EINTW(1, 0x240, "gpm42", 0x48), + EXYNOS850_PIN_BANK_EINTW(1, 0x260, "gpm43", 0x4C), +}; + +/* pin banks of exynos9810 pin-controller 4 (FSYS0) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks4[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(2, 0x000, "gpf0", 0x00), +}; + +/* pin banks of exynos9810 pin-controller 5 (FSYS1) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks5[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(7, 0x000, "gpf1", 0x00), + EXYNOS850_PIN_BANK_EINTG(6, 0x020, "gpf2", 0x04), +}; + +/* pin banks of exynos9810 pin-controller 6 (PERIC0) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks6[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(8, 0x000, "gpp0", 0x00), + EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpp1", 0x04), + EXYNOS850_PIN_BANK_EINTG(8, 0x040, "gpp2", 0x08), + EXYNOS850_PIN_BANK_EINTG(4, 0x060, "gpp3", 0x0C), + EXYNOS850_PIN_BANK_EINTG(8, 0x080, "gpg0", 0x10), + EXYNOS850_PIN_BANK_EINTG(8, 0x0A0, "gpg1", 0x14), + EXYNOS850_PIN_BANK_EINTG(8, 0x0C0, "gpg2", 0x18), +}; + +/* pin banks of exynos9810 pin-controller 7 (PERIC1) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks7[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(8, 0x000, "gpp4", 0x00), + EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpp5", 0x04), + EXYNOS850_PIN_BANK_EINTG(4, 0x040, "gpp6", 0x08), + EXYNOS850_PIN_BANK_EINTG(8, 0x060, "gpc0", 0x0C), + EXYNOS850_PIN_BANK_EINTG(8, 0x080, "gpc1", 0x10), + EXYNOS850_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14), + EXYNOS850_PIN_BANK_EINTG(7, 0x0C0, "gpg3", 0x18), +}; + +/* pin banks of exynos9810 pin-controller 8 (VTS) */ +static const struct samsung_pin_bank_data exynos9810_pin_banks8[] __initconst = { + EXYNOS850_PIN_BANK_EINTG(3, 0x000, "gpt0", 0x00), +}; + +static const struct samsung_pin_ctrl exynos9810_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 ALIVE data */ + .pin_banks = exynos9810_pin_banks0, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks0), + 
.eint_wkup_init = exynos_eint_wkup_init, + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 1 AUD data */ + .pin_banks = exynos9810_pin_banks1, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks1), + }, { + /* pin-controller instance 2 CHUB data */ + .pin_banks = exynos9810_pin_banks2, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks2), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 3 CMGP data */ + .pin_banks = exynos9810_pin_banks3, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks3), + .eint_wkup_init = exynos_eint_wkup_init, + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 4 FSYS0 data */ + .pin_banks = exynos9810_pin_banks4, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks4), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 5 FSYS1 data */ + .pin_banks = exynos9810_pin_banks5, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks5), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 6 PERIC0 data */ + .pin_banks = exynos9810_pin_banks6, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks6), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 7 PERIC1 data */ + .pin_banks = exynos9810_pin_banks7, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks7), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 8 VTS data */ + .pin_banks = exynos9810_pin_banks8, + .nr_banks = ARRAY_SIZE(exynos9810_pin_banks8), + }, +}; + +const struct samsung_pinctrl_of_match_data exynos9810_of_data __initconst = { + .ctrl = exynos9810_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos9810_pin_ctrl), +}; + /* pin banks of exynosautov9 pin-controller 0 (ALIVE) */ static const struct samsung_pin_bank_data exynosautov9_pin_banks0[] __initconst = { EXYNOS850_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), @@ -866,6 +1169,134 @@ const struct samsung_pinctrl_of_match_data exynosautov920_of_data __initconst = .num_ctrl = ARRAY_SIZE(exynosautov920_pin_ctrl), }; +/* pin banks of exynos8895 pin-controller 0 (ALIVE) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa0", 0x00), + EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa1", 0x04), + EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa2", 0x08), + EXYNOS_PIN_BANK_EINTW(8, 0x080, "gpa3", 0x0c), + EXYNOS_PIN_BANK_EINTW(7, 0x0a0, "gpa4", 0x24), +}; + +/* pin banks of exynos8895 pin-controller 1 (ABOX) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks1[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gph0", 0x00), + EXYNOS_PIN_BANK_EINTG(7, 0x020, "gph1", 0x04), + EXYNOS_PIN_BANK_EINTG(4, 0x040, "gph3", 0x08), +}; + +/* pin banks of exynos8895 pin-controller 2 (VTS) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks2[] __initconst = { + EXYNOS_PIN_BANK_EINTG(3, 0x000, "gph2", 0x00), +}; + +/* pin banks of exynos8895 pin-controller 3 (FSYS0) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks3[] __initconst = { + 
EXYNOS8895_PIN_BANK_EINTG(3, 0x000, "gpi0", 0x00), + EXYNOS8895_PIN_BANK_EINTG(8, 0x020, "gpi1", 0x04), +}; + +/* pin banks of exynos8895 pin-controller 4 (FSYS1) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks4[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj1", 0x00), + EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpj0", 0x04), +}; + +/* pin banks of exynos8895 pin-controller 5 (BUSC) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks5[] __initconst = { + EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpb2", 0x00), +}; + +/* pin banks of exynos8895 pin-controller 6 (PERIC0) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks6[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpd0", 0x00), + EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpd1", 0x04), + EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpd2", 0x08), + EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpd3", 0x0C), + EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpb1", 0x10), + EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpe7", 0x14), + EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpf1", 0x18), +}; + +/* pin banks of exynos8895 pin-controller 7 (PERIC1) */ +static const struct samsung_pin_bank_data exynos8895_pin_banks7[] __initconst = { + EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpb0", 0x00), + EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpc0", 0x04), + EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpc1", 0x08), + EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpc2", 0x0C), + EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpc3", 0x10), + EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpk0", 0x14), + EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpe5", 0x18), + EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe6", 0x1C), + EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe2", 0x20), + EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpe3", 0x24), + EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpe4", 0x28), + EXYNOS_PIN_BANK_EINTG(4, 0x160, "gpf0", 0x2C), + EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpe1", 0x30), + EXYNOS_PIN_BANK_EINTG(2, 0x1a0, "gpg0", 0x34), +}; + +static const struct samsung_pin_ctrl exynos8895_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 ALIVE data */ + .pin_banks = exynos8895_pin_banks0, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks0), + .eint_gpio_init = exynos_eint_gpio_init, + .eint_wkup_init = exynos_eint_wkup_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 1 ABOX data */ + .pin_banks = exynos8895_pin_banks1, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks1), + }, { + /* pin-controller instance 2 VTS data */ + .pin_banks = exynos8895_pin_banks2, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks2), + .eint_gpio_init = exynos_eint_gpio_init, + }, { + /* pin-controller instance 3 FSYS0 data */ + .pin_banks = exynos8895_pin_banks3, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks3), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 4 FSYS1 data */ + .pin_banks = exynos8895_pin_banks4, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks4), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 5 BUSC data */ + .pin_banks = exynos8895_pin_banks5, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks5), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, { + /* pin-controller instance 6 PERIC0 data */ + .pin_banks = exynos8895_pin_banks6, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks6), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = 
exynos_pinctrl_resume, + }, { + /* pin-controller instance 7 PERIC1 data */ + .pin_banks = exynos8895_pin_banks7, + .nr_banks = ARRAY_SIZE(exynos8895_pin_banks7), + .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, + }, +}; + +const struct samsung_pinctrl_of_match_data exynos8895_of_data __initconst = { + .ctrl = exynos8895_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos8895_pin_ctrl), +}; + /* * Pinctrl driver data for Tesla FSD SoC. FSD SoC includes three * gpio/pin-mux/pinconfig controllers. diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index 305cb1d31de4..7b7ff7ffeb56 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -141,6 +141,16 @@ .name = id \ } +#define EXYNOS8895_PIN_BANK_EINTG(pins, reg, id, offs) \ + { \ + .type = &exynos8895_bank_type_off, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_GPIO, \ + .eint_offset = offs, \ + .name = id \ + } + #define EXYNOSV920_PIN_BANK_EINTG(pins, reg, id, con_offs, mask_offs, pend_offs) \ { \ .type = &exynos850_bank_type_off, \ diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 675efa5d86a9..bbedd980ec67 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1477,6 +1477,12 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = { .data = &exynos7885_of_data }, { .compatible = "samsung,exynos850-pinctrl", .data = &exynos850_of_data }, + { .compatible = "samsung,exynos8895-pinctrl", + .data = &exynos8895_of_data }, + { .compatible = "samsung,exynos9810-pinctrl", + .data = &exynos9810_of_data }, + { .compatible = "samsung,exynos990-pinctrl", + .data = &exynos990_of_data }, { .compatible = "samsung,exynosautov9-pinctrl", .data = &exynosautov9_of_data }, { .compatible = "samsung,exynosautov920-pinctrl", diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index a1e7377bd890..bb0689d52ea0 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -384,6 +384,9 @@ extern const struct samsung_pinctrl_of_match_data exynos5433_of_data; extern const struct samsung_pinctrl_of_match_data exynos7_of_data; extern const struct samsung_pinctrl_of_match_data exynos7885_of_data; extern const struct samsung_pinctrl_of_match_data exynos850_of_data; +extern const struct samsung_pinctrl_of_match_data exynos8895_of_data; +extern const struct samsung_pinctrl_of_match_data exynos9810_of_data; +extern const struct samsung_pinctrl_of_match_data exynos990_of_data; extern const struct samsung_pinctrl_of_match_data exynosautov9_of_data; extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data; extern const struct samsung_pinctrl_of_match_data fsd_of_data; diff --git a/drivers/pinctrl/sophgo/Kconfig b/drivers/pinctrl/sophgo/Kconfig index b14792ee46fc..c05f909a8838 100644 --- a/drivers/pinctrl/sophgo/Kconfig +++ b/drivers/pinctrl/sophgo/Kconfig @@ -43,7 +43,7 @@ config PINCTRL_SOPHGO_SG2000 pinctrl-sg2000. 
 config PINCTRL_SOPHGO_SG2002
-	tristate "Sophgo SG2000 SoC Pinctrl driver"
+	tristate "Sophgo SG2002 SoC Pinctrl driver"
 	depends on ARCH_SOPHGO || COMPILE_TEST
 	depends on OF
 	select PINCTRL_SOPHGO_CV18XX
diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig
new file mode 100644
index 000000000000..168f8a5ffbb9
--- /dev/null
+++ b/drivers/pinctrl/spacemit/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# SpacemiT SoC PINCTRL drivers
+#
+
+config PINCTRL_SPACEMIT_K1
+	tristate "SpacemiT K1 SoC Pinctrl driver"
+	depends on ARCH_SPACEMIT || COMPILE_TEST
+	depends on OF
+	select GENERIC_PINCTRL_GROUPS
+	select GENERIC_PINMUX_FUNCTIONS
+	select GENERIC_PINCONF
+	help
+	  Say Y to select the pinctrl driver for the SpacemiT K1 SoC.
+	  This pin controller allows selecting the mux function for
+	  each pin. This driver can also be built as a module called
+	  pinctrl-k1.
diff --git a/drivers/pinctrl/spacemit/Makefile b/drivers/pinctrl/spacemit/Makefile
new file mode 100644
index 000000000000..fede1e80fe0f
--- /dev/null
+++ b/drivers/pinctrl/spacemit/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PINCTRL_SPACEMIT_K1)	+= pinctrl-k1.o
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
new file mode 100644
index 000000000000..a32579d73613
--- /dev/null
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Yixun Lan <dlan@gentoo.org> */
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+#include "pinctrl-k1.h"
+
+/*
+ * +---------+----------+----------+--------+--------+----------+--------+
+ * | pull    | drive    | schmitt  | slew   | edge   | strong   | mux    |
+ * | up/down | strength | trigger  | rate   | detect | pull     | mode   |
+ * +---------+----------+----------+--------+--------+----------+--------+
+ *   3 bits    3 bits     2 bits     1 bit    3 bits   1 bit      3 bits
+ */
+
+#define PAD_MUX			GENMASK(2, 0)
+#define PAD_STRONG_PULL		BIT(3)
+#define PAD_EDGE_RISE		BIT(4)
+#define PAD_EDGE_FALL		BIT(5)
+#define PAD_EDGE_CLEAR		BIT(6)
+#define PAD_SLEW_RATE		GENMASK(12, 11)
+#define PAD_SLEW_RATE_EN	BIT(7)
+#define PAD_SCHMITT		GENMASK(9, 8)
+#define PAD_DRIVE		GENMASK(12, 10)
+#define PAD_PULLDOWN		BIT(13)
+#define PAD_PULLUP		BIT(14)
+#define PAD_PULL_EN		BIT(15)
+
+struct spacemit_pin {
+	u16		pin;
+	u16		flags;
+	u8		gpiofunc;
+};
+
+struct spacemit_pinctrl {
+	struct device		*dev;
+	struct pinctrl_dev	*pctl_dev;
+	const struct spacemit_pinctrl_data	*data;
+	struct pinctrl_desc	pdesc;
+
+	struct mutex		mutex;
+	raw_spinlock_t		lock;
+
+	void __iomem		*regs;
+};
+
+struct spacemit_pinctrl_data {
+	const struct pinctrl_pin_desc	*pins;
+	const struct spacemit_pin	*data;
+	u16				npins;
+};
+
+struct spacemit_pin_mux_config {
+	const struct spacemit_pin	*pin;
+	u32				config;
+};
+
+struct spacemit_pin_drv_strength {
+	u8	val;
+	u32	mA;
+};
+
+/* map pin id to pinctrl register offset, refer to the MFPR definition */
+static unsigned int spacemit_pin_to_offset(unsigned int pin)
+{
+	unsigned int offset = 0;
+
+	switch (pin) {
+	case 0 ... 
85: + offset = pin + 1; + break; + case 86 ... 92: + offset = pin + 37; + break; + case 93 ... 97: + offset = pin + 24; + break; + case 98: + offset = 93; + break; + case 99: + offset = 92; + break; + case 100: + offset = 91; + break; + case 101: + offset = 90; + break; + case 102: + offset = 95; + break; + case 103: + offset = 94; + break; + case 104 ... 110: + offset = pin + 6; + break; + case 111 ... 127: + offset = pin + 20; + break; + default: + break; + } + + return offset << 2; +} + +static inline void __iomem *spacemit_pin_to_reg(struct spacemit_pinctrl *pctrl, + unsigned int pin) +{ + return pctrl->regs + spacemit_pin_to_offset(pin); +} + +static u16 spacemit_dt_get_pin(u32 value) +{ + return value >> 16; +} + +static u16 spacemit_dt_get_pin_mux(u32 value) +{ + return value & GENMASK(15, 0); +} + +static const struct spacemit_pin *spacemit_get_pin(struct spacemit_pinctrl *pctrl, + unsigned long pin) +{ + const struct spacemit_pin *pdata = pctrl->data->data; + int i; + + for (i = 0; i < pctrl->data->npins; i++) { + if (pin == pdata[i].pin) + return &pdata[i]; + } + + return NULL; +} + +static inline enum spacemit_pin_io_type spacemit_to_pin_io_type( + const struct spacemit_pin *pin) +{ + return K1_PIN_GET_IO_TYPE(pin->flags); +} + +/* External: IO voltage via external source, can be 1.8V or 3.3V */ +static const char * const io_type_desc[] = { + "None", + "Fixed/1V8", + "Fixed/3V3", + "External", +}; + +static void spacemit_pctrl_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *seq, unsigned int pin) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct spacemit_pin *spin = spacemit_get_pin(pctrl, pin); + enum spacemit_pin_io_type type = spacemit_to_pin_io_type(spin); + void __iomem *reg; + u32 value; + + seq_printf(seq, "offset: 0x%04x ", spacemit_pin_to_offset(pin)); + seq_printf(seq, "type: %s ", io_type_desc[type]); + + reg = spacemit_pin_to_reg(pctrl, pin); + value = readl(reg); + seq_printf(seq, "mux: %ld reg: 0x%04x", (value & PAD_MUX), value); +} + +/* use IO high level output current as the table */ +static struct spacemit_pin_drv_strength spacemit_ds_1v8_tbl[4] = { + { 0, 11 }, + { 2, 21 }, + { 4, 32 }, + { 6, 42 }, +}; + +static struct spacemit_pin_drv_strength spacemit_ds_3v3_tbl[8] = { + { 0, 7 }, + { 2, 10 }, + { 4, 13 }, + { 6, 16 }, + { 1, 19 }, + { 3, 23 }, + { 5, 26 }, + { 7, 29 }, +}; + +static inline u8 spacemit_get_ds_value(struct spacemit_pin_drv_strength *tbl, + u32 num, u32 mA) +{ + int i; + + for (i = 0; i < num; i++) + if (mA <= tbl[i].mA) + return tbl[i].val; + + return tbl[num - 1].val; +} + +static inline u32 spacemit_get_ds_mA(struct spacemit_pin_drv_strength *tbl, + u32 num, u32 val) +{ + int i; + + for (i = 0; i < num; i++) + if (val == tbl[i].val) + return tbl[i].mA; + + return 0; +} + +static inline u8 spacemit_get_driver_strength(enum spacemit_pin_io_type type, + u32 mA) +{ + switch (type) { + case IO_TYPE_1V8: + return spacemit_get_ds_value(spacemit_ds_1v8_tbl, + ARRAY_SIZE(spacemit_ds_1v8_tbl), + mA); + case IO_TYPE_3V3: + return spacemit_get_ds_value(spacemit_ds_3v3_tbl, + ARRAY_SIZE(spacemit_ds_3v3_tbl), + mA); + default: + return 0; + } +} + +static inline u32 spacemit_get_drive_strength_mA(enum spacemit_pin_io_type type, + u32 value) +{ + switch (type) { + case IO_TYPE_1V8: + return spacemit_get_ds_mA(spacemit_ds_1v8_tbl, + ARRAY_SIZE(spacemit_ds_1v8_tbl), + value & 0x6); + case IO_TYPE_3V3: + return spacemit_get_ds_mA(spacemit_ds_3v3_tbl, + ARRAY_SIZE(spacemit_ds_3v3_tbl), + value); + default: + 
return 0; + } +} + +static int spacemit_pctrl_check_power(struct pinctrl_dev *pctldev, + struct device_node *dn, + struct spacemit_pin_mux_config *pinmuxs, + int num_pins, const char *grpname) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = pctrl->dev; + enum spacemit_pin_io_type type; + u32 power = 0, i; + + of_property_read_u32(dn, "power-source", &power); + + for (i = 0; i < num_pins; i++) { + type = spacemit_to_pin_io_type(pinmuxs[i].pin); + + if (type != IO_TYPE_EXTERNAL) + continue; + + switch (power) { + case PIN_POWER_STATE_1V8: + case PIN_POWER_STATE_3V3: + break; + default: + dev_err(dev, "group %s has unsupported power\n", + grpname); + return -ENOTSUPP; + } + } + + return 0; +} + +static int spacemit_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **maps, + unsigned int *num_maps) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = pctrl->dev; + struct device_node *child; + struct pinctrl_map *map; + const char **grpnames; + const char *grpname; + int ngroups = 0; + int nmaps = 0; + int ret; + + for_each_available_child_of_node(np, child) + ngroups += 1; + + grpnames = devm_kcalloc(dev, ngroups, sizeof(*grpnames), GFP_KERNEL); + if (!grpnames) + return -ENOMEM; + + map = kcalloc(ngroups * 2, sizeof(*map), GFP_KERNEL); + if (!map) + return -ENOMEM; + + ngroups = 0; + guard(mutex)(&pctrl->mutex); + for_each_available_child_of_node_scoped(np, child) { + struct spacemit_pin_mux_config *pinmuxs; + unsigned int config, *pins; + int i, npins; + + npins = of_property_count_u32_elems(child, "pinmux"); + + if (npins < 1) { + dev_err(dev, "invalid pinctrl group %pOFn.%pOFn\n", + np, child); + return -EINVAL; + } + + grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", + np, child); + if (!grpname) + return -ENOMEM; + + grpnames[ngroups++] = grpname; + + pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + + pinmuxs = devm_kcalloc(dev, npins, sizeof(*pinmuxs), GFP_KERNEL); + if (!pinmuxs) + return -ENOMEM; + + for (i = 0; i < npins; i++) { + ret = of_property_read_u32_index(child, "pinmux", + i, &config); + + if (ret) + return -EINVAL; + + pins[i] = spacemit_dt_get_pin(config); + pinmuxs[i].config = config; + pinmuxs[i].pin = spacemit_get_pin(pctrl, pins[i]); + + if (!pinmuxs[i].pin) + return dev_err_probe(dev, -ENODEV, "failed to get pin %d\n", pins[i]); + } + + ret = spacemit_pctrl_check_power(pctldev, child, pinmuxs, + npins, grpname); + if (ret < 0) + return ret; + + map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP; + map[nmaps].data.mux.function = np->name; + map[nmaps].data.mux.group = grpname; + nmaps += 1; + + ret = pinctrl_generic_add_group(pctldev, grpname, + pins, npins, pinmuxs); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to add group %s: %d\n", grpname, ret); + + ret = pinconf_generic_parse_dt_config(child, pctldev, + &map[nmaps].data.configs.configs, + &map[nmaps].data.configs.num_configs); + if (ret) + return dev_err_probe(dev, ret, "failed to parse pin config of group %s\n", + grpname); + + if (map[nmaps].data.configs.num_configs == 0) + continue; + + map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP; + map[nmaps].data.configs.group_or_pin = grpname; + nmaps += 1; + } + + ret = pinmux_generic_add_function(pctldev, np->name, + grpnames, ngroups, NULL); + if (ret < 0) { + pinctrl_utils_free_map(pctldev, map, nmaps); + return dev_err_probe(dev, ret, "error adding function %s\n", np->name); + } + + 
*maps = map; + *num_maps = nmaps; + + return 0; +} + +static const struct pinctrl_ops spacemit_pctrl_ops = { + .get_groups_count = pinctrl_generic_get_group_count, + .get_group_name = pinctrl_generic_get_group_name, + .get_group_pins = pinctrl_generic_get_group_pins, + .pin_dbg_show = spacemit_pctrl_dbg_show, + .dt_node_to_map = spacemit_pctrl_dt_node_to_map, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int spacemit_pmx_set_mux(struct pinctrl_dev *pctldev, + unsigned int fsel, unsigned int gsel) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct group_desc *group; + const struct spacemit_pin_mux_config *configs; + unsigned int i, mux; + void __iomem *reg; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + configs = group->data; + + for (i = 0; i < group->grp.npins; i++) { + const struct spacemit_pin *spin = configs[i].pin; + u32 value = configs[i].config; + + reg = spacemit_pin_to_reg(pctrl, spin->pin); + mux = spacemit_dt_get_pin_mux(value); + + guard(raw_spinlock_irqsave)(&pctrl->lock); + value = readl_relaxed(reg) & ~PAD_MUX; + writel_relaxed(mux | value, reg); + } + + return 0; +} + +static int spacemit_request_gpio(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct spacemit_pin *spin = spacemit_get_pin(pctrl, pin); + void __iomem *reg; + + reg = spacemit_pin_to_reg(pctrl, pin); + guard(raw_spinlock_irqsave)(&pctrl->lock); + writel_relaxed(spin->gpiofunc, reg); + + return 0; +} + +static const struct pinmux_ops spacemit_pmx_ops = { + .get_functions_count = pinmux_generic_get_function_count, + .get_function_name = pinmux_generic_get_function_name, + .get_function_groups = pinmux_generic_get_function_groups, + .set_mux = spacemit_pmx_set_mux, + .gpio_request_enable = spacemit_request_gpio, + .strict = true, +}; + +static int spacemit_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + int param = pinconf_to_config_param(*config); + u32 value, arg = 0; + + if (!pin) + return -EINVAL; + + value = readl(spacemit_pin_to_reg(pctrl, pin)); + + switch (param) { + case PIN_CONFIG_SLEW_RATE: + if (FIELD_GET(PAD_SLEW_RATE_EN, value)) + arg = FIELD_GET(PAD_SLEW_RATE, value) + 2; + else + arg = 0; + break; + default: + return -EINVAL; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +#define ENABLE_DRV_STRENGTH BIT(1) +#define ENABLE_SLEW_RATE BIT(2) +static int spacemit_pinconf_generate_config(const struct spacemit_pin *spin, + unsigned long *configs, + unsigned int num_configs, + u32 *value) +{ + enum spacemit_pin_io_type type; + int i, param; + u32 v = 0, voltage = 0, arg, val; + u32 flag = 0, drv_strength, slew_rate; + + if (!spin) + return -EINVAL; + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + v &= ~(PAD_PULL_EN | PAD_PULLDOWN | PAD_PULLUP); + v &= ~PAD_STRONG_PULL; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + v &= ~(PAD_PULLUP | PAD_STRONG_PULL); + v |= (PAD_PULL_EN | PAD_PULLDOWN); + break; + case PIN_CONFIG_BIAS_PULL_UP: + v &= ~PAD_PULLDOWN; + v |= (PAD_PULL_EN | PAD_PULLUP); + + if (arg == 1) + v |= PAD_STRONG_PULL; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + flag |= ENABLE_DRV_STRENGTH; + drv_strength = arg; + 
break; + case PIN_CONFIG_INPUT_SCHMITT: + v &= ~PAD_SCHMITT; + v |= FIELD_PREP(PAD_SCHMITT, arg); + break; + case PIN_CONFIG_POWER_SOURCE: + voltage = arg; + break; + case PIN_CONFIG_SLEW_RATE: + if (arg) { + flag |= ENABLE_SLEW_RATE; + v |= PAD_SLEW_RATE_EN; + slew_rate = arg; + } else { + v &= ~PAD_SLEW_RATE_EN; + } + break; + default: + return -EINVAL; + } + } + + if (flag & ENABLE_DRV_STRENGTH) { + type = spacemit_to_pin_io_type(spin); + + /* fix external io type */ + if (type == IO_TYPE_EXTERNAL) { + switch (voltage) { + case 1800: + type = IO_TYPE_1V8; + break; + case 3300: + type = IO_TYPE_3V3; + break; + default: + return -EINVAL; + } + } + + val = spacemit_get_driver_strength(type, drv_strength); + + v &= ~PAD_DRIVE; + v |= FIELD_PREP(PAD_DRIVE, val); + } + + if (flag & ENABLE_SLEW_RATE) { + /* check drive strength & slew rate */ + if (flag & ENABLE_DRV_STRENGTH) { + val = FIELD_GET(PAD_SLEW_RATE, v) + 2; + if (slew_rate > 1 && slew_rate != val) { + pr_err("slew rate conflict with drive strength\n"); + return -EINVAL; + } + } else { + v &= ~PAD_SLEW_RATE; + slew_rate = slew_rate > 1 ? (slew_rate - 2) : 0; + v |= FIELD_PREP(PAD_SLEW_RATE, slew_rate); + } + } + + *value = v; + + return 0; +} + +static int spacemit_pin_set_config(struct spacemit_pinctrl *pctrl, + unsigned int pin, u32 value) +{ + const struct spacemit_pin *spin = spacemit_get_pin(pctrl, pin); + void __iomem *reg; + unsigned int mux; + + if (!pin) + return -EINVAL; + + reg = spacemit_pin_to_reg(pctrl, spin->pin); + + guard(raw_spinlock_irqsave)(&pctrl->lock); + mux = readl_relaxed(reg) & PAD_MUX; + writel_relaxed(mux | value, reg); + + return 0; +} + +static int spacemit_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct spacemit_pin *spin = spacemit_get_pin(pctrl, pin); + u32 value; + + if (spacemit_pinconf_generate_config(spin, configs, num_configs, &value)) + return -EINVAL; + + return spacemit_pin_set_config(pctrl, pin, value); +} + +static int spacemit_pinconf_group_set(struct pinctrl_dev *pctldev, + unsigned int gsel, + unsigned long *configs, + unsigned int num_configs) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct spacemit_pin *spin; + const struct group_desc *group; + u32 value; + int i; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + spin = spacemit_get_pin(pctrl, group->grp.pins[0]); + if (spacemit_pinconf_generate_config(spin, configs, num_configs, &value)) + return -EINVAL; + + for (i = 0; i < group->grp.npins; i++) + spacemit_pin_set_config(pctrl, group->grp.pins[i], value); + + return 0; +} + +static void spacemit_pinconf_dbg_pull(struct seq_file *seq, unsigned int value) +{ + u32 normal, strong; + + if (!FIELD_GET(PAD_PULL_EN, value)) { + seq_puts(seq, ", bias pull disabled"); + return; + } + + if (FIELD_GET(PAD_PULLDOWN, value)) + seq_puts(seq, ", bias pull down"); + + normal = FIELD_GET(PAD_PULLUP, value); + strong = FIELD_GET(PAD_STRONG_PULL, value); + + if (normal && strong) + seq_puts(seq, ", bias strong pull up"); + else if (normal) + seq_puts(seq, ", bias normal pull up"); +} + +static void spacemit_pinconf_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *seq, unsigned int pin) +{ + struct spacemit_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct spacemit_pin *spin = spacemit_get_pin(pctrl, pin); + enum spacemit_pin_io_type type =
spacemit_to_pin_io_type(spin); + void __iomem *reg = spacemit_pin_to_reg(pctrl, pin); + u32 value, tmp, mA; + + value = readl(reg); + spacemit_pinconf_dbg_pull(seq, value); + + seq_printf(seq, ", io type (%s)", io_type_desc[type]); + + tmp = FIELD_GET(PAD_DRIVE, value); + if (type == IO_TYPE_1V8 || type == IO_TYPE_3V3) { + mA = spacemit_get_drive_strength_mA(type, tmp); + seq_printf(seq, ", drive strength (%d mA)", mA); + } + + /* drive strength depends on power source, so show all values */ + if (type == IO_TYPE_EXTERNAL) + seq_printf(seq, ", drive strength (%d or %d mA)", + spacemit_get_drive_strength_mA(IO_TYPE_1V8, tmp), + spacemit_get_drive_strength_mA(IO_TYPE_3V3, tmp)); + + seq_printf(seq, ", register (0x%04x)\n", value); +} + +static const struct pinconf_ops spacemit_pinconf_ops = { + .pin_config_get = spacemit_pinconf_get, + .pin_config_set = spacemit_pinconf_set, + .pin_config_group_set = spacemit_pinconf_group_set, + .pin_config_dbg_show = spacemit_pinconf_dbg_show, + .is_generic = true, +}; + +static int spacemit_pinctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spacemit_pinctrl *pctrl; + const struct spacemit_pinctrl_data *pctrl_data; + int ret; + + pctrl_data = device_get_match_data(dev); + if (!pctrl_data) + return -ENODEV; + + if (pctrl_data->npins == 0) + return dev_err_probe(dev, -EINVAL, "invalid pin data\n"); + + pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + + pctrl->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pctrl->regs)) + return PTR_ERR(pctrl->regs); + + pctrl->pdesc.name = dev_name(dev); + pctrl->pdesc.pins = pctrl_data->pins; + pctrl->pdesc.npins = pctrl_data->npins; + pctrl->pdesc.pctlops = &spacemit_pctrl_ops; + pctrl->pdesc.pmxops = &spacemit_pmx_ops; + pctrl->pdesc.confops = &spacemit_pinconf_ops; + pctrl->pdesc.owner = THIS_MODULE; + + pctrl->data = pctrl_data; + pctrl->dev = dev; + raw_spin_lock_init(&pctrl->lock); + mutex_init(&pctrl->mutex); + + platform_set_drvdata(pdev, pctrl); + + ret = devm_pinctrl_register_and_init(dev, &pctrl->pdesc, + pctrl, &pctrl->pctl_dev); + if (ret) + return dev_err_probe(dev, ret, + "failed to register pinctrl driver\n"); + + return pinctrl_enable(pctrl->pctl_dev); +} + +static const struct pinctrl_pin_desc k1_pin_desc[] = { + PINCTRL_PIN(0, "GPIO_00"), + PINCTRL_PIN(1, "GPIO_01"), + PINCTRL_PIN(2, "GPIO_02"), + PINCTRL_PIN(3, "GPIO_03"), + PINCTRL_PIN(4, "GPIO_04"), + PINCTRL_PIN(5, "GPIO_05"), + PINCTRL_PIN(6, "GPIO_06"), + PINCTRL_PIN(7, "GPIO_07"), + PINCTRL_PIN(8, "GPIO_08"), + PINCTRL_PIN(9, "GPIO_09"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), +
PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70/PRI_TDI"), + PINCTRL_PIN(71, "GPIO_71/PRI_TMS"), + PINCTRL_PIN(72, "GPIO_72/PRI_TCK"), + PINCTRL_PIN(73, "GPIO_73/PRI_TDO"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93/PWR_SCL"), + PINCTRL_PIN(94, "GPIO_94/PWR_SDA"), + PINCTRL_PIN(95, "GPIO_95/VCX0_EN"), + PINCTRL_PIN(96, "GPIO_96/DVL0"), + PINCTRL_PIN(97, "GPIO_97/DVL1"), + PINCTRL_PIN(98, "GPIO_98/QSPI_DAT3"), + PINCTRL_PIN(99, "GPIO_99/QSPI_DAT2"), + PINCTRL_PIN(100, "GPIO_100/QSPI_DAT1"), + PINCTRL_PIN(101, "GPIO_101/QSPI_DAT0"), + PINCTRL_PIN(102, "GPIO_102/QSPI_CLK"), + PINCTRL_PIN(103, "GPIO_103/QSPI_CS1"), + PINCTRL_PIN(104, "GPIO_104/MMC1_DAT3"), + PINCTRL_PIN(105, "GPIO_105/MMC1_DAT2"), + PINCTRL_PIN(106, "GPIO_106/MMC1_DAT1"), + PINCTRL_PIN(107, "GPIO_107/MMC1_DAT0"), + PINCTRL_PIN(108, "GPIO_108/MMC1_CMD"), + PINCTRL_PIN(109, "GPIO_109/MMC1_CLK"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), +}; + +static const struct spacemit_pin k1_pin_data[ARRAY_SIZE(k1_pin_desc)] = { + K1_FUNC_PIN(0, 0, IO_TYPE_1V8), + K1_FUNC_PIN(1, 0, IO_TYPE_1V8), + K1_FUNC_PIN(2, 0, IO_TYPE_1V8), + K1_FUNC_PIN(3, 0, IO_TYPE_1V8), + K1_FUNC_PIN(4, 0, IO_TYPE_1V8), + K1_FUNC_PIN(5, 0, IO_TYPE_1V8), + K1_FUNC_PIN(6, 0, IO_TYPE_1V8), + K1_FUNC_PIN(7, 0, IO_TYPE_1V8), + K1_FUNC_PIN(8, 0, IO_TYPE_1V8), + K1_FUNC_PIN(9, 0, IO_TYPE_1V8), + K1_FUNC_PIN(10, 0, IO_TYPE_1V8), + K1_FUNC_PIN(11, 0, IO_TYPE_1V8), + K1_FUNC_PIN(12, 0, IO_TYPE_1V8), + K1_FUNC_PIN(13, 0, IO_TYPE_1V8), + K1_FUNC_PIN(14, 0, IO_TYPE_1V8), + K1_FUNC_PIN(15, 0, IO_TYPE_1V8), + K1_FUNC_PIN(16, 0, IO_TYPE_1V8), + K1_FUNC_PIN(17, 0, IO_TYPE_1V8), +
K1_FUNC_PIN(18, 0, IO_TYPE_1V8), + K1_FUNC_PIN(19, 0, IO_TYPE_1V8), + K1_FUNC_PIN(20, 0, IO_TYPE_1V8), + K1_FUNC_PIN(21, 0, IO_TYPE_1V8), + K1_FUNC_PIN(22, 0, IO_TYPE_1V8), + K1_FUNC_PIN(23, 0, IO_TYPE_1V8), + K1_FUNC_PIN(24, 0, IO_TYPE_1V8), + K1_FUNC_PIN(25, 0, IO_TYPE_1V8), + K1_FUNC_PIN(26, 0, IO_TYPE_1V8), + K1_FUNC_PIN(27, 0, IO_TYPE_1V8), + K1_FUNC_PIN(28, 0, IO_TYPE_1V8), + K1_FUNC_PIN(29, 0, IO_TYPE_1V8), + K1_FUNC_PIN(30, 0, IO_TYPE_1V8), + K1_FUNC_PIN(31, 0, IO_TYPE_1V8), + K1_FUNC_PIN(32, 0, IO_TYPE_1V8), + K1_FUNC_PIN(33, 0, IO_TYPE_1V8), + K1_FUNC_PIN(34, 0, IO_TYPE_1V8), + K1_FUNC_PIN(35, 0, IO_TYPE_1V8), + K1_FUNC_PIN(36, 0, IO_TYPE_1V8), + K1_FUNC_PIN(37, 0, IO_TYPE_1V8), + K1_FUNC_PIN(38, 0, IO_TYPE_1V8), + K1_FUNC_PIN(39, 0, IO_TYPE_1V8), + K1_FUNC_PIN(40, 0, IO_TYPE_1V8), + K1_FUNC_PIN(41, 0, IO_TYPE_1V8), + K1_FUNC_PIN(42, 0, IO_TYPE_1V8), + K1_FUNC_PIN(43, 0, IO_TYPE_1V8), + K1_FUNC_PIN(44, 0, IO_TYPE_1V8), + K1_FUNC_PIN(45, 0, IO_TYPE_1V8), + K1_FUNC_PIN(46, 0, IO_TYPE_1V8), + K1_FUNC_PIN(47, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(48, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(49, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(50, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(51, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(52, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(53, 0, IO_TYPE_1V8), + K1_FUNC_PIN(54, 0, IO_TYPE_1V8), + K1_FUNC_PIN(55, 0, IO_TYPE_1V8), + K1_FUNC_PIN(56, 0, IO_TYPE_1V8), + K1_FUNC_PIN(57, 0, IO_TYPE_1V8), + K1_FUNC_PIN(58, 0, IO_TYPE_1V8), + K1_FUNC_PIN(59, 0, IO_TYPE_1V8), + K1_FUNC_PIN(60, 0, IO_TYPE_1V8), + K1_FUNC_PIN(61, 0, IO_TYPE_1V8), + K1_FUNC_PIN(62, 0, IO_TYPE_1V8), + K1_FUNC_PIN(63, 0, IO_TYPE_1V8), + K1_FUNC_PIN(64, 0, IO_TYPE_1V8), + K1_FUNC_PIN(65, 0, IO_TYPE_1V8), + K1_FUNC_PIN(66, 0, IO_TYPE_1V8), + K1_FUNC_PIN(67, 0, IO_TYPE_1V8), + K1_FUNC_PIN(68, 0, IO_TYPE_1V8), + K1_FUNC_PIN(69, 0, IO_TYPE_1V8), + K1_FUNC_PIN(70, 1, IO_TYPE_1V8), + K1_FUNC_PIN(71, 1, IO_TYPE_1V8), + K1_FUNC_PIN(72, 1, IO_TYPE_1V8), + K1_FUNC_PIN(73, 1, IO_TYPE_1V8), + K1_FUNC_PIN(74, 0, IO_TYPE_1V8), + K1_FUNC_PIN(75, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(76, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(77, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(78, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(79, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(80, 0, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(81, 0, IO_TYPE_1V8), + K1_FUNC_PIN(82, 0, IO_TYPE_1V8), + K1_FUNC_PIN(83, 0, IO_TYPE_1V8), + K1_FUNC_PIN(84, 0, IO_TYPE_1V8), + K1_FUNC_PIN(85, 0, IO_TYPE_1V8), + K1_FUNC_PIN(86, 0, IO_TYPE_1V8), + K1_FUNC_PIN(87, 0, IO_TYPE_1V8), + K1_FUNC_PIN(88, 0, IO_TYPE_1V8), + K1_FUNC_PIN(89, 0, IO_TYPE_1V8), + K1_FUNC_PIN(90, 0, IO_TYPE_1V8), + K1_FUNC_PIN(91, 0, IO_TYPE_1V8), + K1_FUNC_PIN(92, 0, IO_TYPE_1V8), + K1_FUNC_PIN(93, 1, IO_TYPE_1V8), + K1_FUNC_PIN(94, 1, IO_TYPE_1V8), + K1_FUNC_PIN(95, 1, IO_TYPE_1V8), + K1_FUNC_PIN(96, 1, IO_TYPE_1V8), + K1_FUNC_PIN(97, 1, IO_TYPE_1V8), + K1_FUNC_PIN(98, 1, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(99, 1, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(100, 1, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(101, 1, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(102, 1, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(103, 1, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(104, 4, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(105, 4, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(106, 4, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(107, 4, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(108, 4, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(109, 4, IO_TYPE_EXTERNAL), + K1_FUNC_PIN(110, 0, IO_TYPE_1V8), + K1_FUNC_PIN(111, 0, IO_TYPE_1V8), + K1_FUNC_PIN(112, 0, IO_TYPE_1V8), + K1_FUNC_PIN(113, 0, IO_TYPE_1V8), + K1_FUNC_PIN(114, 0, IO_TYPE_1V8), + K1_FUNC_PIN(115, 0, 
IO_TYPE_1V8), + K1_FUNC_PIN(116, 0, IO_TYPE_1V8), + K1_FUNC_PIN(117, 0, IO_TYPE_1V8), + K1_FUNC_PIN(118, 0, IO_TYPE_1V8), + K1_FUNC_PIN(119, 0, IO_TYPE_1V8), + K1_FUNC_PIN(120, 0, IO_TYPE_1V8), + K1_FUNC_PIN(121, 0, IO_TYPE_1V8), + K1_FUNC_PIN(122, 0, IO_TYPE_1V8), + K1_FUNC_PIN(123, 0, IO_TYPE_1V8), + K1_FUNC_PIN(124, 0, IO_TYPE_1V8), + K1_FUNC_PIN(125, 0, IO_TYPE_1V8), + K1_FUNC_PIN(126, 0, IO_TYPE_1V8), + K1_FUNC_PIN(127, 0, IO_TYPE_1V8), +}; + +static const struct spacemit_pinctrl_data k1_pinctrl_data = { + .pins = k1_pin_desc, + .data = k1_pin_data, + .npins = ARRAY_SIZE(k1_pin_desc), +}; + +static const struct of_device_id k1_pinctrl_ids[] = { + { .compatible = "spacemit,k1-pinctrl", .data = &k1_pinctrl_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, k1_pinctrl_ids); + +static struct platform_driver k1_pinctrl_driver = { + .probe = spacemit_pinctrl_probe, + .driver = { + .name = "k1-pinctrl", + .suppress_bind_attrs = true, + .of_match_table = k1_pinctrl_ids, + }, +}; +module_platform_driver(k1_pinctrl_driver); + +MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>"); +MODULE_DESCRIPTION("Pinctrl driver for the SpacemiT K1 SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.h b/drivers/pinctrl/spacemit/pinctrl-k1.h new file mode 100644 index 000000000000..16143fea469e --- /dev/null +++ b/drivers/pinctrl/spacemit/pinctrl-k1.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024 Yixun Lan <dlan@gentoo.org> */ + +#ifndef _PINCTRL_SPACEMIT_K1_H +#define _PINCTRL_SPACEMIT_K1_H + +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinconf.h> + +enum spacemit_pin_io_type { + IO_TYPE_NONE = 0, + IO_TYPE_1V8, + IO_TYPE_3V3, + IO_TYPE_EXTERNAL, +}; + +#define PIN_POWER_STATE_1V8 1800 +#define PIN_POWER_STATE_3V3 3300 + +#define K1_PIN_IO_TYPE GENMASK(2, 1) + +#define K1_PIN_CAP_IO_TYPE(type) \ + FIELD_PREP_CONST(K1_PIN_IO_TYPE, type) +#define K1_PIN_GET_IO_TYPE(val) \ + FIELD_GET(K1_PIN_IO_TYPE, val) + +#define K1_FUNC_PIN(_id, _gpiofunc, _io) \ + { \ + .pin = (_id), \ + .gpiofunc = (_gpiofunc), \ + .flags = (K1_PIN_CAP_IO_TYPE(_io)), \ + } + +#endif /* _PINCTRL_SPACEMIT_K1_H */ diff --git a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c index 9c3c39dc6550..d14f382f2392 100644 --- a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c +++ b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c @@ -943,7 +943,7 @@ static struct platform_driver sprd_pinctrl_driver = { .of_match_table = sprd_pinctrl_of_match, }, .probe = sprd_pinctrl_probe, - .remove_new = sprd_pinctrl_remove, + .remove = sprd_pinctrl_remove, .shutdown = sprd_pinctrl_shutdown, }; module_platform_driver(sprd_pinctrl_driver); diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c index 1aabd15e2714..99437b2ccc25 100644 --- a/drivers/platform/x86/intel/bxtwc_tmu.c +++ b/drivers/platform/x86/intel/bxtwc_tmu.c @@ -48,9 +48,8 @@ static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data) static int bxt_wcove_tmu_probe(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); - struct regmap_irq_chip_data *regmap_irq_chip; struct wcove_tmu *wctmu; - int ret, virq, irq; + int ret; wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL); if (!wctmu) @@ -59,27 +58,18 @@ static int 
bxt_wcove_tmu_probe(struct platform_device *pdev) wctmu->dev = &pdev->dev; wctmu->regmap = pmic->regmap; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; + wctmu->irq = platform_get_irq(pdev, 0); + if (wctmu->irq < 0) + return wctmu->irq; - regmap_irq_chip = pmic->irq_chip_data_tmu; - virq = regmap_irq_get_virq(regmap_irq_chip, irq); - if (virq < 0) { - dev_err(&pdev->dev, - "failed to get virtual interrupt=%d\n", irq); - return virq; - } - - ret = devm_request_threaded_irq(&pdev->dev, virq, + ret = devm_request_threaded_irq(&pdev->dev, wctmu->irq, NULL, bxt_wcove_tmu_irq_handler, IRQF_ONESHOT, "bxt_wcove_tmu", wctmu); if (ret) { dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n", - ret, virq); + ret, wctmu->irq); return ret; } - wctmu->irq = virq; /* Unmask TMU second level Wake & System alarm */ regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG, diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c index 725948044da4..735df818f76b 100644 --- a/drivers/platform/x86/x86-android-tablets/other.c +++ b/drivers/platform/x86/x86-android-tablets/other.c @@ -41,7 +41,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst = { /* Novatek NVT-ts touchscreen */ .board_info = { - .type = "NVT-ts", + .type = "nt11205-ts", .addr = 0x34, .dev_name = "NVT-ts", }, diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 389d5a193e5d..f5fc33a8bf44 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -79,6 +79,7 @@ config POWER_RESET_EP93XX bool "Cirrus EP93XX reset driver" if COMPILE_TEST depends on MFD_SYSCON default ARCH_EP93XX + select AUXILIARY_BUS help This driver provides restart support for Cirrus EP93XX SoC. 
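The power/reset and power/supply hunks that follow are part of the tree-wide conversion away from the transitional platform_driver callback name: struct platform_driver::remove() itself now has the void-returning prototype, and .remove_new survives only as a deprecated alias for it, so these patches simply rename the member back with no behavioural change. As a minimal sketch of the resulting driver shape, using a hypothetical "foo" driver rather than anything from this series:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        /* acquire resources here */
        return 0;
}

/* The remove callback returns void: failures cannot be reported from here */
static void foo_remove(struct platform_device *pdev)
{
        /* release whatever foo_probe() acquired */
}

static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .remove = foo_remove,   /* was: .remove_new = foo_remove */
        .driver = {
                .name = "foo",
        },
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Example platform driver skeleton");
MODULE_LICENSE("GPL");

Only the member name changes in the hunks below; the callbacks already had the void prototype from the earlier .remove_new conversion.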
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c index 93eece027865..7bc779055cf3 100644 --- a/drivers/power/reset/at91-poweroff.c +++ b/drivers/power/reset/at91-poweroff.c @@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, at91_poweroff_of_match); static struct platform_driver at91_poweroff_driver = { .probe = at91_poweroff_probe, - .remove_new = at91_poweroff_remove, + .remove = at91_poweroff_remove, .driver = { .name = "at91-poweroff", .of_match_table = at91_poweroff_of_match, diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 16512654295f..036b18a1f90f 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -427,7 +427,7 @@ static void at91_reset_remove(struct platform_device *pdev) static struct platform_driver at91_reset_driver = { .probe = at91_reset_probe, - .remove_new = at91_reset_remove, + .remove = at91_reset_remove, .driver = { .name = "at91-reset", .of_match_table = at91_reset_of_match, diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index 959ce0dbe91d..edb0df86aff4 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -441,7 +441,7 @@ static void at91_shdwc_remove(struct platform_device *pdev) static struct platform_driver at91_shdwc_driver = { .probe = at91_shdwc_probe, - .remove_new = at91_shdwc_remove, + .remove = at91_shdwc_remove, .driver = { .name = "at91-shdwc", .of_match_table = at91_shdwc_of_match, diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c index dbc4ff61cd74..cfaa54ced0d0 100644 --- a/drivers/power/reset/keystone-reset.c +++ b/drivers/power/reset/keystone-reset.c @@ -16,7 +16,6 @@ #include <linux/mfd/syscon.h> #include <linux/of.h> -#define RSTYPE_RG 0x0 #define RSCTRL_RG 0x4 #define RSCFG_RG 0x8 #define RSISO_RG 0xc @@ -28,7 +27,6 @@ #define RSMUX_OMODE_MASK 0xe #define RSMUX_OMODE_RESET_ON 0xa #define RSMUX_OMODE_RESET_OFF 0x0 -#define RSMUX_LOCK_MASK 0x1 #define RSMUX_LOCK_SET 0x1 #define RSCFG_RSTYPE_SOFT 0x300f diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index fa25fbd53934..1a6fc8d38e20 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -305,7 +305,7 @@ MODULE_DEVICE_TABLE(of, of_ltc2952_poweroff_match); static struct platform_driver ltc2952_poweroff_driver = { .probe = ltc2952_poweroff_probe, - .remove_new = ltc2952_poweroff_remove, + .remove = ltc2952_poweroff_remove, .driver = { .name = "ltc2952-poweroff", .of_match_table = of_ltc2952_poweroff_match, diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c index e0f2ff6b147c..f7d8fc935d51 100644 --- a/drivers/power/reset/qnap-poweroff.c +++ b/drivers/power/reset/qnap-poweroff.c @@ -118,7 +118,7 @@ static void qnap_power_off_remove(struct platform_device *pdev) static struct platform_driver qnap_power_off_driver = { .probe = qnap_power_off_probe, - .remove_new = qnap_power_off_remove, + .remove = qnap_power_off_remove, .driver = { .name = "qnap_power_off", .of_match_table = of_match_ptr(qnap_power_off_of_match_table), diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c index 4d622c19bc48..d623d77e657e 100644 --- a/drivers/power/reset/syscon-reboot.c +++ b/drivers/power/reset/syscon-reboot.c @@ -61,7 +61,8 @@ static int syscon_reboot_probe(struct platform_device *pdev) priority = 192; if 
(of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset)) - return -EINVAL; + if (of_property_read_u32(pdev->dev.of_node, "reg", &ctx->offset)) + return -EINVAL; value_err = of_property_read_u32(pdev->dev.of_node, "value", &ctx->value); mask_err = of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask); diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 34619c4d4ece..b7938fbb24a5 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -422,7 +422,7 @@ static irqreturn_t pm860x_batt_handler(int irq, void *data) info->temp_type = PM860X_TEMP_TINT; } mutex_unlock(&info->lock); - /* clear ccnt since battery is attached or dettached */ + /* clear ccnt since battery is attached or detached */ clear_ccnt(info, &ccnt_data); return IRQ_HANDLED; } @@ -566,7 +566,7 @@ static int measure_temp(struct pm860x_battery_info *info, int *data) ret = measure_12bit_voltage(info, PM8607_GPADC1_MEAS1, data); if (ret) return ret; - /* meausered Vtbat(mV) / Ibias_current(11uA)*/ + /* measured Vtbat(mV) / Ibias_current(11uA)*/ *data = (*data * 1000) / GPBIAS2_GPADC1_UA; if (*data > TBAT_NEG_25D) { diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index bcfa63fb9f1e..9f2eef6787f7 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -493,6 +493,16 @@ config CHARGER_TWL4030 help Say Y here to enable support for TWL4030 Battery Charge Interface. +config CHARGER_TWL6030 + tristate "OMAP TWL6030 BCI charger driver" + depends on IIO && TWL4030_CORE + help + Say Y here to enable support for TWL6030/6032 Battery Charge + Interface. + + This driver can be built as a module. If so, the module will be + called twl6030_charger. + config CHARGER_LP8727 tristate "TI/National Semiconductor LP8727 charger driver" depends on I2C diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index 8dcb41545317..59c4a9f40d28 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_CHARGER_CPCAP) += cpcap-charger.o obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o +obj-$(CONFIG_CHARGER_TWL6030) += twl6030_charger.o obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c index 3e6ea22372b2..19ed52852804 100644 --- a/drivers/power/supply/ab8500_bmdata.c +++ b/drivers/power/supply/ab8500_bmdata.c @@ -16,7 +16,7 @@ /* Default: temperature hysteresis */ #define AB8500_TEMP_HYSTERESIS 3 -static struct power_supply_battery_ocv_table ocv_cap_tbl[] = { +static const struct power_supply_battery_ocv_table ocv_cap_tbl[] = { { .ocv = 4186000, .capacity = 100}, { .ocv = 4163000, .capacity = 99}, { .ocv = 4114000, .capacity = 95}, @@ -48,7 +48,7 @@ static struct power_supply_battery_ocv_table ocv_cap_tbl[] = { * temperature values to work. Factory resistance is 300 mOhm and the * resistance values to the right are percentages of 300 mOhm.
*/ -static struct power_supply_resistance_temp_table temp_to_batres_tbl_thermistor[] = { +static const struct power_supply_resistance_temp_table temp_to_batres_tbl_thermistor[] = { { .temp = 40, .resistance = 40 /* 120 mOhm */ }, { .temp = 30, .resistance = 45 /* 135 mOhm */ }, { .temp = 20, .resistance = 55 /* 165 mOhm */ }, diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 56f136b2d071..37039e28fc4b 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ b/drivers/power/supply/ab8500_btemp.c @@ -283,7 +283,7 @@ static void ab8500_btemp_periodic_work(struct work_struct *work) dev_warn(di->dev, "failed to identify the battery\n"); } - /* Failover if a reading is erroneous, use last meausurement */ + /* Failover if a reading is erroneous, use last measurement */ ret = thermal_zone_get_temp(di->tz, &bat_temp); if (ret) { dev_err(di->dev, "error reading temperature\n"); @@ -818,7 +818,7 @@ MODULE_DEVICE_TABLE(of, ab8500_btemp_match); struct platform_driver ab8500_btemp_driver = { .probe = ab8500_btemp_probe, - .remove_new = ab8500_btemp_remove, + .remove = ab8500_btemp_remove, .driver = { .name = "ab8500-btemp", .of_match_table = ab8500_btemp_match, diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c index 854491ad3ecd..14e1b448bd39 100644 --- a/drivers/power/supply/ab8500_chargalg.c +++ b/drivers/power/supply/ab8500_chargalg.c @@ -1837,7 +1837,7 @@ static const struct of_device_id ab8500_chargalg_match[] = { struct platform_driver ab8500_chargalg_driver = { .probe = ab8500_chargalg_probe, - .remove_new = ab8500_chargalg_remove, + .remove = ab8500_chargalg_remove, .driver = { .name = "ab8500_chargalg", .of_match_table = ab8500_chargalg_match, diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 93181ebfb324..cece8d6753ac 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -3711,7 +3711,7 @@ MODULE_DEVICE_TABLE(of, ab8500_charger_match); static struct platform_driver ab8500_charger_driver = { .probe = ab8500_charger_probe, - .remove_new = ab8500_charger_remove, + .remove = ab8500_charger_remove, .driver = { .name = "ab8500-charger", .of_match_table = ab8500_charger_match, diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index a71903b1bf78..78871a2143de 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -3242,7 +3242,7 @@ MODULE_DEVICE_TABLE(of, ab8500_fg_match); struct platform_driver ab8500_fg_driver = { .probe = ab8500_fg_probe, - .remove_new = ab8500_fg_remove, + .remove = ab8500_fg_remove, .driver = { .name = "ab8500-fg", .of_match_table = ab8500_fg_match, diff --git a/drivers/power/supply/acer_a500_battery.c b/drivers/power/supply/acer_a500_battery.c index ef5c419b1b7f..39d85b11a13c 100644 --- a/drivers/power/supply/acer_a500_battery.c +++ b/drivers/power/supply/acer_a500_battery.c @@ -233,14 +233,15 @@ static int a500_battery_probe(struct platform_device *pdev) psy_cfg.of_node = pdev->dev.parent->of_node; psy_cfg.drv_data = bat; + psy_cfg.no_wakeup_source = true; bat->regmap = dev_get_regmap(pdev->dev.parent, "KB930"); if (!bat->regmap) return -EINVAL; - bat->psy = devm_power_supply_register_no_ws(&pdev->dev, - &a500_battery_desc, - &psy_cfg); + bat->psy = devm_power_supply_register(&pdev->dev, + &a500_battery_desc, + &psy_cfg); if (IS_ERR(bat->psy)) return dev_err_probe(&pdev->dev, PTR_ERR(bat->psy), "failed to register battery\n"); @@ -285,7 +286,7 
@@ static struct platform_driver a500_battery_driver = { .pm = &a500_battery_pm_ops, }, .probe = a500_battery_probe, - .remove_new = a500_battery_remove, + .remove = a500_battery_remove, }; module_platform_driver(a500_battery_driver); diff --git a/drivers/power/supply/act8945a_charger.c b/drivers/power/supply/act8945a_charger.c index 51122bfbf196..b2b82f97a471 100644 --- a/drivers/power/supply/act8945a_charger.c +++ b/drivers/power/supply/act8945a_charger.c @@ -651,7 +651,7 @@ static struct platform_driver act8945a_charger_driver = { .name = "act8945a-charger", }, .probe = act8945a_charger_probe, - .remove_new = act8945a_charger_remove, + .remove = act8945a_charger_remove, }; module_platform_driver(act8945a_charger_driver); diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c index dac9875d993c..458fd3024373 100644 --- a/drivers/power/supply/adp5061.c +++ b/drivers/power/supply/adp5061.c @@ -590,7 +590,7 @@ static int adp5061_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_AVG: /* * This property is used to set the VWEAK threshold - * bellow this value, weak charge mode is entered + * below this value, weak charge mode is entered * above this value, fast chargerge mode is entered */ return adp5061_get_vweak_th(st, val); diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index f71cc90fea12..fa27195f074e 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -354,17 +354,18 @@ static int axp20x_battery_get_prop(struct power_supply *psy, if (ret) return ret; + /* IIO framework gives mA but Power Supply framework gives uA */ if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) { - ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + ret = iio_read_channel_processed_scale(axp20x_batt->batt_chrg_i, + &val->intval, 1000); } else { - ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1); + ret = iio_read_channel_processed_scale(axp20x_batt->batt_dischrg_i, + &val1, 1000); val->intval = -val1; } if (ret) return ret; - /* IIO framework gives mA but Power Supply framework gives uA */ - val->intval *= 1000; break; case POWER_SUPPLY_PROP_CAPACITY: @@ -406,13 +407,12 @@ static int axp20x_battery_get_prop(struct power_supply *psy, break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = iio_read_channel_processed(axp20x_batt->batt_v, - &val->intval); + /* IIO framework gives mV but Power Supply framework gives uV */ + ret = iio_read_channel_processed_scale(axp20x_batt->batt_v, + &val->intval, 1000); if (ret) return ret; - /* IIO framework gives mV but Power Supply framework gives uV */ - val->intval *= 1000; break; default: @@ -519,13 +519,15 @@ static int axp717_battery_get_prop(struct power_supply *psy, * The offset of this value is currently unknown and is * not documented in the datasheet. Based on * observation it's assumed to be somewhere around - * 450ma. I will leave the value raw for now. + * 450ma. I will leave the value raw for now. Note that + * IIO framework gives mA but Power Supply framework + * gives uA. 
*/ - ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + ret = iio_read_channel_processed_scale(axp20x_batt->batt_chrg_i, + &val->intval, 1000); if (ret) return ret; - /* IIO framework gives mA but Power Supply framework gives uA */ - val->intval *= 1000; + return 0; case POWER_SUPPLY_PROP_CAPACITY: @@ -564,13 +566,12 @@ static int axp717_battery_get_prop(struct power_supply *psy, return 0; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = iio_read_channel_processed(axp20x_batt->batt_v, - &val->intval); + /* IIO framework gives mV but Power Supply framework gives uV */ + ret = iio_read_channel_processed_scale(axp20x_batt->batt_v, + &val->intval, 1000); if (ret) return ret; - /* IIO framework gives mV but Power Supply framework gives uV */ - val->intval *= 1000; return 0; case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c index 2766352ab737..9722912268fe 100644 --- a/drivers/power/supply/axp20x_usb_power.c +++ b/drivers/power/supply/axp20x_usb_power.c @@ -220,16 +220,15 @@ static int axp20x_usb_power_get_property(struct power_supply *psy, return 0; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (IS_ENABLED(CONFIG_AXP20X_ADC)) { - ret = iio_read_channel_processed(power->vbus_v, - &val->intval); - if (ret) - return ret; - /* * IIO framework gives mV but Power Supply framework * gives uV. */ - val->intval *= 1000; + ret = iio_read_channel_processed_scale(power->vbus_v, + &val->intval, 1000); + if (ret) + return ret; + return 0; } @@ -253,16 +252,15 @@ static int axp20x_usb_power_get_property(struct power_supply *psy, return 0; case POWER_SUPPLY_PROP_CURRENT_NOW: if (IS_ENABLED(CONFIG_AXP20X_ADC)) { - ret = iio_read_channel_processed(power->vbus_i, - &val->intval); - if (ret) - return ret; - /* * IIO framework gives mA but Power Supply framework * gives uA. */ - val->intval *= 1000; + ret = iio_read_channel_processed_scale(power->vbus_i, + &val->intval, 1000); + if (ret) + return ret; + return 0; } @@ -374,16 +372,15 @@ static int axp717_usb_power_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (IS_ENABLED(CONFIG_AXP20X_ADC)) { - ret = iio_read_channel_processed(power->vbus_v, - &val->intval); - if (ret) - return ret; - /* * IIO framework gives mV but Power Supply framework * gives uV. 
*/ - val->intval *= 1000; + ret = iio_read_channel_processed_scale(power->vbus_v, + &val->intval, 1000); + if (ret) + return ret; + return 0; } diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 750fda543308..40c5ac7a1118 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -449,9 +449,29 @@ static u8 [BQ27XXX_REG_AP] = 0x18, BQ27XXX_DM_REG_ROWS, }, + bq27426_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x02, + [BQ27XXX_REG_INT_TEMP] = 0x1e, + [BQ27XXX_REG_VOLT] = 0x04, + [BQ27XXX_REG_AI] = 0x10, + [BQ27XXX_REG_FLAGS] = 0x06, + [BQ27XXX_REG_TTE] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTF] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = 0x08, + [BQ27XXX_REG_RC] = 0x0c, + [BQ27XXX_REG_FCC] = 0x0e, + [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR, + [BQ27XXX_REG_AE] = INVALID_REG_ADDR, + [BQ27XXX_REG_SOC] = 0x1c, + [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR, + [BQ27XXX_REG_AP] = 0x18, + BQ27XXX_DM_REG_ROWS, + }, #define bq27411_regs bq27421_regs #define bq27425_regs bq27421_regs -#define bq27426_regs bq27421_regs #define bq27441_regs bq27421_regs #define bq27621_regs bq27421_regs bq27z561_regs[BQ27XXX_REG_MAX] = { @@ -769,10 +789,23 @@ static enum power_supply_property bq27421_props[] = { }; #define bq27411_props bq27421_props #define bq27425_props bq27421_props -#define bq27426_props bq27421_props #define bq27441_props bq27421_props #define bq27621_props bq27421_props +static enum power_supply_property bq27426_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + static enum power_supply_property bq27z561_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -2131,6 +2164,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) struct power_supply_config psy_cfg = { .of_node = di->dev->of_node, .drv_data = di, + .no_wakeup_source = true, }; int ret; @@ -2157,7 +2191,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) psy_desc->get_property = bq27xxx_battery_get_property; psy_desc->external_power_changed = bq27xxx_external_power_changed; - di->bat = devm_power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); + di->bat = devm_power_supply_register(di->dev, psy_desc, &psy_cfg); if (IS_ERR(di->bat)) return dev_err_probe(di->dev, PTR_ERR(di->bat), "failed to register battery\n"); diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index 09ec0ecf1486..a69faef444c0 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -221,7 +221,7 @@ static bool is_charging(struct charger_manager *cm) /* If at least one of the charger is charging, return yes */ for (i = 0; cm->desc->psy_charger_stat[i]; i++) { - /* 1. The charger sholuld not be DISABLED */ + /* 1. 
The charger should not be DISABLED */ if (cm->emergency_stop) continue; if (!cm->charger_enabled) @@ -1739,7 +1739,7 @@ static struct platform_driver charger_manager_driver = { .of_match_table = charger_manager_match, }, .probe = charger_manager_probe, - .remove_new = charger_manager_remove, + .remove = charger_manager_remove, .id_table = charger_manager_id, }; diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 30ec76cdf34b..813037c00ded 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -1169,7 +1169,7 @@ static struct platform_driver cpcap_battery_driver = { .of_match_table = of_match_ptr(cpcap_battery_id_table), }, .probe = cpcap_battery_probe, - .remove_new = cpcap_battery_remove, + .remove = cpcap_battery_remove, }; module_platform_driver(cpcap_battery_driver); diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c index 91e7292d86bb..7781b45a67a7 100644 --- a/drivers/power/supply/cpcap-charger.c +++ b/drivers/power/supply/cpcap-charger.c @@ -969,7 +969,7 @@ static struct platform_driver cpcap_charger_driver = { .of_match_table = cpcap_charger_id_table, }, .shutdown = cpcap_charger_shutdown, - .remove_new = cpcap_charger_remove, + .remove = cpcap_charger_remove, }; module_platform_driver(cpcap_charger_driver); diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index bed3e2e9bfea..47d3f58aa15c 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -618,6 +618,7 @@ static int cros_usbpd_charger_probe(struct platform_device *pd) psy_desc->external_power_changed = cros_usbpd_charger_power_changed; psy_cfg.drv_data = port; + psy_cfg.no_wakeup_source = true; if (cros_usbpd_charger_port_is_dedicated(port)) { sprintf(port->name, CHARGER_DEDICATED_DIR_NAME); @@ -644,8 +645,7 @@ static int cros_usbpd_charger_probe(struct platform_device *pd) psy_desc->name = port->name; - psy = devm_power_supply_register_no_ws(dev, psy_desc, - &psy_cfg); + psy = devm_power_supply_register(dev, psy_desc, &psy_cfg); if (IS_ERR(psy)) { dev_err(dev, "Failed to register power supply\n"); continue; diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c index 04e0f4162d42..34328f5d556e 100644 --- a/drivers/power/supply/da9030_battery.c +++ b/drivers/power/supply/da9030_battery.c @@ -269,7 +269,7 @@ static void da9030_charger_check_state(struct da9030_charger *charger) } if (charger->adc.vchmax_res > charger->thresholds.vcharge_max || charger->adc.vchmin_res < charger->thresholds.vcharge_min || - /* Tempreture readings are negative */ + /* Temperature readings are negative */ charger->adc.tbat_res < charger->thresholds.tbat_high || charger->adc.tbat_res > charger->thresholds.tbat_low) { /* disable charger */ @@ -470,7 +470,7 @@ static int da9030_battery_charger_init(struct da9030_charger *charger) if (ret) return ret; - /* enable auto ADC measuremnts */ + /* enable auto ADC measurements */ return da903x_write(charger->master, DA9030_ADC_AUTO_CONTROL, DA9030_ADC_TBAT_ENABLE | DA9030_ADC_VBAT_IN_TXON | DA9030_ADC_VCH_ENABLE | DA9030_ADC_ICH_ENABLE | @@ -571,7 +571,7 @@ static struct platform_driver da903x_battery_driver = { .name = "da903x-battery", }, .probe = da9030_battery_probe, - .remove_new = da9030_battery_remove, + .remove = da9030_battery_remove, }; module_platform_driver(da903x_battery_driver); diff --git a/drivers/power/supply/da9052-battery.c 
b/drivers/power/supply/da9052-battery.c index 0d84c42c624e..6f17e2fa1a28 100644 --- a/drivers/power/supply/da9052-battery.c +++ b/drivers/power/supply/da9052-battery.c @@ -648,7 +648,7 @@ static void da9052_bat_remove(struct platform_device *pdev) static struct platform_driver da9052_bat_driver = { .probe = da9052_bat_probe, - .remove_new = da9052_bat_remove, + .remove = da9052_bat_remove, .driver = { .name = "da9052-bat", }, diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c index b13cecd84f58..27f36ef5b88d 100644 --- a/drivers/power/supply/da9150-charger.c +++ b/drivers/power/supply/da9150-charger.c @@ -636,7 +636,7 @@ static struct platform_driver da9150_charger_driver = { .name = "da9150-charger", }, .probe = da9150_charger_probe, - .remove_new = da9150_charger_remove, + .remove = da9150_charger_remove, }; module_platform_driver(da9150_charger_driver); diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c index 7bdc6b263609..d5d215f5ad8b 100644 --- a/drivers/power/supply/generic-adc-battery.c +++ b/drivers/power/supply/generic-adc-battery.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Generic battery driver using IIO - * Copyright (C) 2012, Anish Kumar <anish198519851985@gmail.com> + * Copyright (C) 2012, Anish Kumar <yesanishhere@gmail.com> * Copyright (c) 2023, Sebastian Reichel <sre@kernel.org> */ #include <linux/interrupt.h> @@ -295,6 +295,6 @@ static struct platform_driver gab_driver = { }; module_platform_driver(gab_driver); -MODULE_AUTHOR("anish kumar <anish198519851985@gmail.com>"); +MODULE_AUTHOR("anish kumar <yesanishhere@gmail.com>"); MODULE_DESCRIPTION("generic battery driver using IIO"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c index 66cc649f702a..7e0568a5353f 100644 --- a/drivers/power/supply/ipaq_micro_battery.c +++ b/drivers/power/supply/ipaq_micro_battery.c @@ -302,7 +302,7 @@ static struct platform_driver micro_batt_device_driver = { .pm = µ_batt_dev_pm_ops, }, .probe = micro_batt_probe, - .remove_new = micro_batt_remove, + .remove = micro_batt_remove, }; module_platform_driver(micro_batt_device_driver); diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c index 860d8614c98f..237912a92272 100644 --- a/drivers/power/supply/isp1704_charger.c +++ b/drivers/power/supply/isp1704_charger.c @@ -501,7 +501,7 @@ static struct platform_driver isp1704_charger_driver = { .of_match_table = of_match_ptr(omap_isp1704_of_match), }, .probe = isp1704_charger_probe, - .remove_new = isp1704_charger_remove, + .remove = isp1704_charger_remove, }; module_platform_driver(isp1704_charger_driver); diff --git a/drivers/power/supply/lenovo_yoga_c630_battery.c b/drivers/power/supply/lenovo_yoga_c630_battery.c index f98f65e00831..7a6c8af9e8c2 100644 --- a/drivers/power/supply/lenovo_yoga_c630_battery.c +++ b/drivers/power/supply/lenovo_yoga_c630_battery.c @@ -368,11 +368,12 @@ static int yoga_c630_psy_register_bat_psy(struct yoga_c630_psy *ecbat) bat_cfg.drv_data = ecbat; bat_cfg.fwnode = ecbat->fwnode; - ecbat->bat_psy = power_supply_register_no_ws(ecbat->dev, - ecbat->unit_mA ? - &yoga_c630_psy_bat_psy_desc_mA : - &yoga_c630_psy_bat_psy_desc_mWh, - &bat_cfg); + bat_cfg.no_wakeup_source = true; + ecbat->bat_psy = power_supply_register(ecbat->dev, + ecbat->unit_mA ? 
+ &yoga_c630_psy_bat_psy_desc_mA : + &yoga_c630_psy_bat_psy_desc_mWh, + &bat_cfg); if (IS_ERR(ecbat->bat_psy)) { dev_err(ecbat->dev, "failed to register battery supply\n"); return PTR_ERR(ecbat->bat_psy); @@ -442,7 +443,8 @@ static int yoga_c630_psy_probe(struct auxiliary_device *adev, adp_cfg.fwnode = ecbat->fwnode; adp_cfg.supplied_to = (char **)&yoga_c630_psy_bat_psy_desc_mA.name; adp_cfg.num_supplicants = 1; - ecbat->adp_psy = devm_power_supply_register_no_ws(dev, &yoga_c630_psy_adpt_psy_desc, &adp_cfg); + adp_cfg.no_wakeup_source = true; + ecbat->adp_psy = devm_power_supply_register(dev, &yoga_c630_psy_adpt_psy_desc, &adp_cfg); if (IS_ERR(ecbat->adp_psy)) { dev_err(dev, "failed to register AC adapter supply\n"); return PTR_ERR(ecbat->adp_psy); diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 72b170b4ac46..f0a680c155c4 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -716,7 +716,7 @@ static void lp8788_charger_remove(struct platform_device *pdev) static struct platform_driver lp8788_charger_driver = { .probe = lp8788_charger_probe, - .remove_new = lp8788_charger_remove, + .remove = lp8788_charger_remove, .driver = { .name = LP8788_DEV_CHARGER, }, diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c index b28c04157709..1cef2f860b5f 100644 --- a/drivers/power/supply/max14577_charger.c +++ b/drivers/power/supply/max14577_charger.c @@ -634,7 +634,7 @@ static struct platform_driver max14577_charger_driver = { .of_match_table = of_max14577_charger_dt_match, }, .probe = max14577_charger_probe, - .remove_new = max14577_charger_remove, + .remove = max14577_charger_remove, .id_table = max14577_charger_id, }; module_platform_driver(max14577_charger_driver); diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c index 818e13c613e3..5f58c0c24b4d 100644 --- a/drivers/power/supply/max77650-charger.c +++ b/drivers/power/supply/max77650-charger.c @@ -364,7 +364,7 @@ static struct platform_driver max77650_charger_driver = { .of_match_table = max77650_charger_of_match, }, .probe = max77650_charger_probe, - .remove_new = max77650_charger_remove, + .remove = max77650_charger_remove, }; module_platform_driver(max77650_charger_driver); diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index 4caac142c428..cdea35c0d1de 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -798,7 +798,7 @@ static struct platform_driver max77693_charger_driver = { .name = "max77693-charger", }, .probe = max77693_charger_probe, - .remove_new = max77693_charger_remove, + .remove = max77693_charger_remove, .id_table = max77693_charger_id, }; module_platform_driver(max77693_charger_driver); diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c index d7e520da7688..e6fe68cebc32 100644 --- a/drivers/power/supply/max77976_charger.c +++ b/drivers/power/supply/max77976_charger.c @@ -452,6 +452,7 @@ static int max77976_probe(struct i2c_client *client) i2c_set_clientdata(client, chg); psy_cfg.drv_data = chg; + psy_cfg.no_wakeup_source = true; chg->client = client; chg->regmap = devm_regmap_init_i2c(client, &max77976_regmap_config); @@ -475,7 +476,7 @@ static int max77976_probe(struct i2c_client *client) if (err) return err; - psy = devm_power_supply_register_no_ws(dev, &max77976_psy_desc, &psy_cfg); + psy = 
devm_power_supply_register(dev, &max77976_psy_desc, &psy_cfg); if (IS_ERR(psy)) return dev_err_probe(dev, PTR_ERR(psy), "cannot register\n"); diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c index 621a006d52a9..d753145de3bb 100644 --- a/drivers/power/supply/max8925_power.c +++ b/drivers/power/supply/max8925_power.c @@ -73,7 +73,7 @@ struct max8925_power_info { unsigned usb_online:1; unsigned bat_online:1; unsigned chg_mode:2; - unsigned batt_detect:1; /* detecing MB by ID pin */ + unsigned batt_detect:1; /* detecting MB by ID pin */ unsigned topoff_threshold:2; unsigned fast_charge:3; unsigned no_temp_support:1; @@ -563,7 +563,7 @@ static void max8925_power_remove(struct platform_device *pdev) static struct platform_driver max8925_power_driver = { .probe = max8925_power_probe, - .remove_new = max8925_power_remove, + .remove = max8925_power_remove, .driver = { .name = "max8925-power", }, diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c index 0e980522fee5..0136bc87b105 100644 --- a/drivers/power/supply/pcf50633-charger.c +++ b/drivers/power/supply/pcf50633-charger.c @@ -455,7 +455,7 @@ static struct platform_driver pcf50633_mbc_driver = { .name = "pcf50633-mbc", }, .probe = pcf50633_mbc_probe, - .remove_new = pcf50633_mbc_remove, + .remove = pcf50633_mbc_remove, }; module_platform_driver(pcf50633_mbc_driver); diff --git a/drivers/power/supply/pmu_battery.c b/drivers/power/supply/pmu_battery.c index eaab7500d99b..ed83c5e05ca3 100644 --- a/drivers/power/supply/pmu_battery.c +++ b/drivers/power/supply/pmu_battery.c @@ -170,6 +170,7 @@ static int __init pmu_bat_init(void) pbat->bat_desc.properties = pmu_bat_props; pbat->bat_desc.num_properties = ARRAY_SIZE(pmu_bat_props); pbat->bat_desc.get_property = pmu_bat_get_property; + pbat->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY; pbat->pbi = &pmu_batteries[i]; psy_cfg.drv_data = pbat; diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h index 3cbafc58bdad..7434a6f24775 100644 --- a/drivers/power/supply/power_supply.h +++ b/drivers/power/supply/power_supply.h @@ -13,9 +13,12 @@ struct device; struct device_type; struct power_supply; +extern int power_supply_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp); + #ifdef CONFIG_SYSFS -extern void power_supply_init_attrs(void); +extern void __init power_supply_init_attrs(void); extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env); extern const struct attribute_group *power_supply_attr_groups[]; @@ -41,3 +44,20 @@ static inline int power_supply_create_triggers(struct power_supply *psy) static inline void power_supply_remove_triggers(struct power_supply *psy) {} #endif /* CONFIG_LEDS_TRIGGERS */ + +#ifdef CONFIG_POWER_SUPPLY_HWMON + +int power_supply_add_hwmon_sysfs(struct power_supply *psy); +void power_supply_remove_hwmon_sysfs(struct power_supply *psy); + +#else + +static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) +{ + return 0; +} + +static inline +void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} + +#endif /* CONFIG_POWER_SUPPLY_HWMON */ diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 49534458a9f7..16085eff0084 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -152,7 +152,7 @@ static void power_supply_deferred_register_work(struct work_struct *work) deferred_register_work.work); 
if (psy->dev.parent) { - while (!mutex_trylock(&psy->dev.parent->mutex)) { + while (!device_trylock(psy->dev.parent)) { if (psy->removing) return; msleep(10); @@ -162,7 +162,7 @@ static void power_supply_deferred_register_work(struct work_struct *work) power_supply_changed(psy); if (psy->dev.parent) - mutex_unlock(&psy->dev.parent->mutex); + device_unlock(psy->dev.parent); } #ifdef CONFIG_OF @@ -484,8 +484,6 @@ EXPORT_SYMBOL_GPL(power_supply_get_by_name); */ void power_supply_put(struct power_supply *psy) { - might_sleep(); - atomic_dec(&psy->use_cnt); put_device(&psy->dev); } @@ -777,7 +775,7 @@ int power_supply_get_battery_info(struct power_supply *psy, tab_len = size / (2 * sizeof(__be32)); info->ocv_table_size[index] = tab_len; - table = info->ocv_table[index] = + info->ocv_table[index] = table = devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL); if (!info->ocv_table[index]) { power_supply_put_battery_info(psy, info); @@ -798,7 +796,7 @@ int power_supply_get_battery_info(struct power_supply *psy, goto out_ret_pointer; info->resist_table_size = len / (2 * sizeof(__be32)); - resist_table = info->resist_table = devm_kcalloc(&psy->dev, + info->resist_table = resist_table = devm_kcalloc(&psy->dev, info->resist_table_size, sizeof(*resist_table), GFP_KERNEL); @@ -982,7 +980,7 @@ EXPORT_SYMBOL_GPL(power_supply_battery_info_get_prop); * * Return: the battery internal resistance percent */ -int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, +int power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table, int table_len, int temp) { int i, high, low; @@ -1093,7 +1091,7 @@ EXPORT_SYMBOL_GPL(power_supply_get_maintenance_charging_setting); * * Return: the battery capacity. */ -int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, +int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table, int table_len, int ocv) { int i, high, low; @@ -1118,7 +1116,7 @@ int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, } EXPORT_SYMBOL_GPL(power_supply_ocv2cap_simple); -struct power_supply_battery_ocv_table * +const struct power_supply_battery_ocv_table * power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, int temp, int *table_len) { @@ -1149,7 +1147,7 @@ EXPORT_SYMBOL_GPL(power_supply_find_ocv2cap_table); int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, int ocv, int temp) { - struct power_supply_battery_ocv_table *table; + const struct power_supply_battery_ocv_table *table; int table_len; table = power_supply_find_ocv2cap_table(info, temp, &table_len); @@ -1233,7 +1231,6 @@ int power_supply_property_is_writeable(struct power_supply *psy, { return psy->desc->property_is_writeable && psy->desc->property_is_writeable(psy, psp); } -EXPORT_SYMBOL_GPL(power_supply_property_is_writeable); void power_supply_external_power_changed(struct power_supply *psy) { @@ -1342,8 +1339,7 @@ static void psy_unregister_thermal(struct power_supply *psy) static struct power_supply *__must_check __power_supply_register(struct device *parent, const struct power_supply_desc *desc, - const struct power_supply_config *cfg, - bool ws) + const struct power_supply_config *cfg) { struct device *dev; struct power_supply *psy; @@ -1410,7 +1406,7 @@ __power_supply_register(struct device *parent, if (rc) goto device_add_failed; - rc = device_init_wakeup(dev, ws); + rc = device_init_wakeup(dev, cfg ? 
!cfg->no_wakeup_source : true); if (rc) goto wakeup_init_failed; @@ -1476,33 +1472,10 @@ struct power_supply *__must_check power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) { - return __power_supply_register(parent, desc, cfg, true); + return __power_supply_register(parent, desc, cfg); } EXPORT_SYMBOL_GPL(power_supply_register); -/** - * power_supply_register_no_ws() - Register new non-waking-source power supply - * @parent: Device to be a parent of power supply's device, usually - * the device which probe function calls this - * @desc: Description of power supply, must be valid through whole - * lifetime of this power supply - * @cfg: Run-time specific configuration accessed during registering, - * may be NULL - * - * Return: A pointer to newly allocated power_supply on success - * or ERR_PTR otherwise. - * Use power_supply_unregister() on returned power_supply pointer to release - * resources. - */ -struct power_supply *__must_check -power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg) -{ - return __power_supply_register(parent, desc, cfg, false); -} -EXPORT_SYMBOL_GPL(power_supply_register_no_ws); - static void devm_power_supply_release(struct device *dev, void *res) { struct power_supply **psy = res; @@ -1535,7 +1508,7 @@ devm_power_supply_register(struct device *parent, if (!ptr) return ERR_PTR(-ENOMEM); - psy = __power_supply_register(parent, desc, cfg, true); + psy = __power_supply_register(parent, desc, cfg); if (IS_ERR(psy)) { devres_free(ptr); } else { @@ -1547,42 +1520,6 @@ devm_power_supply_register(struct device *parent, EXPORT_SYMBOL_GPL(devm_power_supply_register); /** - * devm_power_supply_register_no_ws() - Register managed non-waking-source power supply - * @parent: Device to be a parent of power supply's device, usually - * the device which probe function calls this - * @desc: Description of power supply, must be valid through whole - * lifetime of this power supply - * @cfg: Run-time specific configuration accessed during registering, - * may be NULL - * - * Return: A pointer to newly allocated power_supply on success - * or ERR_PTR otherwise. - * The returned power_supply pointer will be automatically unregistered - * on driver detach. 
- */ -struct power_supply *__must_check -devm_power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg) -{ - struct power_supply **ptr, *psy; - - ptr = devres_alloc(devm_power_supply_release, sizeof(*ptr), GFP_KERNEL); - - if (!ptr) - return ERR_PTR(-ENOMEM); - psy = __power_supply_register(parent, desc, cfg, false); - if (IS_ERR(psy)) { - devres_free(ptr); - } else { - *ptr = psy; - devres_add(parent, ptr); - } - return psy; -} -EXPORT_SYMBOL_GPL(devm_power_supply_register_no_ws); - -/** * power_supply_unregister() - Remove this power supply from system * @psy: Pointer to power supply to unregister * diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index 6fbbfb1c685e..01be04903d7d 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -7,6 +7,7 @@ #include <linux/hwmon.h> #include <linux/power_supply.h> #include <linux/slab.h> +#include "power_supply.h" struct power_supply_hwmon { struct power_supply *psy; diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 16b3c5880cd8..571de43fcca9 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -142,7 +142,7 @@ static const char * const POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[] = { [POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE] = "force-discharge", }; -static struct power_supply_attr power_supply_attrs[] = { +static struct power_supply_attr power_supply_attrs[] __ro_after_init = { /* Properties of type `int' */ POWER_SUPPLY_ENUM_ATTR(STATUS), POWER_SUPPLY_ENUM_ATTR(CHARGE_TYPE), @@ -225,9 +225,9 @@ static struct power_supply_attr power_supply_attrs[] = { #define POWER_SUPPLY_ATTR_CNT ARRAY_SIZE(power_supply_attrs) static struct attribute * -__power_supply_attrs[POWER_SUPPLY_ATTR_CNT + 1]; +__power_supply_attrs[POWER_SUPPLY_ATTR_CNT + 1] __ro_after_init; -static struct power_supply_attr *to_ps_attr(struct device_attribute *attr) +static const struct power_supply_attr *to_ps_attr(struct device_attribute *attr) { return container_of(attr, struct power_supply_attr, dev_attr); } @@ -273,7 +273,7 @@ static ssize_t power_supply_show_property(struct device *dev, char *buf) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); - struct power_supply_attr *ps_attr = to_ps_attr(attr); + const struct power_supply_attr *ps_attr = to_ps_attr(attr); enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; @@ -326,7 +326,7 @@ static ssize_t power_supply_store_property(struct device *dev, const char *buf, size_t count) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); - struct power_supply_attr *ps_attr = to_ps_attr(attr); + const struct power_supply_attr *ps_attr = to_ps_attr(attr); enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; @@ -401,7 +401,7 @@ const struct attribute_group *power_supply_attr_groups[] = { NULL }; -void power_supply_init_attrs(void) +void __init power_supply_init_attrs(void) { int i; diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index f0a64c00ddaa..47d29271ddf4 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -151,7 +151,7 @@ struct qcom_battmgr_message { __le32 capacity_low; __le32 capacity_warning; __le32 cycle_count; - /* thousandth of persent */ + /* thousandth of percent */ __le32 accuracy; 
__le32 max_sample_time_ms; __le32 min_sample_time_ms; diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c index 81acbd8b2169..3b4132376649 100644 --- a/drivers/power/supply/qcom_pmi8998_charger.c +++ b/drivers/power/supply/qcom_pmi8998_charger.c @@ -832,7 +832,7 @@ static const struct smb2_register smb2_init_seq[] = { AUTO_RECHG_BIT | EN_ANALOG_DROP_IN_VBATT_BIT | CHARGER_INHIBIT_BIT, .val = CHARGER_INHIBIT_BIT }, - /* STAT pin software override, match downstream. Parallell charging? */ + /* STAT pin software override, match downstream. Parallel charging? */ { .addr = STAT_CFG, .mask = STAT_SW_OVERRIDE_CFG_BIT, .val = STAT_SW_OVERRIDE_CFG_BIT }, diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c index 4e57762e27ba..a79563f6ff7a 100644 --- a/drivers/power/supply/qcom_smbb.c +++ b/drivers/power/supply/qcom_smbb.c @@ -1017,10 +1017,10 @@ static const struct of_device_id smbb_charger_id_table[] = { MODULE_DEVICE_TABLE(of, smbb_charger_id_table); static struct platform_driver smbb_charger_driver = { - .probe = smbb_charger_probe, - .remove_new = smbb_charger_remove, - .driver = { - .name = "qcom-smbb", + .probe = smbb_charger_probe, + .remove = smbb_charger_remove, + .driver = { + .name = "qcom-smbb", .of_match_table = smbb_charger_id_table, }, }; diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c index 57b6ddefad28..e5f35d083c23 100644 --- a/drivers/power/supply/rk817_charger.c +++ b/drivers/power/supply/rk817_charger.c @@ -240,9 +240,32 @@ static int rk817_record_battery_nvram_values(struct rk817_charger *charger) static int rk817_bat_calib_cap(struct rk817_charger *charger) { struct rk808 *rk808 = charger->rk808; - int tmp, charge_now, charge_now_adc, volt_avg; + int charge_now, charge_now_adc; u8 bulk_reg[4]; + /* Don't do anything if there's no battery. */ + if (!charger->battery_present) + return 0; + + /* + * When resuming from suspend, sometimes the voltage value would be + * incorrect. BSP would simply wait two seconds and try reading the + * values again. Do not do any sort of calibration activity when the + * reported value is incorrect. The next scheduled update of battery + * values should then return valid data and the driver can continue. + * Use 2.7v as the sanity value because per the datasheet the PMIC + * can in no way support a battery voltage lower than this. BSP only + * checked for values too low, but I'm adding in a check for values + * too high just in case; again the PMIC can in no way support + * voltages above 4.45v, so this seems like a good value. + */ + if ((charger->volt_avg_uv < 2700000) || (charger->volt_avg_uv > 4450000)) { + dev_dbg(charger->dev, + "Battery voltage of %d is invalid, ignoring.\n", + charger->volt_avg_uv); + return -EINVAL; + } + /* Calibrate the soc and fcc on a fully charged battery */ if (charger->charge_status == CHARGE_FINISH && (!charger->soc_cal)) { @@ -304,51 +327,6 @@ static int rk817_bat_calib_cap(struct rk817_charger *charger) } } - /* - * Calibrate the fully charged capacity when we previously had a full - * battery (soc_cal = 1) and are now empty (at or below minimum design - * voltage). If our columb counter is still positive, subtract that - * from our fcc value to get a calibrated fcc, and if our columb - * counter is negative add that to our fcc (but not to exceed our - * design capacity).
- */ - regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_VOL_H, - bulk_reg, 2); - tmp = get_unaligned_be16(bulk_reg); - volt_avg = (charger->voltage_k * tmp) + 1000 * charger->voltage_b; - if (volt_avg <= charger->bat_voltage_min_design_uv && - charger->soc_cal) { - regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3, - bulk_reg, 4); - charge_now_adc = get_unaligned_be32(bulk_reg); - charge_now = ADC_TO_CHARGE_UAH(charge_now_adc, - charger->res_div); - /* - * Note, if charge_now is negative this will add it (what we - * want) and if it's positive this will subtract (also what - * we want). - */ - charger->fcc_mah = charger->fcc_mah - (charge_now / 1000); - - dev_dbg(charger->dev, - "Recalibrating full charge capacity to %d uah\n", - charger->fcc_mah * 1000); - } - - /* - * Set the SOC to 0 if we are below the minimum system voltage. - */ - if (volt_avg <= charger->bat_voltage_min_design_uv) { - charger->soc = 0; - charge_now_adc = CHARGE_TO_ADC(0, charger->res_div); - put_unaligned_be32(charge_now_adc, bulk_reg); - regmap_bulk_write(rk808->regmap, - RK817_GAS_GAUGE_Q_INIT_H3, bulk_reg, 4); - dev_warn(charger->dev, - "Battery voltage %d below minimum voltage %d\n", - volt_avg, charger->bat_voltage_min_design_uv); - } - rk817_record_battery_nvram_values(charger); return 0; @@ -648,6 +626,24 @@ static irqreturn_t rk817_plug_out_isr(int irq, void *cg) return IRQ_HANDLED; } +static int rk817_bat_set_prop(struct power_supply *ps, + enum power_supply_property prop, + const union power_supply_propval *val) +{ + struct rk817_charger *charger = power_supply_get_drvdata(ps); + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + if ((val->intval < 500000) || + (val->intval > charger->bat_charge_full_design_uah)) + return -EINVAL; + charger->fcc_mah = val->intval / 1000; + return rk817_bat_calib_cap(charger); + default: + return -EINVAL; + } +} + static enum power_supply_property rk817_bat_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_STATUS, @@ -673,12 +669,25 @@ static enum power_supply_property rk817_chg_props[] = { POWER_SUPPLY_PROP_VOLTAGE_AVG, }; +static int rk817_bat_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + return 1; + default: + return 0; + } +} + static const struct power_supply_desc rk817_bat_desc = { .name = "rk817-battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = rk817_bat_props, + .property_is_writeable = rk817_bat_prop_writeable, .num_properties = ARRAY_SIZE(rk817_bat_props), .get_property = rk817_bat_get_prop, + .set_property = rk817_bat_set_prop, }; static const struct power_supply_desc rk817_chg_desc = { @@ -1202,6 +1211,15 @@ static int rk817_charger_probe(struct platform_device *pdev) return 0; } +static int __maybe_unused rk817_suspend(struct device *dev) +{ + struct rk817_charger *charger = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&charger->work); + + return 0; +} + static int __maybe_unused rk817_resume(struct device *dev) { @@ -1213,7 +1231,7 @@ static int __maybe_unused rk817_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(rk817_charger_pm, NULL, rk817_resume); +static SIMPLE_DEV_PM_OPS(rk817_charger_pm, rk817_suspend, rk817_resume); static struct platform_driver rk817_charger_driver = { .probe = rk817_charger_probe, diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c index c04af1ee89c6..67b86ac91a21 100644 --- a/drivers/power/supply/rt9471.c +++ b/drivers/power/supply/rt9471.c @@ -139,6 +139,19 
@@ enum { RT9471_PORTSTAT_DCP, }; +enum { + RT9471_ICSTAT_SLEEP = 0, + RT9471_ICSTAT_VBUSRDY, + RT9471_ICSTAT_TRICKLECHG, + RT9471_ICSTAT_PRECHG, + RT9471_ICSTAT_FASTCHG, + RT9471_ICSTAT_IEOC, + RT9471_ICSTAT_BGCHG, + RT9471_ICSTAT_CHGDONE, + RT9471_ICSTAT_CHGFAULT, + RT9471_ICSTAT_OTG = 15, +}; + struct rt9471_chip { struct device *dev; struct regmap *regmap; @@ -153,8 +166,8 @@ struct rt9471_chip { }; static const struct reg_field rt9471_reg_fields[F_MAX_FIELDS] = { - [F_WDT] = REG_FIELD(RT9471_REG_TOP, 0, 0), - [F_WDT_RST] = REG_FIELD(RT9471_REG_TOP, 1, 1), + [F_WDT] = REG_FIELD(RT9471_REG_TOP, 0, 1), + [F_WDT_RST] = REG_FIELD(RT9471_REG_TOP, 2, 2), [F_CHG_EN] = REG_FIELD(RT9471_REG_FUNC, 0, 0), [F_HZ] = REG_FIELD(RT9471_REG_FUNC, 5, 5), [F_BATFET_DIS] = REG_FIELD(RT9471_REG_FUNC, 7, 7), @@ -255,31 +268,32 @@ static int rt9471_get_ieoc(struct rt9471_chip *chip, int *microamp) static int rt9471_get_status(struct rt9471_chip *chip, int *status) { - unsigned int chg_ready, chg_done, fault_stat; + unsigned int ic_stat; int ret; - ret = regmap_field_read(chip->rm_fields[F_ST_CHG_RDY], &chg_ready); - if (ret) - return ret; - - ret = regmap_field_read(chip->rm_fields[F_ST_CHG_DONE], &chg_done); + ret = regmap_field_read(chip->rm_fields[F_IC_STAT], &ic_stat); if (ret) return ret; - ret = regmap_read(chip->regmap, RT9471_REG_STAT1, &fault_stat); - if (ret) - return ret; - - fault_stat &= RT9471_CHGFAULT_MASK; - - if (chg_ready && chg_done) - *status = POWER_SUPPLY_STATUS_FULL; - else if (chg_ready && fault_stat) + switch (ic_stat) { + case RT9471_ICSTAT_VBUSRDY: + case RT9471_ICSTAT_CHGFAULT: *status = POWER_SUPPLY_STATUS_NOT_CHARGING; - else if (chg_ready && !fault_stat) + break; + case RT9471_ICSTAT_TRICKLECHG ... RT9471_ICSTAT_BGCHG: *status = POWER_SUPPLY_STATUS_CHARGING; - else + break; + case RT9471_ICSTAT_CHGDONE: + *status = POWER_SUPPLY_STATUS_FULL; + break; + case RT9471_ICSTAT_SLEEP: + case RT9471_ICSTAT_OTG: *status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + default: + *status = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } return 0; } diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c index b63fd2758c2f..33565002ee27 100644 --- a/drivers/power/supply/samsung-sdi-battery.c +++ b/drivers/power/supply/samsung-sdi-battery.c @@ -431,7 +431,7 @@ static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb58515 * temperature compensation tables so we just state 100% for every temperature. * If you have the datasheets, please provide these tables. */ -static struct power_supply_resistance_temp_table samsung_temp2res[] = { +static const struct power_supply_resistance_temp_table samsung_temp2res[] = { { .temp = 50, .resistance = 100 }, { .temp = 40, .resistance = 100 }, { .temp = 30, .resistance = 100 }, @@ -447,7 +447,7 @@ static struct power_supply_resistance_temp_table samsung_temp2res[] = { * These must be sorted by falling OCV value. 
*/ -static struct power_supply_battery_ocv_table samsung_ocv_cap_eb485159lu[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_eb485159lu[] = { { .ocv = 4330000, .capacity = 100}, { .ocv = 4320000, .capacity = 99}, { .ocv = 4283000, .capacity = 95}, @@ -499,7 +499,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_eb485159lu[] = { }; /* Same capacity table is used by eb-l1m7flu, eb425161la, eb425161lu */ -static struct power_supply_battery_ocv_table samsung_ocv_cap_1500mah[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_1500mah[] = { { .ocv = 4328000, .capacity = 100}, { .ocv = 4299000, .capacity = 99}, { .ocv = 4281000, .capacity = 98}, @@ -540,7 +540,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_1500mah[] = { { .ocv = 3300000, .capacity = 0}, }; -static struct power_supply_battery_ocv_table samsung_ocv_cap_eb535151vu[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_eb535151vu[] = { { .ocv = 4178000, .capacity = 100}, { .ocv = 4148000, .capacity = 99}, { .ocv = 4105000, .capacity = 95}, @@ -572,7 +572,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_eb535151vu[] = { { .ocv = 3300000, .capacity = 0}, }; -static struct power_supply_battery_ocv_table samsung_ocv_cap_eb585157lu[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_eb585157lu[] = { { .ocv = 4320000, .capacity = 100}, { .ocv = 4296000, .capacity = 99}, { .ocv = 4283000, .capacity = 98}, diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c index b3d8b1ca97da..50d5157af927 100644 --- a/drivers/power/supply/sc2731_charger.c +++ b/drivers/power/supply/sc2731_charger.c @@ -530,7 +530,7 @@ static struct platform_driver sc2731_charger_driver = { .of_match_table = sc2731_charger_of_match, }, .probe = sc2731_charger_probe, - .remove_new = sc2731_charger_remove, + .remove = sc2731_charger_remove, }; module_platform_driver(sc2731_charger_driver); diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index bd23c4d9fed4..f36edc2ba708 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -992,7 +992,7 @@ static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data) static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data) { struct power_supply_battery_info *info; - struct power_supply_battery_ocv_table *table; + const struct power_supply_battery_ocv_table *table; int ret, delta_clbcnt, alarm_adc; ret = power_supply_get_battery_info(data->battery, &info); @@ -1183,10 +1183,14 @@ static int sc27xx_fgu_probe(struct platform_device *pdev) return PTR_ERR(data->charge_chan); } - data->gpiod = devm_gpiod_get(dev, "bat-detect", GPIOD_IN); + data->gpiod = devm_gpiod_get(dev, "battery-detect", GPIOD_IN); if (IS_ERR(data->gpiod)) { - dev_err(dev, "failed to get battery detection GPIO\n"); - return PTR_ERR(data->gpiod); + data->gpiod = devm_gpiod_get(dev, "bat-detect", GPIOD_IN); + if (IS_ERR(data->gpiod)) { + dev_err(dev, "failed to get battery detection GPIO\n"); + return PTR_ERR(data->gpiod); + } + dev_warn(dev, "bat-detect is deprecated, please use battery-detect\n"); } ret = gpiod_get_value_cansleep(data->gpiod); diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c index d41595764caa..d65193e410a6 100644 --- a/drivers/power/supply/tps65090-charger.c +++ b/drivers/power/supply/tps65090-charger.c @@ -343,7 +343,7 @@ static struct 
platform_driver tps65090_charger_driver = { .of_match_table = of_tps65090_charger_match, }, .probe = tps65090_charger_probe, - .remove_new = tps65090_charger_remove, + .remove = tps65090_charger_remove, }; module_platform_driver(tps65090_charger_driver); diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c index 2382749a2f53..6fff44e1ecac 100644 --- a/drivers/power/supply/tps65217_charger.c +++ b/drivers/power/supply/tps65217_charger.c @@ -269,7 +269,7 @@ MODULE_DEVICE_TABLE(of, tps65217_charger_match_table); static struct platform_driver tps65217_charger_driver = { .probe = tps65217_charger_probe, - .remove_new = tps65217_charger_remove, + .remove = tps65217_charger_remove, .driver = { .name = "tps65217-charger", .of_match_table = of_match_ptr(tps65217_charger_match_table), diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index f3f1a0862e93..9dcb5457bef4 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -1133,7 +1133,7 @@ MODULE_DEVICE_TABLE(of, twl_bci_of_match); static struct platform_driver twl4030_bci_driver = { .probe = twl4030_bci_probe, - .remove_new = twl4030_bci_remove, + .remove = twl4030_bci_remove, .driver = { .name = "twl4030_bci", .of_match_table = of_match_ptr(twl_bci_of_match), diff --git a/drivers/power/supply/twl6030_charger.c b/drivers/power/supply/twl6030_charger.c new file mode 100644 index 000000000000..b4ec26ff257c --- /dev/null +++ b/drivers/power/supply/twl6030_charger.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TWL6030 charger + * + * Copyright (C) 2024 Andreas Kemnade <andreas@kemnade.info> + * + * based on older 6030 driver found in a v3.0 vendor kernel + * + * based on twl4030_bci_battery.c by TI + * Copyright (C) 2008 Texas Instruments, Inc. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/bits.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/mfd/twl.h> +#include <linux/power_supply.h> +#include <linux/notifier.h> +#include <linux/usb/otg.h> +#include <linux/iio/consumer.h> +#include <linux/devm-helpers.h> + +#define CONTROLLER_INT_MASK 0x00 +#define CONTROLLER_CTRL1 0x01 +#define CONTROLLER_WDG 0x02 +#define CONTROLLER_STAT1 0x03 +#define CHARGERUSB_INT_STATUS 0x04 +#define CHARGERUSB_INT_MASK 0x05 +#define CHARGERUSB_STATUS_INT1 0x06 +#define CHARGERUSB_STATUS_INT2 0x07 +#define CHARGERUSB_CTRL1 0x08 +#define CHARGERUSB_CTRL2 0x09 +#define CHARGERUSB_CTRL3 0x0A +#define CHARGERUSB_STAT1 0x0B +#define CHARGERUSB_VOREG 0x0C +#define CHARGERUSB_VICHRG 0x0D +#define CHARGERUSB_CINLIMIT 0x0E +#define CHARGERUSB_CTRLLIMIT1 0x0F +#define CHARGERUSB_CTRLLIMIT2 0x10 +#define ANTICOLLAPSE_CTRL1 0x11 +#define ANTICOLLAPSE_CTRL2 0x12 + +/* TWL6032 registers 0xDA to 0xDE - TWL6032_MODULE_CHARGER */ +#define CONTROLLER_CTRL2 0x00 +#define CONTROLLER_VSEL_COMP 0x01 +#define CHARGERUSB_VSYSREG 0x02 +#define CHARGERUSB_VICHRG_PC 0x03 +#define LINEAR_CHRG_STS 0x04 + +#define LINEAR_CHRG_STS_CRYSTL_OSC_OK 0x40 +#define LINEAR_CHRG_STS_END_OF_CHARGE 0x20 +#define LINEAR_CHRG_STS_VBATOV 0x10 +#define LINEAR_CHRG_STS_VSYSOV 0x08 +#define LINEAR_CHRG_STS_DPPM_STS 0x04 +#define LINEAR_CHRG_STS_CV_STS 0x02 +#define LINEAR_CHRG_STS_CC_STS 0x01 + +#define FG_REG_00 0x00 +#define FG_REG_01 0x01 +#define FG_REG_02 0x02 +#define FG_REG_03 0x03 +#define FG_REG_04 0x04 +#define FG_REG_05 0x05 +#define FG_REG_06 0x06 +#define FG_REG_07 0x07 +#define FG_REG_08 0x08 +#define FG_REG_09 0x09 +#define FG_REG_10 0x0A +#define FG_REG_11 0x0B + +/* CONTROLLER_INT_MASK */ +#define MVAC_FAULT BIT(7) +#define MAC_EOC BIT(6) +#define LINCH_GATED BIT(5) +#define MBAT_REMOVED BIT(4) +#define MFAULT_WDG BIT(3) +#define MBAT_TEMP BIT(2) +#define MVBUS_DET BIT(1) +#define MVAC_DET BIT(0) + +/* CONTROLLER_CTRL1 */ +#define CONTROLLER_CTRL1_EN_LINCH BIT(5) +#define CONTROLLER_CTRL1_EN_CHARGER BIT(4) +#define CONTROLLER_CTRL1_SEL_CHARGER BIT(3) + +/* CONTROLLER_STAT1 */ +#define CONTROLLER_STAT1_EXTCHRG_STATZ BIT(7) +#define CONTROLLER_STAT1_LINCH_GATED BIT(6) +#define CONTROLLER_STAT1_CHRG_DET_N BIT(5) +#define CONTROLLER_STAT1_FAULT_WDG BIT(4) +#define CONTROLLER_STAT1_VAC_DET BIT(3) +#define VAC_DET BIT(3) +#define CONTROLLER_STAT1_VBUS_DET BIT(2) +#define VBUS_DET BIT(2) +#define CONTROLLER_STAT1_BAT_REMOVED BIT(1) +#define CONTROLLER_STAT1_BAT_TEMP_OVRANGE BIT(0) + +/* CHARGERUSB_INT_STATUS */ +#define EN_LINCH BIT(4) +#define CURRENT_TERM_INT BIT(3) +#define CHARGERUSB_STAT BIT(2) +#define CHARGERUSB_THMREG BIT(1) +#define CHARGERUSB_FAULT BIT(0) + +/* CHARGERUSB_INT_MASK */ +#define MASK_MCURRENT_TERM BIT(3) +#define MASK_MCHARGERUSB_STAT BIT(2) +#define MASK_MCHARGERUSB_THMREG BIT(1) +#define MASK_MCHARGERUSB_FAULT BIT(0) + +/* CHARGERUSB_STATUS_INT1 */ +#define CHARGERUSB_STATUS_INT1_TMREG BIT(7) +#define CHARGERUSB_STATUS_INT1_NO_BAT BIT(6) +#define CHARGERUSB_STATUS_INT1_BST_OCP BIT(5) +#define CHARGERUSB_STATUS_INT1_TH_SHUTD BIT(4) +#define CHARGERUSB_STATUS_INT1_BAT_OVP BIT(3) +#define CHARGERUSB_STATUS_INT1_POOR_SRC BIT(2) +#define CHARGERUSB_STATUS_INT1_SLP_MODE BIT(1) +#define CHARGERUSB_STATUS_INT1_VBUS_OVP BIT(0) + +/* CHARGERUSB_STATUS_INT2 */ +#define ICCLOOP BIT(3) +#define CURRENT_TERM BIT(2) +#define CHARGE_DONE BIT(1) 
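/*
 * CHARGE_DONE and CURRENT_TERM are the CHARGERUSB_STATUS_INT2 bits
 * that the get_property() path later in this file maps onto
 * POWER_SUPPLY_STATUS_FULL and POWER_SUPPLY_STATUS_NOT_CHARGING
 * respectively; anything else while VBUS is detected is reported as
 * charging.
 */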
+#define ANTICOLLAPSE BIT(0) + +/* CHARGERUSB_CTRL1 */ +#define SUSPEND_BOOT BIT(7) +#define OPA_MODE BIT(6) +#define HZ_MODE BIT(5) +#define TERM BIT(4) + +/* CHARGERUSB_CTRL2 */ +#define UA_TO_VITERM(x) (((x) / 50000 - 1) << 5) + +/* CHARGERUSB_CTRL3 */ +#define VBUSCHRG_LDO_OVRD BIT(7) +#define CHARGE_ONCE BIT(6) +#define BST_HW_PR_DIS BIT(5) +#define AUTOSUPPLY BIT(3) +#define BUCK_HSILIM BIT(0) + +/* CHARGERUSB_VOREG */ +#define UV_TO_VOREG(x) (((x) - 3500000) / 20000) +#define VOREG_TO_UV(x) (((x) & 0x3F) * 20000 + 3500000) +#define CHARGERUSB_VOREG_3P52 0x01 +#define CHARGERUSB_VOREG_4P0 0x19 +#define CHARGERUSB_VOREG_4P2 0x23 +#define CHARGERUSB_VOREG_4P76 0x3F + +/* CHARGERUSB_VICHRG */ +/* + * might be inaccurate for < 500 mA, different scale might apply, + * either starting from 100 mA or 300 mA + */ +#define UA_TO_VICHRG(x) (((x) / 100000) - 1) +#define VICHRG_TO_UA(x) (((x) & 0xf) * 100000 + 100000) + +/* CHARGERUSB_CINLIMIT */ +#define CHARGERUSB_CIN_LIMIT_100 0x1 +#define CHARGERUSB_CIN_LIMIT_300 0x5 +#define CHARGERUSB_CIN_LIMIT_500 0x9 +#define CHARGERUSB_CIN_LIMIT_NONE 0xF + +/* CHARGERUSB_CTRLLIMIT2 */ +#define CHARGERUSB_CTRLLIMIT2_1500 0x0E +#define LOCK_LIMIT BIT(4) + +/* ANTICOLLAPSE_CTRL2 */ +#define BUCK_VTH_SHIFT 5 + +/* FG_REG_00 */ +#define CC_ACTIVE_MODE_SHIFT 6 +#define CC_AUTOCLEAR BIT(2) +#define CC_CAL_EN BIT(1) +#define CC_PAUSE BIT(0) + +#define REG_TOGGLE1 0x90 +#define REG_PWDNSTATUS1 0x93 +#define FGDITHS BIT(7) +#define FGDITHR BIT(6) +#define FGS BIT(5) +#define FGR BIT(4) +#define BBSPOR_CFG 0xE6 +#define BB_CHG_EN BIT(3) + +struct twl6030_charger_info { + struct device *dev; + struct power_supply *usb; + struct power_supply_battery_info *binfo; + struct work_struct work; + int irq_chg; + int input_current_limit; + struct iio_channel *channel_vusb; + struct delayed_work charger_monitor; + bool extended_current_range; +}; + +struct twl6030_charger_chip_data { + bool extended_current_range; +}; + +static int twl6030_charger_read(u8 reg, u8 *val) +{ + return twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, val, reg); +} + +static int twl6030_charger_write(u8 reg, u8 val) +{ + return twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, val, reg); +} + +static int twl6030_config_cinlimit_reg(struct twl6030_charger_info *charger, + unsigned int ua) +{ + if (ua >= 50000 && ua <= 750000) { + ua = (ua - 50000) / 50000; + } else if ((ua > 750000) && (ua <= 1500000) && charger->extended_current_range) { + ua = ((ua % 100000) ?
0x30 : 0x20) + ((ua - 100000) / 100000); + } else { + if (ua < 50000) { + dev_err(charger->dev, "invalid input current limit\n"); + return -EINVAL; + } + /* This is no current limit */ + ua = 0x0F; + } + + return twl6030_charger_write(CHARGERUSB_CINLIMIT, ua); +} + +/* + * rewriting all stuff here, resets to extremely conservative defaults were + * seen under some circumstances, like charge voltage to 3.5V + */ +static int twl6030_enable_usb(struct twl6030_charger_info *charger) +{ + int ret; + + ret = twl6030_charger_write(CHARGERUSB_VICHRG, + UA_TO_VICHRG(charger->binfo->constant_charge_current_max_ua)); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CONTROLLER_WDG, 0xff); + if (ret < 0) + return ret; + + charger->input_current_limit = 500000; + ret = twl6030_config_cinlimit_reg(charger, charger->input_current_limit); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CHARGERUSB_CINLIMIT, CHARGERUSB_CIN_LIMIT_500); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CHARGERUSB_VOREG, + UV_TO_VOREG(charger->binfo->constant_charge_voltage_max_uv)); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CHARGERUSB_CTRL1, TERM); + if (ret < 0) + return ret; + + if (charger->binfo->charge_term_current_ua != -EINVAL) { + ret = twl6030_charger_write(CHARGERUSB_CTRL2, + UA_TO_VITERM(charger->binfo->charge_term_current_ua)); + if (ret < 0) + return ret; + } + + return twl6030_charger_write(CONTROLLER_CTRL1, CONTROLLER_CTRL1_EN_CHARGER); +} + +static void twl6030_charger_wdg(struct work_struct *data) +{ + struct twl6030_charger_info *charger = + container_of(data, struct twl6030_charger_info, + charger_monitor.work); + + u8 val; + u8 int_stat; + u8 stat_int1; + u8 stat_int2; + + twl6030_charger_read(CONTROLLER_STAT1, &val); + twl6030_charger_read(CHARGERUSB_INT_STATUS, &int_stat); + twl6030_charger_read(CHARGERUSB_STATUS_INT1, &stat_int1); + twl6030_charger_read(CHARGERUSB_STATUS_INT2, &stat_int2); + dev_dbg(charger->dev, + "wdg: stat1: %02x %s INT_STATUS %02x STATUS_INT1 %02x STATUS_INT2 %02x\n", + val, (val & VBUS_DET) ? "usb online" : "usb offline", + int_stat, stat_int1, stat_int2); + + twl6030_charger_write(CONTROLLER_WDG, 0xff); + schedule_delayed_work(&charger->charger_monitor, + msecs_to_jiffies(10000)); +} + +static irqreturn_t twl6030_charger_interrupt(int irq, void *arg) +{ + struct twl6030_charger_info *charger = arg; + u8 val; + u8 int_stat; + u8 stat_int1; + u8 stat_int2; + + if (twl6030_charger_read(CONTROLLER_STAT1, &val) < 0) + return IRQ_HANDLED; + + if (twl6030_charger_read(CHARGERUSB_INT_STATUS, &int_stat) < 0) + return IRQ_HANDLED; + + if (twl6030_charger_read(CHARGERUSB_STATUS_INT1, &stat_int1) < 0) + return IRQ_HANDLED; + + if (twl6030_charger_read(CHARGERUSB_STATUS_INT2, &stat_int2) < 0) + return IRQ_HANDLED; + + dev_dbg(charger->dev, + "charger irq: stat1: %02x %s INT_STATUS %02x STATUS_INT1 %02x STATUS_INT2 %02x\n", + val, (val & VBUS_DET) ? 
"usb online" : "usb offline", + int_stat, stat_int1, stat_int2); + power_supply_changed(charger->usb); + + if (val & VBUS_DET) { + if (twl6030_charger_read(CONTROLLER_CTRL1, &val) < 0) + return IRQ_HANDLED; + + if (!(val & CONTROLLER_CTRL1_EN_CHARGER)) { + if (twl6030_enable_usb(charger) < 0) + return IRQ_HANDLED; + + schedule_delayed_work(&charger->charger_monitor, + msecs_to_jiffies(10000)); + } + } else { + cancel_delayed_work(&charger->charger_monitor); + } + return IRQ_HANDLED; +} + +static int twl6030_charger_usb_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct twl6030_charger_info *charger = power_supply_get_drvdata(psy); + int ret; + u8 stat1; + u8 intstat; + + ret = twl6030_charger_read(CONTROLLER_STAT1, &stat1); + if (ret) + return ret; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + if (!(stat1 & VBUS_DET)) { + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + } + ret = twl6030_charger_read(CHARGERUSB_STATUS_INT2, &intstat); + if (ret) + return ret; + + if (intstat & CHARGE_DONE) + val->intval = POWER_SUPPLY_STATUS_FULL; + else if (intstat & CURRENT_TERM) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_CHARGING; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + if (!charger->channel_vusb) + return -ENODATA; + + ret = iio_read_channel_processed_scale(charger->channel_vusb, &val->intval, 1000); + if (ret < 0) + return ret; + + break; + case POWER_SUPPLY_PROP_ONLINE: + val->intval = !!(stat1 & VBUS_DET); + break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + val->intval = charger->input_current_limit; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int twl6030_charger_usb_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct twl6030_charger_info *charger = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + charger->input_current_limit = val->intval; + return twl6030_config_cinlimit_reg(charger, charger->input_current_limit); + default: + return -EINVAL; + } + + return 0; +} + +static int twl6030_charger_usb_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + dev_info(&psy->dev, "is %d writeable?\n", (int)psp); + switch (psp) { + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + return true; + default: + return false; + } +} + +static enum power_supply_property twl6030_charger_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, +}; + +static const struct power_supply_desc twl6030_charger_usb_desc = { + .name = "twl6030_usb", + .type = POWER_SUPPLY_TYPE_USB, + .properties = twl6030_charger_props, + .num_properties = ARRAY_SIZE(twl6030_charger_props), + .get_property = twl6030_charger_usb_get_property, + .set_property = twl6030_charger_usb_set_property, + .property_is_writeable = twl6030_charger_usb_property_is_writeable, +}; + +static int twl6030_charger_probe(struct platform_device *pdev) +{ + struct twl6030_charger_info *charger; + const struct twl6030_charger_chip_data *chip_data; + struct power_supply_config psy_cfg = {}; + int ret; + u8 val; + + charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL); + if (!charger) + return -ENOMEM; + + charger->dev = &pdev->dev; + charger->irq_chg = platform_get_irq(pdev, 0); + + chip_data = device_get_match_data(&pdev->dev); + if (!chip_data) + 
return dev_err_probe(&pdev->dev, -EINVAL, "missing chip data\n"); + + charger->extended_current_range = chip_data->extended_current_range; + platform_set_drvdata(pdev, charger); + psy_cfg.drv_data = charger; + psy_cfg.fwnode = dev_fwnode(&pdev->dev); + + charger->channel_vusb = devm_iio_channel_get(&pdev->dev, "vusb"); + if (IS_ERR(charger->channel_vusb)) { + ret = PTR_ERR(charger->channel_vusb); + if (ret == -EPROBE_DEFER) + return ret; /* iio not ready */ + dev_warn(&pdev->dev, "could not request vusb iio channel (%d)", + ret); + charger->channel_vusb = NULL; + } + + charger->usb = devm_power_supply_register(&pdev->dev, + &twl6030_charger_usb_desc, + &psy_cfg); + if (IS_ERR(charger->usb)) + return dev_err_probe(&pdev->dev, PTR_ERR(charger->usb), + "Failed to register usb\n"); + + ret = power_supply_get_battery_info(charger->usb, &charger->binfo); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "Failed to get battery info\n"); + + dev_info(&pdev->dev, "battery with vmax %d imax: %d\n", + charger->binfo->constant_charge_voltage_max_uv, + charger->binfo->constant_charge_current_max_ua); + + if (charger->binfo->constant_charge_voltage_max_uv == -EINVAL) { + ret = twl6030_charger_read(CHARGERUSB_CTRLLIMIT1, &val); + if (ret < 0) + return ret; + + charger->binfo->constant_charge_voltage_max_uv = + VOREG_TO_UV(val); + } + + if (charger->binfo->constant_charge_voltage_max_uv > 4760000 || + charger->binfo->constant_charge_voltage_max_uv < 350000) + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid charge voltage\n"); + + if (charger->binfo->constant_charge_current_max_ua == -EINVAL) { + ret = twl6030_charger_read(CHARGERUSB_CTRLLIMIT2, &val); + if (ret < 0) + return ret; + + charger->binfo->constant_charge_current_max_ua = VICHRG_TO_UA(val); + } + + if (charger->binfo->constant_charge_current_max_ua < 100000 || + charger->binfo->constant_charge_current_max_ua > 1500000) { + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid charge current\n"); + } + + if ((charger->binfo->charge_term_current_ua != -EINVAL) && + (charger->binfo->charge_term_current_ua > 400000 || + charger->binfo->charge_term_current_ua < 50000)) { + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid charge termination current\n"); + } + + ret = devm_delayed_work_autocancel(&pdev->dev, + &charger->charger_monitor, + twl6030_charger_wdg); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "Failed to register delayed work\n"); + + ret = devm_request_threaded_irq(&pdev->dev, charger->irq_chg, NULL, + twl6030_charger_interrupt, + IRQF_ONESHOT, pdev->name, + charger); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not request irq %d\n", + charger->irq_chg); + + /* turning on charging to configure things */ + twl6030_charger_write(CONTROLLER_CTRL1, 0); + twl6030_charger_interrupt(0, charger); + + return 0; +} + +static const struct twl6030_charger_chip_data twl6030_data = { + .extended_current_range = false, +}; + +static const struct twl6030_charger_chip_data twl6032_data = { + .extended_current_range = true, +}; + +static const struct of_device_id twl_charger_of_match[] = { + {.compatible = "ti,twl6030-charger", .data = &twl6030_data}, + {.compatible = "ti,twl6032-charger", .data = &twl6032_data}, + { } +}; +MODULE_DEVICE_TABLE(of, twl_charger_of_match); + +static struct platform_driver twl6030_charger_driver = { + .probe = twl6030_charger_probe, + .driver = { + .name = "twl6030_charger", + .of_match_table = twl_charger_of_match, + }, +}; +module_platform_driver(twl6030_charger_driver);
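For reference, the charge-parameter macros in this new driver are plain linear encodings; a minimal self-contained sketch (user-space C, with the macro math copied from the file above and everything else hypothetical) shows how a requested voltage and current land in the VOREG/VICHRG register fields:

#include <stdio.h>

/* Linear encodings from the driver above: 3.5 V base with 20 mV steps
 * for VOREG, and 100 mA steps starting at 100 mA for VICHRG. */
#define UV_TO_VOREG(x)	(((x) - 3500000) / 20000)
#define VOREG_TO_UV(x)	(((x) & 0x3F) * 20000 + 3500000)
#define UA_TO_VICHRG(x)	(((x) / 100000) - 1)
#define VICHRG_TO_UA(x)	(((x) & 0xf) * 100000 + 100000)

int main(void)
{
	/* 4.2 V encodes to 0x23, matching CHARGERUSB_VOREG_4P2 above */
	printf("VOREG(4200000 uV)  = 0x%02x\n", UV_TO_VOREG(4200000));
	/* the 6-bit field decodes back to exactly 4.2 V */
	printf("round trip         = %d uV\n", VOREG_TO_UV(UV_TO_VOREG(4200000)));
	/* a 500 mA charge current encodes to register value 4 */
	printf("VICHRG(500000 uA)  = %d\n", UA_TO_VICHRG(500000));
	printf("decoded            = %d uA\n", VICHRG_TO_UA(UA_TO_VICHRG(500000)));
	return 0;
}

The decode direction matters in probe: when the devicetree supplies no battery info, the code above falls back to VOREG_TO_UV()/VICHRG_TO_UA() on the CTRLLIMIT registers to recover the hardware's configured limits.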
+MODULE_DESCRIPTION("TWL6030 Battery Charger Interface driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c index d56e499ac59f..538055b29dec 100644 --- a/drivers/power/supply/wm831x_power.c +++ b/drivers/power/supply/wm831x_power.c @@ -720,7 +720,7 @@ static void wm831x_power_remove(struct platform_device *pdev) static struct platform_driver wm831x_power_driver = { .probe = wm831x_power_probe, - .remove_new = wm831x_power_remove, + .remove = wm831x_power_remove, .driver = { .name = "wm831x-power", }, diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index 3f79ab6f6abf..b0eb6e0ce8bc 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -577,7 +577,7 @@ static void wm8350_power_remove(struct platform_device *pdev) static struct platform_driver wm8350_power_driver = { .probe = wm8350_power_probe, - .remove_new = wm8350_power_remove, + .remove = wm8350_power_remove, .driver = { .name = "wm8350-power", }, diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c index 1cc38d1437d9..b3b0c37a9dd2 100644 --- a/drivers/power/supply/wm97xx_battery.c +++ b/drivers/power/supply/wm97xx_battery.c @@ -265,7 +265,7 @@ static struct platform_driver wm97xx_bat_driver = { #endif }, .probe = wm97xx_bat_probe, - .remove_new = wm97xx_bat_remove, + .remove = wm97xx_bat_remove, }; module_platform_driver(wm97xx_bat_driver); diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c index 200ad8751860..188ae2572674 100644 --- a/drivers/ps3/ps3-lpm.c +++ b/drivers/ps3/ps3-lpm.c @@ -91,7 +91,7 @@ struct ps3_lpm_shadow_regs { * struct ps3_lpm_priv - Private lpm device data. * * @open: An atomic variable indicating the lpm driver has been opened. - * @rights: The lpm rigths granted by the system policy module. A logical + * @rights: The lpm rights granted by the system policy module. A logical * OR of enum ps3_lpm_rights. * @node_id: The node id of a BE processor whose performance monitor this * lpar has the right to use. diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c index ad8ef59dea34..ab798b52910e 100644 --- a/drivers/ps3/ps3-sys-manager.c +++ b/drivers/ps3/ps3-sys-manager.c @@ -362,7 +362,7 @@ static int ps3_sys_manager_send_request_shutdown( * ps3_sys_manager_send_response - Send a 'response' to the system manager. * @status: zero = success, others fail. * - * The guest sends this message to the system manager to acnowledge success or + * The guest sends this message to the system manager to acknowledge success or * failure of a command sent by the system manager. */ diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c index 6328abd51ffa..5cb92535a4a1 100644 --- a/drivers/ps3/ps3-vuart.c +++ b/drivers/ps3/ps3-vuart.c @@ -467,8 +467,8 @@ struct list_buffer { * * If the port is idle on entry as much of the incoming data is written to * the port as the port will accept. Otherwise a list buffer is created - * and any remaning incoming data is copied to that buffer. The buffer is - * then enqueued for transmision via the transmit interrupt. + * and any remaining incoming data is copied to that buffer. The buffer is + * then enqueued for transmission via the transmit interrupt. 
*/ int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf, diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c index e061b7d0632b..f50032ad9702 100644 --- a/drivers/ps3/sys-manager-core.c +++ b/drivers/ps3/sys-manager-core.c @@ -12,7 +12,7 @@ #include <asm/ps3.h> /** - * Staticly linked routines that allow late binding of a loaded sys-manager + * Statically linked routines that allow late binding of a loaded sys-manager * module. */ diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index c56cd0f63909..77a36e7bddd5 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -150,7 +150,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) if (ppb > ops->max_adj || ppb < -ops->max_adj) return -ERANGE; err = ops->adjfine(ops, tx->freq); - ptp->dialed_frequency = tx->freq; + if (!err) + ptp->dialed_frequency = tx->freq; } else if (tx->modes & ADJ_OFFSET) { if (ops->adjphase) { s32 max_phase_adj = ops->getmaxphase(ops); diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index a8e91d9d028b..e3cc59b82ea6 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -1341,6 +1341,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) step = 150; break; case AXP313A_ID: + case AXP323_ID: case AXP717_ID: case AXP15060_ID: /* The DCDC PWM frequency seems to be fixed to 3 MHz. */ @@ -1527,6 +1528,15 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id) } break; + case AXP323_ID: + regmap_read(axp20x->regmap, AXP323_DCDC_MODE_CTRL2, ®); + + switch (id) { + case AXP313A_DCDC2: + return !!(reg & BIT(1)); + } + break; + default: return false; } @@ -1565,6 +1575,7 @@ static int axp20x_regulator_probe(struct platform_device *pdev) "x-powers,drive-vbus-en"); break; case AXP313A_ID: + case AXP323_ID: regulators = axp313a_regulators; nregulators = AXP313A_REG_ID_MAX; break; @@ -1597,7 +1608,7 @@ static int axp20x_regulator_probe(struct platform_device *pdev) nregulators = AXP15060_REG_ID_MAX; break; default: - dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n", + dev_err(&pdev->dev, "Unsupported AXP variant: %d\n", axp20x->variant); return -EINVAL; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 84d48e310aa8..8cb948a91e60 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -497,7 +497,8 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, return -EPERM; } - if (*max_uA > rdev->constraints->max_uA) + if (*max_uA > rdev->constraints->max_uA && + rdev->constraints->max_uA) *max_uA = rdev->constraints->max_uA; if (*min_uA < rdev->constraints->min_uA) *min_uA = rdev->constraints->min_uA; diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 6c343b4b9d15..7870722b6ee2 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -843,26 +843,15 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = { .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; -static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = { +static const struct rpmh_vreg_hw_data pmic5_ftsmps525 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, .voltage_ranges = (struct linear_range[]) { REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000), + REGULATOR_LINEAR_RANGE(1376000, 268, 438, 8000), }, - .n_linear_ranges = 1, - .n_voltages = 268, - .pmic_mode_map = 
pmic_mode_map_pmic5_smps, - .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, -}; - -static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = { - .regulator_type = VRM, - .ops = &rpmh_regulator_vrm_ops, - .voltage_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000), - }, - .n_linear_ranges = 1, - .n_voltages = 268, + .n_linear_ranges = 2, + .n_voltages = 439, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; @@ -1190,12 +1179,12 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_mv, "vdd-s6"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), @@ -1203,14 +1192,14 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), - RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), - RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"), + RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), @@ -1218,14 +1207,14 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { }; static const struct rpmh_vreg_init_data pmc8380_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), - RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), - RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), + RPMH_VREG("smps1", "smp%s1", 
&pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"), + RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), @@ -1409,16 +1398,16 @@ static const struct rpmh_vreg_init_data pmx65_vreg_data[] = { }; static const struct rpmh_vreg_init_data pmx75_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), - RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), - RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), - RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525_lv, "vdd-s9"), - RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525_lv, "vdd-s10"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"), + RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"), + RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525, "vdd-s9"), + RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525, "vdd-s10"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2-18"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 955e4e38477e..83962a114dc9 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -329,7 +329,8 @@ config STM32_RPROC config TI_K3_DSP_REMOTEPROC tristate "TI K3 DSP remoteproc support" - depends on ARCH_K3 + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n) depends on OMAP2PLUS_MBOX help Say m here to support TI's C66x and C71x DSP remote processor @@ -341,9 +342,9 @@ config TI_K3_DSP_REMOTEPROC config TI_K3_M4_REMOTEPROC tristate "TI K3 M4 remoteproc support" - depends on ARCH_OMAP2PLUS || ARCH_K3 - select MAILBOX - select OMAP2PLUS_MBOX + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n) + depends on OMAP2PLUS_MBOX help Say m here to support TI's M4 remote processor subsystems on various TI K3 family of SoCs through the remote processor @@ -354,7 +355,8 @@ config TI_K3_M4_REMOTEPROC config TI_K3_R5_REMOTEPROC tristate "TI K3 R5 remoteproc support" - depends on ARCH_K3 + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n) depends on OMAP2PLUS_MBOX help Say m here to support TI's R5F remote processor subsystems diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c index 
8770d0cf1255..93031f0867d1 100644 --- a/drivers/remoteproc/da8xx_remoteproc.c +++ b/drivers/remoteproc/da8xx_remoteproc.c @@ -251,10 +251,8 @@ static int da8xx_rproc_probe(struct platform_device *pdev) return irq; irq_data = irq_get_irq_data(irq); - if (!irq_data) { - dev_err(dev, "irq_get_irq_data(%d): NULL\n", irq); - return -EINVAL; - } + if (!irq_data) + return dev_err_probe(dev, -EINVAL, "irq_get_irq_data(%d): NULL\n", irq); bootreg = devm_platform_ioremap_resource_byname(pdev, "host1cfg"); if (IS_ERR(bootreg)) @@ -265,28 +263,17 @@ static int da8xx_rproc_probe(struct platform_device *pdev) return PTR_ERR(chipsig); dsp_clk = devm_clk_get(dev, NULL); - if (IS_ERR(dsp_clk)) { - dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk)); - - return PTR_ERR(dsp_clk); - } + if (IS_ERR(dsp_clk)) + return dev_err_probe(dev, PTR_ERR(dsp_clk), "clk_get error\n"); dsp_reset = devm_reset_control_get_exclusive(dev, NULL); - if (IS_ERR(dsp_reset)) { - if (PTR_ERR(dsp_reset) != -EPROBE_DEFER) - dev_err(dev, "unable to get reset control: %ld\n", - PTR_ERR(dsp_reset)); - - return PTR_ERR(dsp_reset); - } + if (IS_ERR(dsp_reset)) + return dev_err_probe(dev, PTR_ERR(dsp_reset), "unable to get reset control\n"); if (dev->of_node) { ret = of_reserved_mem_device_init(dev); - if (ret) { - dev_err(dev, "device does not have specific CMA pool: %d\n", - ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "device does not have specific CMA pool\n"); } rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name, @@ -378,7 +365,7 @@ MODULE_DEVICE_TABLE(of, davinci_rproc_of_match); static struct platform_driver da8xx_rproc_driver = { .probe = da8xx_rproc_probe, - .remove_new = da8xx_rproc_remove, + .remove = da8xx_rproc_remove, .driver = { .name = "davinci-rproc", .of_match_table = of_match_ptr(davinci_rproc_of_match), diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c index 376187ad5754..ea5024919c2f 100644 --- a/drivers/remoteproc/imx_dsp_rproc.c +++ b/drivers/remoteproc/imx_dsp_rproc.c @@ -1258,7 +1258,7 @@ MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match); static struct platform_driver imx_dsp_rproc_driver = { .probe = imx_dsp_rproc_probe, - .remove_new = imx_dsp_rproc_remove, + .remove = imx_dsp_rproc_remove, .driver = { .name = "imx-dsp-rproc", .of_match_table = imx_dsp_rproc_of_match, diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 800015ff7ff9..74299af1d7f1 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -1198,7 +1198,7 @@ MODULE_DEVICE_TABLE(of, imx_rproc_of_match); static struct platform_driver imx_rproc_driver = { .probe = imx_rproc_probe, - .remove_new = imx_rproc_remove, + .remove = imx_rproc_remove, .driver = { .name = "imx-rproc", .of_match_table = imx_rproc_of_match, diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c index 8f0f7a4cfef2..6e54093d1732 100644 --- a/drivers/remoteproc/keystone_remoteproc.c +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -490,7 +490,7 @@ MODULE_DEVICE_TABLE(of, keystone_rproc_of_match); static struct platform_driver keystone_rproc_driver = { .probe = keystone_rproc_probe, - .remove_new = keystone_rproc_remove, + .remove = keystone_rproc_remove, .driver = { .name = "keystone-rproc", .of_match_table = keystone_rproc_of_match, diff --git a/drivers/remoteproc/meson_mx_ao_arc.c b/drivers/remoteproc/meson_mx_ao_arc.c index f6744b538323..7dfdf11b0036 100644 --- a/drivers/remoteproc/meson_mx_ao_arc.c +++ 
b/drivers/remoteproc/meson_mx_ao_arc.c @@ -246,7 +246,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_ao_arc_rproc_match); static struct platform_driver meson_mx_ao_arc_rproc_driver = { .probe = meson_mx_ao_arc_rproc_probe, - .remove_new = meson_mx_ao_arc_rproc_remove, + .remove = meson_mx_ao_arc_rproc_remove, .driver = { .name = "meson-mx-ao-arc-rproc", .of_match_table = meson_mx_ao_arc_rproc_match, diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c index e744c07507ee..0f4a7065d0bd 100644 --- a/drivers/remoteproc/mtk_scp.c +++ b/drivers/remoteproc/mtk_scp.c @@ -1521,7 +1521,7 @@ MODULE_DEVICE_TABLE(of, mtk_scp_of_match); static struct platform_driver mtk_scp_driver = { .probe = scp_probe, - .remove_new = scp_remove, + .remove = scp_remove, .driver = { .name = "mtk-scp", .of_match_table = mtk_scp_of_match, diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c index 327f0c7ee3d6..1656574b7317 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -1132,7 +1132,7 @@ static struct platform_driver pru_rproc_driver = { .suppress_bind_attrs = true, }, .probe = pru_rproc_probe, - .remove_new = pru_rproc_remove, + .remove = pru_rproc_remove, }; module_platform_driver(pru_rproc_driver); diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index 572dcb0f055b..94af77baa7a1 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -534,15 +534,11 @@ static const struct rproc_ops adsp_ops = { static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids) { int num_clks = 0; - int i, ret; + int i; adsp->xo = devm_clk_get(adsp->dev, "xo"); - if (IS_ERR(adsp->xo)) { - ret = PTR_ERR(adsp->xo); - if (ret != -EPROBE_DEFER) - dev_err(adsp->dev, "failed to get xo clock"); - return ret; - } + if (IS_ERR(adsp->xo)) + return dev_err_probe(adsp->dev, PTR_ERR(adsp->xo), "failed to get xo clock"); for (i = 0; clk_ids[i]; i++) num_clks++; @@ -708,10 +704,9 @@ static int adsp_probe(struct platform_device *pdev) return ret; ret = qcom_rproc_pds_attach(adsp, desc->pd_names, desc->num_pds); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to attach proxy power domains\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "Failed to attach proxy power domains\n"); ret = adsp_init_reset(adsp); if (ret) @@ -734,15 +729,22 @@ static int adsp_probe(struct platform_device *pdev) desc->ssctl_id); if (IS_ERR(adsp->sysmon)) { ret = PTR_ERR(adsp->sysmon); - goto disable_pm; + goto deinit_remove_glink_pdm_ssr; } ret = rproc_add(rproc); if (ret) - goto disable_pm; + goto remove_sysmon; return 0; +remove_sysmon: + qcom_remove_sysmon_subdev(adsp->sysmon); +deinit_remove_glink_pdm_ssr: + qcom_q6v5_deinit(&adsp->q6v5); + qcom_remove_glink_subdev(rproc, &adsp->glink_subdev); + qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev); + qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev); disable_pm: qcom_rproc_pds_detach(adsp); @@ -840,7 +842,7 @@ MODULE_DEVICE_TABLE(of, adsp_of_match); static struct platform_driver adsp_pil_driver = { .probe = adsp_probe, - .remove_new = adsp_remove, + .remove = adsp_remove, .driver = { .name = "qcom_q6v5_adsp", .of_match_table = adsp_of_match, diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 2a42215ce8e0..e78bd986dc3f 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -261,7 +261,6 @@ enum { static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 
const struct qcom_mss_reg_res *reg_res) { - int rc; int i; if (!reg_res) @@ -269,13 +268,10 @@ static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, for (i = 0; reg_res[i].supply; i++) { regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); - if (IS_ERR(regs[i].reg)) { - rc = PTR_ERR(regs[i].reg); - if (rc != -EPROBE_DEFER) - dev_err(dev, "Failed to get %s\n regulator", - reg_res[i].supply); - return rc; - } + if (IS_ERR(regs[i].reg)) + return dev_err_probe(dev, PTR_ERR(regs[i].reg), + "Failed to get %s\n regulator", + reg_res[i].supply); regs[i].uV = reg_res[i].uV; regs[i].uA = reg_res[i].uA; @@ -1162,6 +1158,9 @@ static int q6v5_mba_load(struct q6v5 *qproc) goto disable_active_clks; } + if (qproc->has_mba_logs) + qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE); + writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); if (qproc->dp_size) { writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); @@ -1172,9 +1171,6 @@ static int q6v5_mba_load(struct q6v5 *qproc) if (ret) goto reclaim_mba; - if (qproc->has_mba_logs) - qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE); - ret = q6v5_rmb_mba_wait(qproc, 0, 5000); if (ret == -ETIMEDOUT) { dev_err(qproc->dev, "MBA boot timed out\n"); @@ -1813,14 +1809,10 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks, for (i = 0; clk_names[i]; i++) { clks[i] = devm_clk_get(dev, clk_names[i]); - if (IS_ERR(clks[i])) { - int rc = PTR_ERR(clks[i]); - - if (rc != -EPROBE_DEFER) - dev_err(dev, "Failed to get %s clock\n", - clk_names[i]); - return rc; - } + if (IS_ERR(clks[i])) + return dev_err_probe(dev, PTR_ERR(clks[i]), + "Failed to get %s clock\n", + clk_names[i]); } return i; @@ -2028,42 +2020,32 @@ static int q6v5_probe(struct platform_device *pdev) ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, desc->proxy_clk_names); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); + if (ret < 0) return ret; - } qproc->proxy_clk_count = ret; ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, desc->reset_clk_names); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get reset clocks.\n"); + if (ret < 0) return ret; - } qproc->reset_clk_count = ret; ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, desc->active_clk_names); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get active clocks.\n"); + if (ret < 0) return ret; - } qproc->active_clk_count = ret; ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, desc->proxy_supply); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); + if (ret < 0) return ret; - } qproc->proxy_reg_count = ret; ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, desc->active_supply); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get active regulators.\n"); + if (ret < 0) return ret; - } qproc->active_reg_count = ret; ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, @@ -2073,10 +2055,8 @@ static int q6v5_probe(struct platform_device *pdev) ret = q6v5_regulator_init(&pdev->dev, qproc->fallback_proxy_regs, desc->fallback_proxy_supply); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n"); + if (ret < 0) return ret; - } qproc->fallback_proxy_reg_count = ret; } else if (ret < 0) { dev_err(&pdev->dev, "Failed to init power domains\n"); @@ -2533,7 +2513,7 @@ MODULE_DEVICE_TABLE(of, q6v5_of_match); static struct platform_driver q6v5_driver = { .probe = q6v5_probe, - .remove_new = q6v5_remove, + .remove = q6v5_remove, .driver = { .name = "qcom-q6v5-mss", 
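The da8xx_remoteproc.c, qcom_q6v5_adsp.c and qcom_q6v5_mss.c hunks above all make the same conversion: the open-coded "log unless the error is -EPROBE_DEFER, then return" idiom becomes a single dev_err_probe() call. A minimal sketch of the before/after shape, with hypothetical names rather than code lifted from any of these drivers:

	/* Before: the deferral case has to be filtered by hand. */
	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get clock: %d\n", ret);
		return ret;
	}

	/*
	 * After: dev_err_probe() suppresses the message for -EPROBE_DEFER,
	 * records the deferral reason (visible in debugfs as
	 * devices_deferred) and returns its error argument, so the whole
	 * idiom collapses into the if body.
	 */
	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");

Because dev_err_probe() returns the error passed to it, the same conversion also applies where the error is already a plain int, as in the proxy power-domain hunk above. It is also why q6v5_probe() can drop its duplicate "Failed to get ..." messages: the init helpers now emit them via dev_err_probe() themselves.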
.of_match_table = q6v5_of_match, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index ef82835e98a4..97c4bdd9222a 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -453,24 +453,16 @@ static const struct rproc_ops adsp_minidump_ops = { static int adsp_init_clock(struct qcom_adsp *adsp) { - int ret; - adsp->xo = devm_clk_get(adsp->dev, "xo"); - if (IS_ERR(adsp->xo)) { - ret = PTR_ERR(adsp->xo); - if (ret != -EPROBE_DEFER) - dev_err(adsp->dev, "failed to get xo clock"); - return ret; - } + if (IS_ERR(adsp->xo)) + return dev_err_probe(adsp->dev, PTR_ERR(adsp->xo), + "failed to get xo clock"); + adsp->aggre2_clk = devm_clk_get_optional(adsp->dev, "aggre2"); - if (IS_ERR(adsp->aggre2_clk)) { - ret = PTR_ERR(adsp->aggre2_clk); - if (ret != -EPROBE_DEFER) - dev_err(adsp->dev, - "failed to get aggre2 clock"); - return ret; - } + if (IS_ERR(adsp->aggre2_clk)) + return dev_err_probe(adsp->dev, PTR_ERR(adsp->aggre2_clk), + "failed to get aggre2 clock"); return 0; } @@ -716,7 +708,7 @@ static int adsp_probe(struct platform_device *pdev) if (desc->minidump_id) ops = &adsp_minidump_ops; - rproc = devm_rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp)); + rproc = devm_rproc_alloc(&pdev->dev, desc->sysmon_name, ops, fw_name, sizeof(*adsp)); if (!rproc) { dev_err(&pdev->dev, "unable to allocate remoteproc\n"); @@ -759,16 +751,16 @@ static int adsp_probe(struct platform_device *pdev) ret = adsp_init_clock(adsp); if (ret) - goto free_rproc; + goto unassign_mem; ret = adsp_init_regulator(adsp); if (ret) - goto free_rproc; + goto unassign_mem; ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds, desc->proxy_pd_names); if (ret < 0) - goto free_rproc; + goto unassign_mem; adsp->proxy_pd_count = ret; ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state, @@ -784,18 +776,28 @@ static int adsp_probe(struct platform_device *pdev) desc->ssctl_id); if (IS_ERR(adsp->sysmon)) { ret = PTR_ERR(adsp->sysmon); - goto detach_proxy_pds; + goto deinit_remove_pdm_smd_glink; } qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name); ret = rproc_add(rproc); if (ret) - goto detach_proxy_pds; + goto remove_ssr_sysmon; return 0; +remove_ssr_sysmon: + qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev); + qcom_remove_sysmon_subdev(adsp->sysmon); +deinit_remove_pdm_smd_glink: + qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev); + qcom_remove_smd_subdev(rproc, &adsp->smd_subdev); + qcom_remove_glink_subdev(rproc, &adsp->glink_subdev); + qcom_q6v5_deinit(&adsp->q6v5); detach_proxy_pds: adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); +unassign_mem: + adsp_unassign_memory_region(adsp); free_rproc: device_init_wakeup(adsp->dev, false); @@ -907,6 +909,7 @@ static const struct adsp_data sm8250_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, + .minidump_id = 5, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -1124,6 +1127,7 @@ static const struct adsp_data sm8350_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, + .minidump_id = 7, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -1344,7 +1348,7 @@ static const struct adsp_data sc7280_wpss_resource = { .crash_reason_smem = 626, .firmware_name = "wpss.mdt", .pas_id = 6, - .auto_boot = true, + .auto_boot = false, .proxy_pd_names = (char*[]){ "cx", "mx", @@ -1421,6 +1425,7 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = 
"qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource}, { .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource}, { .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource}, + { .compatible = "qcom,sar2130p-adsp-pas", .data = &sm8350_adsp_resource}, { .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource}, { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init}, { .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource}, @@ -1477,7 +1482,7 @@ MODULE_DEVICE_TABLE(of, adsp_of_match); static struct platform_driver adsp_driver = { .probe = adsp_probe, - .remove_new = adsp_remove, + .remove = adsp_remove, .driver = { .name = "qcom_q6v5_pas", .of_match_table = adsp_of_match, diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index e913dabae992..93648734a2f2 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -902,90 +902,58 @@ static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss) static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss) { - int ret; - wcss->xo = devm_clk_get(wcss->dev, "xo"); - if (IS_ERR(wcss->xo)) { - ret = PTR_ERR(wcss->xo); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get xo clock"); - return ret; - } + if (IS_ERR(wcss->xo)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->xo), + "failed to get xo clock"); wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr"); - if (IS_ERR(wcss->gcc_abhs_cbcr)) { - ret = PTR_ERR(wcss->gcc_abhs_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get gcc abhs clock"); - return ret; - } + if (IS_ERR(wcss->gcc_abhs_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->gcc_abhs_cbcr), + "failed to get gcc abhs clock"); wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr"); - if (IS_ERR(wcss->gcc_axim_cbcr)) { - ret = PTR_ERR(wcss->gcc_axim_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get gcc axim clock\n"); - return ret; - } + if (IS_ERR(wcss->gcc_axim_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->gcc_axim_cbcr), + "failed to get gcc axim clock\n"); wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev, "lcc_ahbfabric_cbc"); - if (IS_ERR(wcss->ahbfabric_cbcr_clk)) { - ret = PTR_ERR(wcss->ahbfabric_cbcr_clk); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get ahbfabric clock\n"); - return ret; - } + if (IS_ERR(wcss->ahbfabric_cbcr_clk)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->ahbfabric_cbcr_clk), + "failed to get ahbfabric clock\n"); wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc"); - if (IS_ERR(wcss->lcc_csr_cbcr)) { - ret = PTR_ERR(wcss->lcc_csr_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get csr cbcr clk\n"); - return ret; - } + if (IS_ERR(wcss->lcc_csr_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->lcc_csr_cbcr), + "failed to get csr cbcr clk\n"); wcss->ahbs_cbcr = devm_clk_get(wcss->dev, "lcc_abhs_cbc"); - if (IS_ERR(wcss->ahbs_cbcr)) { - ret = PTR_ERR(wcss->ahbs_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n"); - return ret; - } + if (IS_ERR(wcss->ahbs_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->ahbs_cbcr), + "failed to get ahbs_cbcr clk\n"); wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev, "lcc_tcm_slave_cbc"); - if (IS_ERR(wcss->tcm_slave_cbcr)) { - ret = PTR_ERR(wcss->tcm_slave_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get tcm cbcr clk\n"); - return ret; - } + if 
(IS_ERR(wcss->tcm_slave_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->tcm_slave_cbcr), + "failed to get tcm cbcr clk\n"); wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc"); - if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) { - ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get abhm cbcr clk\n"); - return ret; - } + if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->qdsp6ss_abhm_cbcr), + "failed to get abhm cbcr clk\n"); wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc"); - if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) { - ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get axim cbcr clk\n"); - return ret; - } + if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->qdsp6ss_axim_cbcr), + "failed to get axim cbcr clk\n"); wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep"); - if (IS_ERR(wcss->lcc_bcr_sleep)) { - ret = PTR_ERR(wcss->lcc_bcr_sleep); - if (ret != -EPROBE_DEFER) - dev_err(wcss->dev, "failed to get bcr cbcr clk\n"); - return ret; - } + if (IS_ERR(wcss->lcc_bcr_sleep)) + return dev_err_probe(wcss->dev, PTR_ERR(wcss->lcc_bcr_sleep), + "failed to get bcr cbcr clk\n"); return 0; } @@ -1021,7 +989,6 @@ static int q6v5_wcss_probe(struct platform_device *pdev) wcss = rproc->priv; wcss->dev = &pdev->dev; - wcss->version = desc->version; wcss->version = desc->version; wcss->requires_force_stop = desc->requires_force_stop; @@ -1056,18 +1023,33 @@ static int q6v5_wcss_probe(struct platform_device *pdev) qcom_add_pdm_subdev(rproc, &wcss->pdm_subdev); qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss"); - if (desc->ssctl_id) + if (desc->ssctl_id) { wcss->sysmon = qcom_add_sysmon_subdev(rproc, desc->sysmon_name, desc->ssctl_id); + if (IS_ERR(wcss->sysmon)) { + ret = PTR_ERR(wcss->sysmon); + goto deinit_remove_subdevs; + } + } ret = rproc_add(rproc); if (ret) - return ret; + goto remove_sysmon_subdev; platform_set_drvdata(pdev, rproc); return 0; + +remove_sysmon_subdev: + if (desc->ssctl_id) + qcom_remove_sysmon_subdev(wcss->sysmon); +deinit_remove_subdevs: + qcom_q6v5_deinit(&wcss->q6v5); + qcom_remove_glink_subdev(rproc, &wcss->glink_subdev); + qcom_remove_pdm_subdev(rproc, &wcss->pdm_subdev); + qcom_remove_ssr_subdev(rproc, &wcss->ssr_subdev); + return ret; } static void q6v5_wcss_remove(struct platform_device *pdev) @@ -1111,7 +1093,7 @@ MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match); static struct platform_driver q6v5_wcss_driver = { .probe = q6v5_wcss_probe, - .remove_new = q6v5_wcss_remove, + .remove = q6v5_wcss_remove, .driver = { .name = "qcom-q6v5-wcss-pil", .of_match_table = q6v5_wcss_of_match, diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index a7bb9da27029..5b5664603eed 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -682,7 +682,7 @@ MODULE_DEVICE_TABLE(of, wcnss_of_match); static struct platform_driver wcnss_driver = { .probe = wcnss_probe, - .remove_new = wcnss_remove, + .remove = wcnss_remove, .driver = { .name = "qcom-wcnss-pil", .of_match_table = wcnss_of_match, diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c index dd36fd077911..b989718776bd 100644 --- a/drivers/remoteproc/qcom_wcnss_iris.c +++ b/drivers/remoteproc/qcom_wcnss_iris.c @@ -155,9 +155,8 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo) iris->xo_clk = devm_clk_get(&iris->dev, 
"xo"); if (IS_ERR(iris->xo_clk)) { - ret = PTR_ERR(iris->xo_clk); - if (ret != -EPROBE_DEFER) - dev_err(&iris->dev, "failed to acquire xo clk\n"); + ret = dev_err_probe(&iris->dev, PTR_ERR(iris->xo_clk), + "failed to acquire xo clk\n"); goto err_device_del; } diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c index cc17e8421f65..921d853594f4 100644 --- a/drivers/remoteproc/rcar_rproc.c +++ b/drivers/remoteproc/rcar_rproc.c @@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(of, rcar_rproc_of_match); static struct platform_driver rcar_rproc_driver = { .probe = rcar_rproc_probe, - .remove_new = rcar_rproc_remove, + .remove = rcar_rproc_remove, .driver = { .name = "rcar-rproc", .of_match_table = rcar_rproc_of_match, diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index f276956f2c5c..eb66f78ec8b7 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -109,10 +109,10 @@ static int rproc_enable_iommu(struct rproc *rproc) return 0; } - domain = iommu_domain_alloc(dev->bus); - if (!domain) { + domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(domain)) { dev_err(dev, "can't alloc iommu domain\n"); - return -ENOMEM; + return PTR_ERR(domain); } iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index d3f39009b28e..25a655f33ec0 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -593,7 +593,7 @@ static void rproc_virtio_remove(struct platform_device *pdev) /* Platform driver */ static struct platform_driver rproc_virtio_driver = { .probe = rproc_virtio_probe, - .remove_new = rproc_virtio_remove, + .remove = rproc_virtio_remove, .driver = { .name = "rproc-virtio", }, diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index 1340be9d0110..5df99bae7131 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c @@ -457,7 +457,7 @@ static void st_rproc_remove(struct platform_device *pdev) static struct platform_driver st_rproc_driver = { .probe = st_rproc_probe, - .remove_new = st_rproc_remove, + .remove = st_rproc_remove, .driver = { .name = "st-rproc", .of_match_table = of_match_ptr(st_rproc_match), diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index 8c7f7950b80e..b02b36a3f515 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -946,7 +946,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops, static struct platform_driver stm32_rproc_driver = { .probe = stm32_rproc_probe, - .remove_new = stm32_rproc_remove, + .remove = stm32_rproc_remove, .driver = { .name = "stm32-rproc", .pm = pm_ptr(&stm32_rproc_pm_ops), diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index 8be3f631c192..a695890254ff 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -403,7 +403,7 @@ static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc, * the hard-coded value suffices to support the IPC-only mode. 
*/ *rsc_table_sz = 256; - return (struct resource_table *)kproc->rmem[0].cpu_addr; + return (__force struct resource_table *)kproc->rmem[0].cpu_addr; } /* @@ -576,11 +576,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc) return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - return -EINVAL; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* 64-bit address regions currently not supported */ @@ -793,7 +791,7 @@ MODULE_DEVICE_TABLE(of, k3_dsp_of_match); static struct platform_driver k3_dsp_rproc_driver = { .probe = k3_dsp_rproc_probe, - .remove_new = k3_dsp_rproc_remove, + .remove = k3_dsp_rproc_remove, .driver = { .name = "k3-dsp-rproc", .of_match_table = k3_dsp_of_match, diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c index 09f0484a90e1..a16fb165fced 100644 --- a/drivers/remoteproc/ti_k3_m4_remoteproc.c +++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c @@ -433,11 +433,9 @@ static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc) return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - return -EINVAL; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* 64-bit address regions currently not supported */ diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 747ee467da88..6560b7954027 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -487,10 +487,10 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) * can be effective on all TCM addresses. */ dev_dbg(dev, "zeroing out ATCM memory\n"); - memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size); + memset_io(core->mem[0].cpu_addr, 0x00, core->mem[0].size); dev_dbg(dev, "zeroing out BTCM memory\n"); - memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size); + memset_io(core->mem[1].cpu_addr, 0x00, core->mem[1].size); return 0; } @@ -717,7 +717,7 @@ static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc, * the hard-coded value suffices to support the IPC-only mode. 
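The of_reserved_mem_lookup() changes in the ti_k3_dsp and ti_k3_m4 hunks above, and in the ti_k3_r5 hunk below, are reference-count fixes with a common shape: drop the node reference right after the lookup, on every path, instead of remembering to drop it in each early return. This is safe because struct reserved_mem is owned by the reserved-memory core, not by the caller's node reference, so it stays valid after of_node_put(). A sketch, assuming the usual memory-region phandle walk:

	rmem_np = of_parse_phandle(np, "memory-region", i);
	if (!rmem_np)
		return -EINVAL;

	rmem = of_reserved_mem_lookup(rmem_np);
	of_node_put(rmem_np);	/* done with the node whether or not the lookup hit */
	if (!rmem)
		return -EINVAL;

The for_each_available_child_of_node_scoped() conversion further down applies the same principle through scope-based cleanup: the iterator owns the child reference and releases it automatically on break, goto or return, which is why the manual of_node_put(child) calls disappear from the k3_r5 error paths. The neighbouring memset_io() and (__force ...) changes are sparse fixes in the same files: the TCM and resource-table regions are ioremap()ed, so writes must go through the I/O accessors, and a cast from an __iomem pointer back to a CPU pointer needs the explicit __force override.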
*/ *rsc_table_sz = 256; - return (struct resource_table *)kproc->rmem[0].cpu_addr; + return (__force struct resource_table *)kproc->rmem[0].cpu_addr; } /* @@ -1001,12 +1001,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) } rmem = of_reserved_mem_lookup(rmem_np); + of_node_put(rmem_np); if (!rmem) { - of_node_put(rmem_np); ret = -EINVAL; goto unmap_rmem; } - of_node_put(rmem_np); kproc->rmem[i].bus_addr = rmem->base; /* @@ -1558,11 +1557,7 @@ static int k3_r5_core_of_init(struct platform_device *pdev) core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); if (IS_ERR(core->ti_sci)) { - ret = PTR_ERR(core->ti_sci); - if (ret != -EPROBE_DEFER) { - dev_err(dev, "failed to get ti-sci handle, ret = %d\n", - ret); - } + ret = dev_err_probe(dev, PTR_ERR(core->ti_sci), "failed to get ti-sci handle\n"); core->ti_sci = NULL; goto err; } @@ -1578,18 +1573,14 @@ static int k3_r5_core_of_init(struct platform_device *pdev) ret = PTR_ERR_OR_ZERO(core->reset); if (!ret) ret = -ENODEV; - if (ret != -EPROBE_DEFER) { - dev_err(dev, "failed to get reset handle, ret = %d\n", - ret); - } + dev_err_probe(dev, ret, "failed to get reset handle\n"); goto err; } core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci); if (IS_ERR(core->tsp)) { - ret = PTR_ERR(core->tsp); - dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n", - ret); + ret = dev_err_probe(dev, PTR_ERR(core->tsp), + "failed to construct ti-sci proc control\n"); goto err; } @@ -1659,16 +1650,14 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev_of_node(dev); struct platform_device *cpdev; - struct device_node *child; struct k3_r5_core *core; int ret; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { cpdev = of_find_device_by_node(child); if (!cpdev) { ret = -ENODEV; dev_err(dev, "could not get R5 core platform device\n"); - of_node_put(child); goto fail; } @@ -1677,7 +1666,6 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev) dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n", ret); put_device(&cpdev->dev); - of_node_put(child); goto fail; } @@ -1718,11 +1706,8 @@ static int k3_r5_probe(struct platform_device *pdev) init_waitqueue_head(&cluster->core_transition); ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode); - if (ret < 0 && ret != -EINVAL) { - dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n", - ret); - return ret; - } + if (ret < 0 && ret != -EINVAL) + return dev_err_probe(dev, ret, "invalid format for ti,cluster-mode\n"); if (ret == -EINVAL) { /* @@ -1741,49 +1726,39 @@ static int k3_r5_probe(struct platform_device *pdev) } if ((cluster->mode == CLUSTER_MODE_SINGLECPU && !data->single_cpu_mode) || - (cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core)) { - dev_err(dev, "Cluster mode = %d is not supported on this SoC\n", cluster->mode); - return -EINVAL; - } + (cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core)) + return dev_err_probe(dev, -EINVAL, + "Cluster mode = %d is not supported on this SoC\n", + cluster->mode); num_cores = of_get_available_child_count(np); - if (num_cores != 2 && !data->is_single_core) { - dev_err(dev, "MCU cluster requires both R5F cores to be enabled but num_cores is set to = %d\n", - num_cores); - return -ENODEV; - } + if (num_cores != 2 && !data->is_single_core) + return dev_err_probe(dev, -ENODEV, + "MCU cluster requires both R5F cores to be enabled but num_cores is set to = %d\n", 
+ num_cores); - if (num_cores != 1 && data->is_single_core) { - dev_err(dev, "SoC supports only single core R5 but num_cores is set to %d\n", - num_cores); - return -ENODEV; - } + if (num_cores != 1 && data->is_single_core) + return dev_err_probe(dev, -ENODEV, + "SoC supports only single core R5 but num_cores is set to %d\n", + num_cores); platform_set_drvdata(pdev, cluster); ret = devm_of_platform_populate(dev); - if (ret) { - dev_err(dev, "devm_of_platform_populate failed, ret = %d\n", - ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "devm_of_platform_populate failed\n"); ret = k3_r5_cluster_of_init(pdev); - if (ret) { - dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "k3_r5_cluster_of_init failed\n"); ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev); if (ret) return ret; ret = k3_r5_cluster_rproc_init(pdev); - if (ret) { - dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n", - ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "k3_r5_cluster_rproc_init failed\n"); ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev); if (ret) diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c index 36a55f7ffa64..d8be21e71721 100644 --- a/drivers/remoteproc/wkup_m3_rproc.c +++ b/drivers/remoteproc/wkup_m3_rproc.c @@ -251,7 +251,7 @@ static const struct dev_pm_ops wkup_m3_rproc_pm_ops = { static struct platform_driver wkup_m3_rproc_driver = { .probe = wkup_m3_rproc_probe, - .remove_new = wkup_m3_rproc_remove, + .remove = wkup_m3_rproc_remove, .driver = { .name = "wkup_m3_rproc", .of_match_table = wkup_m3_rproc_of_match, diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 18374555888d..5b3abb6db248 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -156,6 +156,7 @@ config RESET_MCHP_SPARX5 config RESET_NPCM bool "NPCM BMC Reset Driver" if COMPILE_TEST default ARCH_NPCM + select AUXILIARY_BUS help This enables the reset controller driver for Nuvoton NPCM BMC SoCs. diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c index a200cc8c7955..e5b6127783a7 100644 --- a/drivers/reset/reset-npcm.c +++ b/drivers/reset/reset-npcm.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Nuvoton Technology corporation. 
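The reset-npcm.c additions below hook the NPCM8xx clock controller onto the auxiliary bus: the reset driver owns the register block, so it publishes an auxiliary device for the clock driver to bind against. The lifecycle it follows is the standard auxiliary-bus sequence (init, add, then a devm action that deletes and uninits). A condensed sketch with hypothetical names:

	static void demo_adev_release(struct device *dev)
	{
		/* free the structure that embeds the auxiliary_device */
	}

	/*
	 * Registration half of the auxiliary-device lifecycle. init must be
	 * undone with uninit if add fails; after a successful add, teardown
	 * is delete followed by uninit, which is what the devm action in the
	 * hunk below performs.
	 */
	static int demo_adev_register(struct device *parent,
				      struct auxiliary_device *adev)
	{
		int ret;

		adev->name = "demo-clk";
		adev->dev.parent = parent;
		adev->dev.release = demo_adev_release;

		ret = auxiliary_device_init(adev);
		if (ret)
			return ret;

		ret = auxiliary_device_add(adev);
		if (ret) {
			auxiliary_device_uninit(adev);
			return ret;
		}
		return 0;
	}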
+#include <linux/auxiliary_bus.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> @@ -10,11 +11,14 @@ #include <linux/property.h> #include <linux/reboot.h> #include <linux/reset-controller.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/of_address.h> +#include <soc/nuvoton/clock-npcm8xx.h> + /* NPCM7xx GCR registers */ #define NPCM_MDLR_OFFSET 0x7C #define NPCM7XX_MDLR_USBD0 BIT(9) @@ -89,6 +93,7 @@ struct npcm_rc_data { const struct npcm_reset_info *info; struct regmap *gcr_regmap; u32 sw_reset_number; + struct device *dev; void __iomem *base; spinlock_t lock; }; @@ -372,6 +377,67 @@ static const struct reset_control_ops npcm_rc_ops = { .status = npcm_rc_status, }; +static void npcm_clock_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +static void npcm_clock_adev_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + struct npcm_clock_adev *rdev = to_npcm_clock_adev(adev); + + kfree(rdev); +} + +static struct auxiliary_device *npcm_clock_adev_alloc(struct npcm_rc_data *rst_data, char *clk_name) +{ + struct npcm_clock_adev *rdev; + struct auxiliary_device *adev; + int ret; + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + rdev->base = rst_data->base; + + adev = &rdev->adev; + adev->name = clk_name; + adev->dev.parent = rst_data->dev; + adev->dev.release = npcm_clock_adev_release; + adev->id = 555u; + + ret = auxiliary_device_init(adev); + if (ret) { + kfree(rdev); + return ERR_PTR(ret); + } + + return adev; +} + +static int npcm8xx_clock_controller_register(struct npcm_rc_data *rst_data, char *clk_name) +{ + struct auxiliary_device *adev; + int ret; + + adev = npcm_clock_adev_alloc(rst_data, clk_name); + if (IS_ERR(adev)) + return PTR_ERR(adev); + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + return devm_add_action_or_reset(rst_data->dev, npcm_clock_unregister_adev, adev); +} + static int npcm_rc_probe(struct platform_device *pdev) { struct npcm_rc_data *rc; @@ -392,6 +458,7 @@ static int npcm_rc_probe(struct platform_device *pdev) rc->rcdev.of_node = pdev->dev.of_node; rc->rcdev.of_reset_n_cells = 2; rc->rcdev.of_xlate = npcm_reset_xlate; + rc->dev = &pdev->dev; ret = devm_reset_controller_register(&pdev->dev, &rc->rcdev); if (ret) { @@ -408,12 +475,19 @@ static int npcm_rc_probe(struct platform_device *pdev) rc->restart_nb.priority = 192; rc->restart_nb.notifier_call = npcm_rc_restart; ret = register_restart_handler(&rc->restart_nb); - if (ret) + if (ret) { dev_warn(&pdev->dev, "failed to register restart handler\n"); + return ret; + } } } - return ret; + switch (rc->info->bmc_id) { + case BMC_NPCM8XX: + return npcm8xx_clock_controller_register(rc, "clk-npcm8xx"); + default: + return 0; + } } static struct platform_driver npcm_rc_driver = { diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index d3af1dfa3c7d..a2f9d85c7156 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1204,7 +1204,8 @@ void qcom_glink_native_rx(struct qcom_glink *glink) ret = qcom_glink_rx_open_ack(glink, param1); break; case GLINK_CMD_OPEN: - ret = qcom_glink_rx_defer(glink, param2); + /* upper 16 bits of param2 are the "prio" field */ + ret = qcom_glink_rx_defer(glink, param2 & 0xffff); break; case 
GLINK_CMD_TX_DATA: case GLINK_CMD_TX_DATA_CONT: diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c index 59b627fc1ecf..954ac4ef53e8 100644 --- a/drivers/rtc/rtc-bd70528.c +++ b/drivers/rtc/rtc-bd70528.c @@ -236,7 +236,6 @@ static int bd70528_probe(struct platform_device *pdev) { struct bd70528_rtc *bd_rtc; const struct rtc_class_ops *rtc_ops; - const char *irq_name; int ret; struct rtc_device *rtc; int irq; @@ -259,7 +258,6 @@ static int bd70528_probe(struct platform_device *pdev) switch (chip) { case ROHM_CHIP_TYPE_BD71815: - irq_name = "bd71815-rtc-alm-0"; bd_rtc->reg_time_start = BD71815_REG_RTC_START; /* @@ -276,7 +274,6 @@ static int bd70528_probe(struct platform_device *pdev) hour_reg = BD71815_REG_HOUR; break; case ROHM_CHIP_TYPE_BD71828: - irq_name = "bd71828-rtc-alm-0"; bd_rtc->reg_time_start = BD71828_REG_RTC_START; bd_rtc->bd718xx_alm_block_start = BD71828_REG_RTC_ALM_START; hour_reg = BD71828_REG_RTC_HOUR; @@ -286,7 +283,7 @@ static int bd70528_probe(struct platform_device *pdev) return -ENOENT; } - irq = platform_get_irq_byname(pdev, irq_name); + irq = platform_get_irq_byname(pdev, "bd70528-rtc-alm-0"); if (irq < 0) return irq; diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index ad39797890e5..bf054dd7682b 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -302,9 +302,9 @@ static void __exit amiga_a3000_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. */ static struct platform_driver amiga_a3000_scsi_driver __refdata = { - .remove_new = __exit_p(amiga_a3000_scsi_remove), - .driver = { - .name = "amiga-a3000-scsi", + .remove = __exit_p(amiga_a3000_scsi_remove), + .driver = { + .name = "amiga-a3000-scsi", }, }; diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index d9103adc87fe..75b43047a155 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -115,9 +115,9 @@ static void __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. 
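The .remove_new to .remove renames threaded through this series are the tail end of the platform-bus conversion to a void remove callback: .remove_new was the transitional member name for the void-returning variant while the int-returning .remove still existed, and once .remove itself became void the original name could be restored. Only the member name changes back; the callback keeps its void signature:

	static int demo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static void demo_remove(struct platform_device *pdev)
	{
		/* release resources; nothing can be reported back to the bus */
	}

	static struct platform_driver demo_driver = {
		.probe	= demo_probe,
		.remove	= demo_remove,	/* spelled .remove_new during the transition */
		.driver	= {
			.name = "demo",
		},
	};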
*/ static struct platform_driver amiga_a4000t_scsi_driver __refdata = { - .remove_new = __exit_p(amiga_a4000t_scsi_remove), - .driver = { - .name = "amiga-a4000t-scsi", + .remove = __exit_p(amiga_a4000t_scsi_remove), + .driver = { + .name = "amiga-a4000t-scsi", }, }; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 1d09d3ac6aa4..8c384c25dca1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2736,7 +2736,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, int isAif, int isFastResponse, struct hw_fib *aif_fib); int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type); -int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 47287559c768..ffef61c4aa01 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1698,127 +1698,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) return retval; } -int aac_check_health(struct aac_dev * aac) -{ - int BlinkLED; - unsigned long time_now, flagv = 0; - struct list_head * entry; - - /* Extending the scope of fib_lock slightly to protect aac->in_reset */ - if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) - return 0; - - if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { - spin_unlock_irqrestore(&aac->fib_lock, flagv); - return 0; /* OK */ - } - - aac->in_reset = 1; - - /* Fake up an AIF: - * aac_aifcmd.command = AifCmdEventNotify = 1 - * aac_aifcmd.seqnum = 0xFFFFFFFF - * aac_aifcmd.data[0] = AifEnExpEvent = 23 - * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 - * aac.aifcmd.data[2] = AifHighPriority = 3 - * aac.aifcmd.data[3] = BlinkLED - */ - - time_now = jiffies/HZ; - entry = aac->fib_list.next; - - /* - * For each Context that is on the - * fibctxList, make a copy of the - * fib, and then set the event to wake up the - * thread that is waiting for it. - */ - while (entry != &aac->fib_list) { - /* - * Extract the fibctx - */ - struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); - struct hw_fib * hw_fib; - struct fib * fib; - /* - * Check if the queue is getting - * backlogged - */ - if (fibctx->count > 20) { - /* - * It's *not* jiffies folks, - * but jiffies / HZ, so do not - * panic ... - */ - u32 time_last = fibctx->jiffies; - /* - * Has it been > 2 minutes - * since the last read off - * the queue? 
- */ - if ((time_now - time_last) > aif_timeout) { - entry = entry->next; - aac_close_fib_context(aac, fibctx); - continue; - } - } - /* - * Warning: no sleep allowed while - * holding spinlock - */ - hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); - fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); - if (fib && hw_fib) { - struct aac_aifcmd * aif; - - fib->hw_fib_va = hw_fib; - fib->dev = aac; - aac_fib_init(fib); - fib->type = FSAFS_NTC_FIB_CONTEXT; - fib->size = sizeof (struct fib); - fib->data = hw_fib->data; - aif = (struct aac_aifcmd *)hw_fib->data; - aif->command = cpu_to_le32(AifCmdEventNotify); - aif->seqnum = cpu_to_le32(0xFFFFFFFF); - ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); - ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); - ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); - ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); - - /* - * Put the FIB onto the - * fibctx's fibs - */ - list_add_tail(&fib->fiblink, &fibctx->fib_list); - fibctx->count++; - /* - * Set the event to wake up the - * thread that will waiting. - */ - complete(&fibctx->completion); - } else { - printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); - kfree(fib); - kfree(hw_fib); - } - entry = entry->next; - } - - spin_unlock_irqrestore(&aac->fib_lock, flagv); - - if (BlinkLED < 0) { - printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", - aac->name, BlinkLED); - goto out; - } - - printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); - -out: - aac->in_reset = 0; - return BlinkLED; -} - static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) { return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c index 176704b24e6a..f1ce02cd569e 100644 --- a/drivers/scsi/aic7xxx/aic7770.c +++ b/drivers/scsi/aic7xxx/aic7770.c @@ -99,21 +99,6 @@ struct aic7770_identity aic7770_ident_table[] = ahc_aic7770_EISA_setup } }; -const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table); - -struct aic7770_identity * -aic7770_find_device(uint32_t id) -{ - struct aic7770_identity *entry; - int i; - - for (i = 0; i < ahc_num_aic7770_devs; i++) { - entry = &aic7770_ident_table[i]; - if (entry->full_id == (id & entry->id_mask)) - return (entry); - } - return (NULL); -} int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 9bc755a0a2d3..20857c213c72 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -1119,7 +1119,6 @@ struct aic7770_identity { ahc_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table[]; -extern const int ahc_num_aic7770_devs; #define AHC_EISA_SLOT_OFFSET 0xc00 #define AHC_EISA_IOSIZE 0x100 @@ -1135,7 +1134,6 @@ int ahc_pci_test_register_access(struct ahc_softc *); void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc); /*************************** EISA/VL Front End ********************************/ -struct aic7770_identity *aic7770_find_device(uint32_t); int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *, u_int port); diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 98a1b966a0b0..85055677666c 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -885,7 +885,7 @@ static void __exit atari_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. 
*/ static struct platform_driver atari_scsi_driver __refdata = { - .remove_new = __exit_p(atari_scsi_remove), + .remove = __exit_p(atari_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 4cb9249e583c..a6b8c4ddea19 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -138,14 +138,6 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), } while (0) -/* - * PCI devices supported by the current BFA - */ -struct bfa_pciid_s { - u16 device_id; - u16 vendor_id; -}; - extern char bfa_version[]; struct bfa_iocfc_regs_s { @@ -408,9 +400,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, (((&(_bfa)->modules.dconf_mod)->min_cfg) \ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status)) -void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); -void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa); diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 3438d0b8ba06..a99a101b95ef 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1934,24 +1934,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) } /* - * Return the list of PCI vendor/device id lists supported by this - * BFA instance. - */ -void -bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) -{ - static struct bfa_pciid_s __pciids[] = { - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, - }; - - *npciids = ARRAY_SIZE(__pciids); - *pciids = __pciids; -} - -/* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. @@ -1987,20 +1969,3 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) cfg->drvcfg.delay_comp = BFA_FALSE; } - -void -bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) -{ - bfa_cfg_get_default(cfg); - cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; - cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; - cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; - cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; - cfg->fwcfg.num_rports = BFA_RPORT_MIN; - cfg->fwcfg.num_fwtio_reqs = 0; - - cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; - cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; - cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; - cfg->drvcfg.min_cfg = BFA_TRUE; -} diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 5e36620425ac..760606062c66 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -125,28 +125,6 @@ enum bfa_lport_offline_reason { }; /* - * FCS lport info. 
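As the comments in the a3000, a4000t and atari hunks above note, these remove callbacks live in .exit.text while the platform_driver structure referencing them stays resident, and __refdata plus __exit_p() is the idiom that keeps that legal. A sketch with hypothetical names:

	/*
	 * __exit_p(fn) evaluates to fn when the driver can be unloaded
	 * (module build) and to NULL when it is built in, so the reference
	 * into .exit.text vanishes exactly when that section is discarded;
	 * __refdata tells modpost the remaining reference is intentional.
	 */
	static void __exit demo_scsi_remove(struct platform_device *pdev)
	{
		/* teardown */
	}

	static struct platform_driver demo_scsi_driver __refdata = {
		.remove = __exit_p(demo_scsi_remove),
		.driver = {
			.name = "demo-scsi",
		},
	};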
- */ -struct bfa_lport_info_s { - u8 port_type; /* bfa_lport_type_t : physical or - * virtual */ - u8 port_state; /* one of bfa_lport_state values */ - u8 offline_reason; /* one of bfa_lport_offline_reason_t - * values */ - wwn_t port_wwn; - wwn_t node_wwn; - - /* - * following 4 feilds are valid for Physical Ports only - */ - u32 max_vports_supp; /* Max supported vports */ - u32 num_vports_inuse; /* Num of in use vports */ - u32 max_rports_supp; /* Max supported rports */ - u32 num_rports_inuse; /* Num of doscovered rports */ - -}; - -/* * FCS port statistics */ struct bfa_lport_stats_s { diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 28ae4dc14dc9..2518e36d70d3 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -3413,15 +3413,6 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) } /* - * Notification on completions from related ioim. - */ -void -bfa_tskim_iodone(struct bfa_tskim_s *tskim) -{ - bfa_wc_down(&tskim->wc); -} - -/* * Handle IOC h/w failure notification from itnim. */ void diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 4499f84c2d81..64e4485b226e 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -339,7 +339,6 @@ void bfa_ioim_tov(struct bfa_ioim_s *ioim); void bfa_tskim_attach(struct bfa_fcpim_s *fcpim); void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_tskim_iodone(struct bfa_tskim_s *tskim); void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw); diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 9788354b90da..19903539a08d 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -373,15 +373,11 @@ bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rport[], int *nrports); -wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, - int index, int nrports, bfa_boolean_t bwwn); struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); -void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, struct bfa_lport_attr_s *port_attr); void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, @@ -416,8 +412,6 @@ struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid( struct bfa_fcs_lport_s *port, u32 pid); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( struct bfa_fcs_lport_s *port, wwn_t pwwn); -struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t nwwn); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier( struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid); void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, @@ -486,7 +480,6 @@ bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct bfad_vport_s *vport_drv); -bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); bfa_status_t 
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); @@ -494,7 +487,6 @@ void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, struct bfa_vport_attr_s *vport_attr); struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn); -void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); @@ -623,8 +615,6 @@ void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, struct bfa_rport_attr_s *attr); struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn); -struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t rnwwn); void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); void bfa_fcs_rport_set_max_logins(u32 max_logins); void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, @@ -633,8 +623,6 @@ void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 pid); -void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi_rsp); void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_logi_s *plogi); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 966bf6cc6dd9..9a85f417018f 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -938,25 +938,6 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) } /* - * NWWN based Lookup for a R-Port in the Port R-Port Queue - */ -struct bfa_fcs_rport_s * -bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) -{ - struct bfa_fcs_rport_s *rport; - struct list_head *qe; - - list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *) qe; - if (wwn_is_equal(rport->nwwn, nwwn)) - return rport; - } - - bfa_trc(port->fcs, nwwn); - return NULL; -} - -/* * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * @@ -5645,54 +5626,6 @@ bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) return &fcs->fabric.bport; } -wwn_t -bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, - int nrports, bfa_boolean_t bwwn) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - int i; - struct bfa_fcs_s *fcs; - - if (port == NULL || nrports == 0) - return (wwn_t) 0; - - fcs = port->fcs; - bfa_trc(fcs, (u32) nrports); - - i = 0; - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while ((qe != qh) && (i < nrports)) { - rport = (struct bfa_fcs_rport_s *) qe; - if (bfa_ntoh3b(rport->pid) > 0xFFF000) { - qe = bfa_q_next(qe); - bfa_trc(fcs, (u32) rport->pwwn); - bfa_trc(fcs, rport->pid); - bfa_trc(fcs, i); - continue; - } - - if (bwwn) { - if (!memcmp(&wwn, &rport->pwwn, 8)) - break; - } else { - if (i == index) - break; - } - - i++; - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, i); - if (rport) - return rport->pwwn; - else - return (wwn_t) 0; -} - void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rports[], int *nrports) @@ -5823,54 +5756,6 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) return NULL; } -/* - * API corresponding to NPIV_VPORT_GETINFO. 
- */ -void -bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info) -{ - - bfa_trc(port->fcs, port->fabric->fabric_name); - - if (port->vport == NULL) { - /* - * This is a Physical port - */ - port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - - port_info->max_vports_supp = - bfa_lps_get_max_vport(port->fcs->bfa); - port_info->num_vports_inuse = - port->fabric->num_vports; - port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; - port_info->num_rports_inuse = port->num_rports; - } else { - /* - * This is a virtual port - */ - port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - } -} - void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, struct bfa_lport_stats_s *port_stats) @@ -6568,15 +6453,6 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) } /* - * Cleanup notification from fabric SM on link timer expiry. - */ -void -bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_cleanup++; -} - -/* * Stop notification from fabric SM. To be invoked from within FCS. */ void @@ -6699,24 +6575,6 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, } /* - * Use this function to findout if this is a pbc vport or not. - * - * @param[in] vport - pointer to bfa_fcs_vport_t. - * - * @returns None - */ -bfa_boolean_t -bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) -{ - - if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE)) - return BFA_TRUE; - else - return BFA_FALSE; - -} - -/* * Use this function initialize the vport. * * @param[in] vport - pointer to bfa_fcs_vport_t. diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index ce52a9c88ae6..d4bde9bbe75b 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -2646,27 +2646,6 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); return rport; } -/* - * Called by bport in private loop topology to indicate that a - * rport has been discovered and plogi has been completed. - * - * @param[in] port - base port or vport - * @param[in] rpid - remote port ID - */ -void -bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, - struct fc_logi_s *plogi) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); - if (!rport) - return; - - bfa_fcs_rport_update(rport, plogi); - - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); -} /* * Called by bport/vport to handle PLOGI received from a new remote port. @@ -3089,21 +3068,6 @@ bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) return rport; } -struct bfa_fcs_rport_s * -bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - } - - return rport; -} - /* * Remote port features (RPF) implementation. 
*/ diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index ea2f107f564c..aa68d61a2d0d 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -2254,17 +2254,6 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) return status; } -/* - * Enable/disable IOC failure auto recovery. - */ -void -bfa_ioc_auto_recover(bfa_boolean_t auto_recover) -{ - bfa_auto_recover = auto_recover; -} - - - bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { @@ -2272,16 +2261,6 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc) } bfa_boolean_t -bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) -{ - u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); - - return ((r32 != BFI_IOC_UNINIT) && - (r32 != BFI_IOC_INITING) && - (r32 != BFI_IOC_MEMTEST)); -} - -bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { __be32 *msgp = mbmsg; diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 3ec10503caff..d70332e9ad6d 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -919,7 +919,6 @@ void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc); void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); -void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); void bfa_ioc_detach(struct bfa_ioc_s *ioc); void bfa_ioc_suspend(struct bfa_ioc_s *ioc); void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, @@ -934,7 +933,6 @@ bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); -bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 578e7678b056..ed29ebda30da 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -113,7 +113,6 @@ void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *, struct bfa_pcidev_s *); -void bfa_uf_iocdisable(struct bfa_s *); void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 9f33aa303b18..df33afaaa673 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -913,14 +913,6 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) return reqbuf; } -u32 -bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - - return mod->req_pld_sz; -} - /* * Get the internal response buffer pointer * @@ -1023,21 +1015,6 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_fcxp_queue(fcxp, send_req); } -/* - * Abort a BFA FCXP - * - * @param[in] fcxp BFA fcxp pointer - * - * @return void - */ -bfa_status_t -bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) -{ - bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); - WARN_ON(1); - return BFA_STATUS_OK; -} - void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, @@ -3857,15 +3834,6 @@ bfa_fcport_clr_hardalpa(struct bfa_s *bfa) 
return BFA_STATUS_OK; } -bfa_boolean_t -bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - *alpa = fcport->cfg.hardalpa; - return fcport->cfg.cfg_hardalpa; -} - u8 bfa_fcport_get_myalpa(struct bfa_s *bfa) { @@ -3923,17 +3891,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) /* * Get port attributes. */ - -wwn_t -bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (node) - return fcport->nwwn; - else - return fcport->pwwn; -} - void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) { @@ -4106,18 +4063,6 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa) } /* - * Enable/Disable FAA feature in port config - */ -void -bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, state); - fcport->cfg.faa_state = state; -} - -/* * Get default minimum ratelim speed */ enum bfa_port_speed @@ -5529,23 +5474,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) } void -bfa_uf_iocdisable(struct bfa_s *bfa) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - struct bfa_uf_s *uf; - struct list_head *qe, *qen; - - /* Enqueue unused uf resources to free_q */ - list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); - - list_for_each_safe(qe, qen, &ufm->uf_posted_q) { - uf = (struct bfa_uf_s *) qe; - list_del(&uf->qe); - bfa_uf_put(ufm, uf); - } -} - -void bfa_uf_start(struct bfa_s *bfa) { bfa_uf_post_all(BFA_UF_MOD(bfa)); diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 26eeee82bedc..99a960a8a2db 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -587,14 +587,12 @@ bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); -bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr); -wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, enum bfa_port_linkstate event), void *event_cbarg); @@ -619,7 +617,6 @@ bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); void bfa_fcport_dportenable(struct bfa_s *bfa); void bfa_fcport_dportdisable(struct bfa_s *bfa); bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); -void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn); bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, @@ -687,8 +684,6 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_cb_fcxp_send_t cbfn, void *cbarg, u32 rsp_maxlen, u8 rsp_timeout); -bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); -u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 62cb7a864fd5..6aa1d3a7e24b 100644 --- 
a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -843,26 +843,6 @@ bfad_drv_init(struct bfad_s *bfad) } void -bfad_drv_uninit(struct bfad_s *bfad) -{ - unsigned long flags; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfa_iocfc_stop(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); - - del_timer_sync(&bfad->hal_tmo); - bfa_isr_disable(&bfad->bfa); - bfa_detach(&bfad->bfa); - bfad_remove_intr(bfad); - bfad_hal_mem_release(bfad); - - bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; -} - -void bfad_drv_start(struct bfad_s *bfad) { unsigned long flags; @@ -1693,9 +1673,8 @@ bfad_init(void) error = bfad_im_module_init(); if (error) { - error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); - goto ext; + return -ENOMEM; } if (strcmp(FCPI_NAME, " fcpim") == 0) diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index da42e3261237..ce2ec5108947 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -312,7 +312,6 @@ void bfad_bfa_tmo(struct timer_list *t); void bfad_init_timer(struct bfad_s *bfad); int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); -void bfad_drv_uninit(struct bfad_s *bfad); int bfad_worker(void *ptr); void bfad_debugfs_init(struct bfad_port_s *port); void bfad_debugfs_exit(struct bfad_port_s *port); diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 41e6b4dac056..e1e0e967bcc3 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -1148,7 +1148,7 @@ struct bfi_diag_dport_scn_testcomp_s { u16 numbuffer; /* from switch */ u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */ u32 latency; /* from switch */ - u32 distance; /* from swtich unit in meters */ + u32 distance; /* from switch unit in meters */ /* Buffers required to saturate the link */ u16 frm_sz; /* from switch for buf_reqd */ u8 rsvd[2]; diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c index f893e9779e9d..baf5f4e47937 100644 --- a/drivers/scsi/bvme6000_scsi.c +++ b/drivers/scsi/bvme6000_scsi.c @@ -106,7 +106,7 @@ static struct platform_driver bvme6000_scsi_driver = { .name = "bvme6000-scsi", }, .probe = bvme6000_probe, - .remove_new = bvme6000_device_remove, + .remove = bvme6000_device_remove, }; static int __init bvme6000_scsi_init(void) diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 8a133254c4f6..1e2d7c63a8e3 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -1045,10 +1045,6 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a, u32 length, void *data); void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); -void esas2r_build_cli_req(struct esas2r_adapter *a, - struct esas2r_request *rq, - u32 length, - u32 cmd_rsp_len); void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c index 30028e56df63..5aa728704dfc 100644 --- a/drivers/scsi/esas2r/esas2r_vda.c +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -444,23 +444,6 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) } } -/* Build a VDA CLI request. 
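The bvme6000 hunk above, like the hisi_sas v1/v2 and jazz_esp hunks further down, is part of the tree-wide move from platform_driver::remove_new back to ::remove now that the callback returns void. A minimal sketch of the post-conversion shape; the demo_* names are illustrative, not from this series:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* void return: a remove callback can no longer report failure */
	dev_info(&pdev->dev, "removed\n");
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,	/* spelled .remove_new during the transition */
	.driver	= {
		.name = "demo-platform",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("remove callback demo");
MODULE_LICENSE("GPL");

Nothing else about these drivers appears to change; the hunks are pure rename churn.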
*/ -void esas2r_build_cli_req(struct esas2r_adapter *a, - struct esas2r_request *rq, - u32 length, - u32 cmd_rsp_len) -{ - struct atto_vda_cli_req *vrq = &rq->vrq->cli; - - clear_vda_request(rq); - - rq->vrq->scsi.function = VDA_FUNC_CLI; - - vrq->length = cpu_to_le32(length); - vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len); - vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge); -} - /* Build a VDA IOCTL request. */ void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index d223f482488f..a44768bceb9a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -307,6 +307,7 @@ enum { struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); + int (*fw_info_check)(struct hisi_hba *hisi_hba); int (*interrupt_preinit)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *device); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 6219807ce3b9..53cb15f6714b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1321,6 +1321,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) } if (rc == TMF_RESP_FUNC_COMPLETE) { + usleep_range(900, 1000); ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); @@ -1384,6 +1385,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { + u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba); struct asd_sas_port *_sas_port = NULL; int phy_no; @@ -1397,7 +1399,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) continue; /* Report PHY state change to libsas */ - if (state & BIT(phy_no)) { + if (new_state & BIT(phy_no)) { if (do_port_check && sas_port && sas_port->port_dev) { struct domain_device *dev = sas_port->port_dev; @@ -1410,6 +1412,16 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) } } else { hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); + + /* + * The new_state is not ready but old_state is ready, + * the two possible causes: + * 1. The connected device is removed + * 2. Device exists but phyup timed out + */ + if (state & BIT(phy_no)) + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); } } } @@ -1545,9 +1557,15 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) /* Init and wait for PHYs to come up and all libsas event finished. 
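The one-line hisi_sas_softreset_ata_disk() change above inserts a settle delay before iterating the SATA links. For sleeps in roughly the 10 us to 20 ms range, usleep_range() is the documented primitive: msleep() can oversleep badly at this scale and udelay() busy-waits. A minimal illustration:

#include <linux/delay.h>

static void demo_settle(void)
{
	/* sleep 900-1000 us; the range lets the timer subsystem coalesce */
	usleep_range(900, 1000);
}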
*/ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; - if (!(hisi_hba->phy_state & BIT(phy_no))) + if (!sas_phy->phy->enabled) + continue; + + if (!(hisi_hba->phy_state & BIT(phy_no))) { + hisi_sas_phy_enable(hisi_hba, phy_no, 1); continue; + } async_schedule_domain(hisi_sas_async_init_wait_phyup, phy, &async); @@ -2450,6 +2468,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (hisi_hba->hw->fw_info_check) { + if (hisi_hba->hw->fw_info_check(hisi_hba)) + goto err_out; + } + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (error) { dev_err(dev, "No usable DMA addressing method\n"); @@ -2630,10 +2653,10 @@ static __init int hisi_sas_init(void) static __exit void hisi_sas_exit(void) { - sas_release_transport(hisi_sas_stt); - if (hisi_sas_debugfs_enable) debugfs_remove(hisi_sas_debugfs_dir); + + sas_release_transport(hisi_sas_stt); } module_init(hisi_sas_init); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 71b5008c3552..c3e571be2222 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1734,6 +1734,23 @@ static struct attribute *host_v1_hw_attrs[] = { ATTRIBUTE_GROUPS(host_v1_hw); +static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 32) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v1_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT .device_configure = hisi_sas_device_configure, @@ -1747,6 +1764,7 @@ static const struct scsi_host_template sht_v1_hw = { static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, + .fw_info_check = check_fw_info_v1_hw, .setup_itct = setup_itct_v1_hw, .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, @@ -1784,7 +1802,7 @@ MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); static struct platform_driver hisi_sas_v1_driver = { .probe = hisi_sas_v1_probe, - .remove_new = hisi_sas_remove, + .remove = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v1_of_match, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 342d75f12051..1a62b5d15eca 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -3566,6 +3566,23 @@ static void map_queues_v2_hw(struct Scsi_Host *shost) } } +static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v2_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT .device_configure = hisi_sas_device_configure, @@ -3582,6 +3599,7 @@ static const struct scsi_host_template sht_v2_hw = { static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, + .fw_info_check = check_fw_info_v2_hw, .interrupt_preinit = hisi_sas_v2_interrupt_preinit, 
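check_fw_info_v1_hw() and check_fw_info_v2_hw() above, and the v3 variant later in this diff, share one idiom: values parsed from firmware are range-checked before they size any driver structures, so a corrupt table or misbehaving firmware fails probe with -EINVAL instead of corrupting memory. A generic sketch of the pattern; the limits here are illustrative, not the hardware's real maxima:

#include <linux/device.h>
#include <linux/errno.h>

static int demo_check_fw_info(struct device *dev, int n_phy, int queue_count)
{
	/* reject implausible firmware-reported resources before use */
	if (n_phy < 1 || n_phy > 8) {
		dev_err(dev, "invalid phy number from FW: %d\n", n_phy);
		return -EINVAL;
	}

	if (queue_count < 1 || queue_count > 16) {
		dev_err(dev, "invalid queue count from FW: %d\n", queue_count);
		return -EINVAL;
	}

	return 0;
}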
.setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, @@ -3631,7 +3649,7 @@ MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, - .remove_new = hisi_sas_remove, + .remove = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 4cd3a3eab6f1..5db931663ae4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -43,6 +43,7 @@ #define CQ_INT_CONVERGE_EN 0xb0 #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 +#define CFG_ICT_TIMER_STEP_TRSH 0xc8 #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) @@ -638,9 +639,12 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, CFG_ICT_TIMER_STEP_TRSH, 0xf4240); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, hisi_sas_intr_conv); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); @@ -682,7 +686,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); - hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7ffffff); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 0x30f4240); @@ -2493,6 +2497,7 @@ static int complete_v3_hw(struct hisi_sas_cq *cq) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + cond_resched(); return completed; } @@ -2796,14 +2801,15 @@ static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) { /* config those registers between enable and disable PHYs */ hisi_sas_stop_phys(hisi_hba); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); if (hisi_hba->intr_coal_ticks == 0 || hisi_hba->intr_coal_count == 0) { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); } else { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, hisi_hba->intr_coal_ticks); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, @@ -3371,6 +3377,23 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, }; +static int check_fw_info_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || 
hisi_hba->n_phy > 8) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static struct Scsi_Host * hisi_sas_shost_alloc_pci(struct pci_dev *pdev) { @@ -3401,6 +3424,9 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (check_fw_info_v3_hw(hisi_hba) < 0) + goto err_out; + if (experimental_iopoll_q_cnt < 0 || experimental_iopoll_q_cnt >= hisi_hba->queue_count) dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", @@ -3550,6 +3576,11 @@ debugfs_to_reg_name_v3_hw(int off, int base_off, return NULL; } +static bool debugfs_dump_is_generated_v3_hw(void *p) +{ + return p ? true : false; +} + static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, const struct hisi_sas_debugfs_reg *reg) { @@ -3575,6 +3606,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *global = s->private; + if (!debugfs_dump_is_generated_v3_hw(global->data)) + return -EPERM; + debugfs_print_reg_v3_hw(global->data, s, &debugfs_global_reg); @@ -3586,6 +3620,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *axi = s->private; + if (!debugfs_dump_is_generated_v3_hw(axi->data)) + return -EPERM; + debugfs_print_reg_v3_hw(axi->data, s, &debugfs_axi_reg); @@ -3597,6 +3634,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *ras = s->private; + if (!debugfs_dump_is_generated_v3_hw(ras->data)) + return -EPERM; + debugfs_print_reg_v3_hw(ras->data, s, &debugfs_ras_reg); @@ -3609,6 +3649,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_port *port = s->private; const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; + if (!debugfs_dump_is_generated_v3_hw(port->data)) + return -EPERM; + debugfs_print_reg_v3_hw(port->data, s, reg_port); return 0; @@ -3664,6 +3707,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_cq *debugfs_cq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); @@ -3685,8 +3731,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) { + struct hisi_sas_debugfs_dq *debugfs_dq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_dq_show_slot_v3_hw(s, slot, s->private); @@ -3700,6 +3750,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_iost *iost = debugfs_iost->iost; int i, max_command_entries = HISI_SAS_MAX_COMMANDS; + if (!debugfs_dump_is_generated_v3_hw(iost)) + return -EPERM; + for (i = 0; i < max_command_entries; i++, iost++) { __le64 *data = &iost->qw0; @@ -3719,6 +3772,9 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) int i, tab_idx; __le64 *iost; + if (!debugfs_dump_is_generated_v3_hw(iost_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { /* * Data struct of IOST cache: @@ -3742,6 +3798,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) struct 
hisi_sas_debugfs_itct *debugfs_itct = s->private; struct hisi_sas_itct *itct = debugfs_itct->itct; + if (!debugfs_dump_is_generated_v3_hw(itct)) + return -EPERM; + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { __le64 *data = &itct->qw0; @@ -3761,6 +3820,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) int i, tab_idx; __le64 *itct; + if (!debugfs_dump_is_generated_v3_hw(itct_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { /* * Data struct of ITCT cache: @@ -3778,10 +3840,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) } DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); -static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) +static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) { u64 *debugfs_timestamp; - int dump_index = hisi_hba->debugfs_dump_index; struct dentry *dump_dentry; struct dentry *dentry; char name[256]; @@ -3789,17 +3850,17 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) int c; int d; - snprintf(name, 256, "%d", dump_index); + snprintf(name, 256, "%d", index); dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); - debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; + debugfs_timestamp = &hisi_hba->debugfs_timestamp[index]; debugfs_create_u64("timestamp", 0400, dump_dentry, debugfs_timestamp); debugfs_create_file("global", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], + &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL], &debugfs_global_v3_hw_fops); /* Create port dir and files */ @@ -3808,7 +3869,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", p); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_port_reg[dump_index][p], + &hisi_hba->debugfs_port_reg[index][p], &debugfs_port_v3_hw_fops); } @@ -3818,7 +3879,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", c); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_cq[dump_index][c], + &hisi_hba->debugfs_cq[index][c], &debugfs_cq_v3_hw_fops); } @@ -3828,32 +3889,32 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", d); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_dq[dump_index][d], + &hisi_hba->debugfs_dq[index][d], &debugfs_dq_v3_hw_fops); } debugfs_create_file("iost", 0400, dump_dentry, - &hisi_hba->debugfs_iost[dump_index], + &hisi_hba->debugfs_iost[index], &debugfs_iost_v3_hw_fops); debugfs_create_file("iost_cache", 0400, dump_dentry, - &hisi_hba->debugfs_iost_cache[dump_index], + &hisi_hba->debugfs_iost_cache[index], &debugfs_iost_cache_v3_hw_fops); debugfs_create_file("itct", 0400, dump_dentry, - &hisi_hba->debugfs_itct[dump_index], + &hisi_hba->debugfs_itct[index], &debugfs_itct_v3_hw_fops); debugfs_create_file("itct_cache", 0400, dump_dentry, - &hisi_hba->debugfs_itct_cache[dump_index], + &hisi_hba->debugfs_itct_cache[index], &debugfs_itct_cache_v3_hw_fops); debugfs_create_file("axi", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], + &hisi_hba->debugfs_regs[index][DEBUGFS_AXI], &debugfs_axi_v3_hw_fops); debugfs_create_file("ras", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], + &hisi_hba->debugfs_regs[index][DEBUGFS_RAS], &debugfs_ras_v3_hw_fops); } @@ -4516,22 +4577,34 @@ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) int i; devm_kfree(dev, 
hisi_hba->debugfs_iost_cache[dump_index].cache); + hisi_hba->debugfs_iost_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); + hisi_hba->debugfs_itct_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); + hisi_hba->debugfs_iost[dump_index].iost = NULL; devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); + hisi_hba->debugfs_itct[dump_index].itct = NULL; - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); + hisi_hba->debugfs_dq[dump_index][i].hdr = NULL; + } - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_cq[dump_index][i].complete_hdr); + hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL; + } - for (i = 0; i < DEBUGFS_REGS_NUM; i++) + for (i = 0; i < DEBUGFS_REGS_NUM; i++) { devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); + hisi_hba->debugfs_regs[dump_index][i].data = NULL; + } - for (i = 0; i < hisi_hba->n_phy; i++) + for (i = 0; i < hisi_hba->n_phy; i++) { devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); + hisi_hba->debugfs_port_reg[dump_index][i].data = NULL; + } } static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { @@ -4658,8 +4731,6 @@ static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba) debugfs_snapshot_itct_reg_v3_hw(hisi_hba); debugfs_snapshot_iost_reg_v3_hw(hisi_hba); - debugfs_create_files_v3_hw(hisi_hba); - debugfs_snapshot_restore_v3_hw(hisi_hba); hisi_hba->debugfs_dump_index++; @@ -4743,6 +4814,34 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; } +static int debugfs_dump_index_v3_hw_show(struct seq_file *s, void *p) +{ + int *debugfs_dump_index = s->private; + + if (*debugfs_dump_index > 0) + seq_printf(s, "%d\n", *debugfs_dump_index - 1); + else + seq_puts(s, "dump not triggered\n"); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_dump_index_v3_hw); + +static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + hisi_hba->debugfs_dump_dentry = + debugfs_create_dir("dump", hisi_hba->debugfs_dir); + + debugfs_create_file("latest_dump", 0400, hisi_hba->debugfs_dump_dentry, + &hisi_hba->debugfs_dump_index, + &debugfs_dump_index_v3_hw_fops); + + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) + debugfs_create_files_v3_hw(hisi_hba, i); +} + static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) { debugfs_remove_recursive(hisi_hba->debugfs_dir); @@ -4755,19 +4854,17 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_sas_debugfs_dir); - debugfs_create_file("trigger_dump", 0200, - hisi_hba->debugfs_dir, - hisi_hba, - &debugfs_trigger_dump_v3_hw_fops); - /* create bist structures */ debugfs_bist_init_v3_hw(hisi_hba); - hisi_hba->debugfs_dump_dentry = - debugfs_create_dir("dump", hisi_hba->debugfs_dir); + debugfs_dump_init_v3_hw(hisi_hba); debugfs_phy_down_cnt_init_v3_hw(hisi_hba); debugfs_fifo_init_v3_hw(hisi_hba); + debugfs_create_file("trigger_dump", 0200, + hisi_hba->debugfs_dir, + hisi_hba, + &debugfs_trigger_dump_v3_hw_fops); } static int @@ -4860,16 +4957,13 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) SHOST_DIX_GUARD_CRC); } - if (hisi_sas_debugfs_enable) - debugfs_init_v3_hw(hisi_hba); - rc = interrupt_preinit_v3_hw(hisi_hba); if (rc) 
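The debugfs churn above has two cooperating halves: debugfs_release_v3_hw() now stores NULL after every devm_kfree(), and each show() handler first asks debugfs_dump_is_generated_v3_hw() whether its buffer exists, returning -EPERM rather than touching freed memory. A condensed sketch of that guard; the demo_* names are illustrative:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct demo_dump {
	void *data;	/* NULLed when the dump buffer is released */
};

static int demo_dump_show(struct seq_file *s, void *p)
{
	struct demo_dump *dump = s->private;

	/* a plain pointer test covers both "never dumped" and "released" */
	if (!dump->data)
		return -EPERM;

	seq_printf(s, "dump at %p\n", dump->data);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_dump);

This is also what lets debugfs_dump_init_v3_hw() create the files up front for every possible dump index: an unpopulated slot simply refuses reads.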
- goto err_out_undo_debugfs; + goto err_out_free_host; rc = scsi_add_host(shost, dev); if (rc) - goto err_out_undo_debugfs; + goto err_out_free_host; rc = sas_register_ha(sha); if (rc) @@ -4880,6 +4974,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_out_unregister_ha; scsi_scan_host(shost); + if (hisi_sas_debugfs_enable) + debugfs_init_v3_hw(hisi_hba); pm_runtime_set_autosuspend_delay(dev, 5000); pm_runtime_use_autosuspend(dev); @@ -4900,9 +4996,6 @@ err_out_unregister_ha: sas_unregister_ha(sha); err_out_remove_host: scsi_remove_host(shost); -err_out_undo_debugfs: - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); err_out_free_host: hisi_sas_free(hisi_hba); scsi_host_put(shost); @@ -4934,6 +5027,8 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct Scsi_Host *shost = sha->shost; pm_runtime_get_noresume(dev); + if (hisi_sas_debugfs_enable) + debugfs_exit_v3_hw(hisi_hba); sas_unregister_ha(sha); flush_workqueue(hisi_hba->wq); @@ -4941,9 +5036,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) hisi_sas_v3_destroy_irqs(pdev, hisi_hba); hisi_sas_free(hisi_hba); - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); - scsi_host_put(shost); } @@ -5034,7 +5126,8 @@ static int _suspend_v3_hw(struct device *device) interrupt_disable_v3_hw(hisi_hba); #ifdef CONFIG_PM - if (atomic_read(&device->power.usage_count)) { + if ((device->power.runtime_status == RPM_SUSPENDING) && + atomic_read(&device->power.usage_count)) { dev_err(dev, "PM suspend: host status cannot be suspended\n"); rc = -EBUSY; goto err_out; diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index fb04b0b515ab..35137f5cfb3a 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c @@ -196,7 +196,7 @@ MODULE_ALIAS("platform:jazz_esp"); static struct platform_driver esp_jazz_driver = { .probe = esp_jazz_probe, - .remove_new = esp_jazz_remove, + .remove = esp_jazz_remove, .driver = { .name = "jazz_esp", }, diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 85059b83ea6b..1c6b024160da 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -398,7 +398,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; - if (ndlp->nlp_flag & NLP_ELS_SND_MASK) + if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) || + test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) || + test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_RNID_SND, &ndlp->nlp_flag)) return -ENODEV; /* allocate our bsg tracking structure */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d4e46a08f94d..efeb61b15a5b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -571,7 +571,7 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); -void lpfc_sli4_node_prep(struct lpfc_hba *); +void lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); @@ -660,6 +660,7 @@ void lpfc_wqe_cmd_template(void); void lpfc_nvmet_cmd_template(void); void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, uint32_t stat, uint32_t param); +void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba); extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; extern int lpfc_no_hba_reset_cnt; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 134bc96dd134..30891ad17e2a 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " - "Data: x%x x%x x%x x%lx x%x\n", Did, + "Data: x%lx x%x x%x x%lx x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); @@ -744,7 +744,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) * state of ndlp hit devloss, change state to * allow rediscovery. */ - if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -832,12 +832,10 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; - spin_lock_irq(&ndlp->lock); if (ndlp->nlp_DID == Did) - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); else - ndlp->nlp_flag |= NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } } @@ -894,13 +892,11 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, */ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) + if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } @@ -1440,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " - "NameServer Rsp Data: x%x x%lx x%x\n", + "NameServer Rsp Data: x%lx x%lx x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { @@ -2226,6 +2222,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status, ulp_word4, latt); if (latt || ulp_status) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, + "0229 FDMI cmd %04x failed, latt = %d " + "ulp_status: (x%x/x%x), sli_flag x%x\n", + be16_to_cpu(fdmi_cmd), latt, ulp_status, + ulp_word4, phba->sli.sli_flag); /* Look for a retryable error */ if (ulp_status == IOSTAT_LOCAL_REJECT) { @@ -2234,8 +2235,16 @@ 
lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case IOERR_SLI_DOWN: /* Driver aborted this IO. No retry as error * is likely Offline->Online or some adapter - * error. Recovery will try again. + * error. Recovery will try again, but if port + * is not active there's no point to continue + * issuing follow up FDMI commands. */ + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { + free_ndlp = cmdiocb->ndlp; + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + return; + } break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: @@ -2256,12 +2265,6 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } } - - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0229 FDMI cmd %04x latt = %d " - "ulp_status: x%x, rid x%x\n", - be16_to_cpu(fdmi_cmd), latt, ulp_status, - ulp_word4); } free_ndlp = cmdiocb->ndlp; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a2d2b02b3418..3fd1aa5cc78c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -870,8 +870,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) wwn_to_u64(ndlp->nlp_nodename.u.wwn)); len += scnprintf(buf+len, size-len, "RPI:x%04x ", ndlp->nlp_rpi); - len += scnprintf(buf+len, size-len, "flag:x%08x ", - ndlp->nlp_flag); + len += scnprintf(buf+len, size-len, "flag:x%08lx ", + ndlp->nlp_flag); if (!ndlp->nlp_type) len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f5ae8cc15820..3e173b5d00e0 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -102,7 +102,7 @@ struct lpfc_nodelist { spinlock_t lock; /* Node management lock */ - uint32_t nlp_flag; /* entry flags */ + unsigned long nlp_flag; /* entry flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -182,37 +182,37 @@ struct lpfc_node_rrq { #define lpfc_ndlp_check_qdepth(phba, ndlp) \ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) -/* Defines for nlp_flag (uint32) */ -#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ -#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ -#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ -#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ -#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ -#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ -#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ -#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ -#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ -#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ -#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. 
*/ -#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ -#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */ -#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ -#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ -#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ -#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ -#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ -#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ -#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ -#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful +/* nlp_flag mask bits */ +enum lpfc_nlp_flag { + NLP_IGNR_REG_CMPL = 0, /* Rcvd rscn before we cmpl reg login */ + NLP_REG_LOGIN_SEND = 1, /* sent reglogin to adapter */ + NLP_SUPPRESS_RSP = 4, /* Remote NPort supports suppress rsp */ + NLP_PLOGI_SND = 5, /* sent PLOGI request for this entry */ + NLP_PRLI_SND = 6, /* sent PRLI request for this entry */ + NLP_ADISC_SND = 7, /* sent ADISC request for this entry */ + NLP_LOGO_SND = 8, /* sent LOGO request for this entry */ + NLP_RNID_SND = 10, /* sent RNID request for this entry */ + NLP_NVMET_RECOV = 12, /* NVMET auditing node for recovery. */ + NLP_UNREG_INP = 15, /* UNREG_RPI cmd is in progress */ + NLP_DROPPED = 16, /* Init ref count has been dropped */ + NLP_DELAY_TMO = 17, /* delay timeout is running for node */ + NLP_NPR_2B_DISC = 18, /* node is included in num_disc_nodes */ + NLP_RCV_PLOGI = 19, /* Rcv'ed PLOGI from remote system */ + NLP_LOGO_ACC = 20, /* Process LOGO after ACC completes */ + NLP_TGT_NO_SCSIID = 21, /* good PRLI but no binding for scsid */ + NLP_ISSUE_LOGO = 22, /* waiting to issue a LOGO */ + NLP_IN_DEV_LOSS = 23, /* devloss in progress */ + NLP_ACC_REGLOGIN = 24, /* Issue Reg Login after successful ACC */ -#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from + NLP_NPR_ADISC = 25, /* Issue ADISC when dq'ed from NPR list */ -#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ -#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ -#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ -#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ -#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ -#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ + NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */ + NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */ + NLP_TARGET_REMOVE = 28, /* Target remove in process */ + NLP_SC_REQ = 29, /* Target requires authentication */ + NLP_FIRSTBURST = 30, /* Target supports FirstBurst */ + NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */ +}; /* There are 4 different double linked lists nodelist entries can reside on. 
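The lpfc_disc.h hunk above is the pivot of this series: nlp_flag becomes an unsigned long and the old 0x... masks become bit numbers, so call sites can use the atomic set_bit()/clear_bit()/test_bit() family and drop the spin_lock_irq(&ndlp->lock) pairs that previously guarded read-modify-write flag updates. One consequence, visible in the lpfc_bsg.c hunk earlier: a multi-bit mask like NLP_ELS_SND_MASK no longer exists, so "any ELS send pending?" is spelled as individual test_bit() calls. A self-contained sketch of both points, with illustrative demo names:

#include <linux/bitops.h>
#include <linux/types.h>

enum demo_node_flags {		/* bit numbers, not masks */
	DEMO_PLOGI_SND	= 5,
	DEMO_PRLI_SND	= 6,
	DEMO_LOGO_SND	= 8,
	DEMO_NPR_2B_DISC = 18,
};

struct demo_node {
	unsigned long flags;
};

static bool demo_take_discovery(struct demo_node *n)
{
	/*
	 * Atomic observe-and-clear; replaces the old pattern
	 *   spin_lock_irq(&n->lock);
	 *   disc = n->flags & MASK; n->flags &= ~MASK;
	 *   spin_unlock_irq(&n->lock);
	 */
	return test_and_clear_bit(DEMO_NPR_2B_DISC, &n->flags);
}

static bool demo_els_send_pending(struct demo_node *n)
{
	/* no combined mask with bit numbers: test each bit in turn */
	return test_bit(DEMO_PLOGI_SND, &n->flags) ||
	       test_bit(DEMO_PRLI_SND, &n->flags) ||
	       test_bit(DEMO_LOGO_SND, &n->flags);
}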
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index d737b897ddd8..37f0a930d469 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -725,11 +725,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&np->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&np->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -864,9 +862,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sizeof(struct lpfc_name)); /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -1018,7 +1014,7 @@ stop_rr_fcf_flogi: * registered with the SCSI transport, remove the initial * reference to trigger node release. */ - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); @@ -1236,9 +1232,9 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "6445 ELS completes after LINK_DOWN: " - " Status %x/%x cmd x%x flg x%x\n", + " Status %x/%x cmd x%x flg x%x iotag x%x\n", ulp_status, ulp_word4, cmd, - cmdiocb->cmd_flag); + cmdiocb->cmd_flag, cmdiocb->iotag); if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; @@ -1548,7 +1544,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1597,7 +1593,7 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1675,9 +1671,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *new_ndlp; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; - uint32_t keepDID = 0, keep_nlp_flag = 0; + uint32_t keepDID = 0; int rc; - uint32_t keep_new_nlp_flag = 0; + unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0; uint16_t keep_nlp_state; u32 keep_nlp_fc4_type = 0; struct lpfc_nvme_rport *keep_nrport = NULL; @@ -1704,8 +1700,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3178 PLOGI confirm: ndlp x%x x%x x%x: " - "new_ndlp x%x x%x x%x\n", + "3178 PLOGI confirm: ndlp x%x x%lx x%x: " + "new_ndlp x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, (new_ndlp ? new_ndlp->nlp_DID : 0), (new_ndlp ? 
new_ndlp->nlp_flag : 0), @@ -1769,48 +1765,48 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp->nlp_flag = ndlp->nlp_flag; /* if new_ndlp had NLP_UNREG_INP set, keep it */ - if (keep_new_nlp_flag & NLP_UNREG_INP) - new_ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag)) + set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_new_nlp_flag & NLP_RPI_REGISTERED) - new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_new_nlp_flag & NLP_DROPPED) - new_ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_new_nlp_flag)) + set_bit(NLP_DROPPED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag); ndlp->nlp_flag = keep_new_nlp_flag; /* if ndlp had NLP_UNREG_INP set, keep it */ - if (keep_nlp_flag & NLP_UNREG_INP) - ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_nlp_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); /* if ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_nlp_flag & NLP_RPI_REGISTERED) - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_nlp_flag & NLP_DROPPED) - ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_nlp_flag)) + set_bit(NLP_DROPPED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); spin_unlock_irq(&new_ndlp->lock); spin_unlock_irq(&ndlp->lock); @@ -1888,7 +1884,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->active_rrq_pool); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n", new_ndlp->nlp_DID, new_ndlp->nlp_flag, new_ndlp->nlp_fc4_type); @@ -2009,7 +2005,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; - int disc; + bool disc; struct serv_parm *sp = NULL; u32 ulp_status, ulp_word4, did, iotag; bool release_node = false; @@ -2044,10 +2040,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
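A mechanical side effect of widening nlp_flag runs through every lpfc log hunk here: each x%x that printed the flags must become x%lx, since passing an unsigned long to a %x conversion is undefined behavior on 64-bit targets and trips -Wformat. A one-line illustration with a hypothetical wrapper:

#include <linux/printk.h>

static void demo_log_flags(unsigned long flags)
{
	pr_info("flag:x%08lx\n", flags);	/* %lx now that flags is long */
}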
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* PLOGI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -2060,9 +2053,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2070,11 +2061,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ - if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); - } + if (disc) + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } /* Warn PLOGI status Don't print the vport to vport rjts */ @@ -2097,7 +2085,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * with the reglogin process. */ spin_lock_irq(&ndlp->lock); - if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) || + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) && ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { spin_unlock_irq(&ndlp->lock); goto out; @@ -2108,8 +2097,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * start the device remove process. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2212,12 +2201,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) * outstanding UNREG_RPI mbox command completes, unless we * are going offline. This logic does not apply for Fabric DIDs */ - if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && + if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || + test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) && ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " - "on NPort x%x rpi x%x flg x%x Data:" + "on NPort x%x rpi x%x flg x%lx Data:" " x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); @@ -2335,10 +2325,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_PRLI_SND; + clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag); /* Driver supports multiple FC4 types. Counters matter. 
*/ + spin_lock_irq(&ndlp->lock); vport->fc_prli_sent--; ndlp->fc4_prli_sent--; spin_unlock_irq(&ndlp->lock); @@ -2379,7 +2369,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Warn PRLI status */ lpfc_printf_vlog(vport, mode, LOG_ELS, "2754 PRLI DID:%06X Status:x%x/x%x, " - "data: x%x x%x x%x\n", + "data: x%x x%x x%lx\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, ndlp->fc4_prli_sent, ndlp->nlp_flag); @@ -2396,10 +2386,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || (ndlp->nlp_state == NLP_STE_NPR_NODE && - ndlp->nlp_flag & NLP_DELAY_TMO)) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "2784 PRLI cmpl: Allow Node recovery " - "DID x%06x nstate x%x nflag x%x\n", + "DID x%06x nstate x%x nflag x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag); goto out; @@ -2420,8 +2410,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !ndlp->fc4_prli_sent) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2496,7 +2486,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nvme_fb_size = 0; send_next_prli: @@ -2627,8 +2618,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * the ndlp is used to track outstanding PRLIs for different * FC4 types. */ + set_bit(NLP_PRLI_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(&ndlp->lock); @@ -2789,7 +2780,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; - int disc; + bool disc; u32 ulp_status, ulp_word4, tmo, iotag; bool release_node = false; @@ -2818,10 +2809,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
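Where a flag update used to share a critical section with real shared state, the conversion splits them: in lpfc_cmpl_els_prli() just above, NLP_PRLI_SND is cleared atomically and the spinlock now covers only the two counters that still need it. A sketch of the resulting shape; the fields and names are illustrative:

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define DEMO_PRLI_SND 6		/* bit number, as in the enum sketch above */

struct demo_port {
	spinlock_t lock;
	unsigned long flags;
	int prli_sent;
};

static void demo_prli_complete(struct demo_port *p)
{
	clear_bit(DEMO_PRLI_SND, &p->flags);	/* atomic, lock-free */

	spin_lock_irq(&p->lock);		/* lock only the counter */
	p->prli_sent--;
	spin_unlock_irq(&p->lock);
}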
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); /* ADISC completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0104 ADISC completes to NPort x%x " @@ -2832,9 +2821,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2843,9 +2830,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_set_disctmo(vport); } goto out; @@ -2864,8 +2849,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2938,9 +2923,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitADISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ADISC_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -2961,9 +2944,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); return 1; } @@ -2985,7 +2966,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp->vport; IOCB_t *irsp; - unsigned long flags; uint32_t skip_recovery = 0; int wake_up_waiter = 0; u32 ulp_status; @@ -3007,8 +2987,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, iotag = irsp->ulpIoTag; } + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { wake_up_waiter = 1; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; @@ -3023,7 +3003,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " - "IoTag x%x refcnt %d nflags x%x xflags x%x " + "IoTag x%x refcnt %d nflags x%lx xflags x%x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, iotag, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -3061,12 +3041,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* The driver sets this flag for an NPIV instance that doesn't want to * log into the remote port. 
*/ - if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { - spin_lock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); goto out_rsrc_free; @@ -3089,9 +3065,7 @@ out: if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irqsave(&ndlp->lock, flags); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3187 LOGO completes to NPort x%x: Start " @@ -3113,9 +3087,7 @@ out: * register with the transport. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } @@ -3156,12 +3128,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t cmdsize; int rc; - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_LOGO_SND) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag)) return 0; - } - spin_unlock_irq(&ndlp->lock); cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, @@ -3180,10 +3148,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitLOGO++; elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -3208,9 +3174,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -3286,13 +3250,13 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, static int lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) { - int rc = 0; + int rc; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ns_ndlp; LPFC_MBOXQ_t *mbox; - if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) - return rc; + if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag)) + return 0; ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ns_ndlp) @@ -3309,7 +3273,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0936 %s: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return -ENOMEM; @@ -3321,7 +3285,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) goto out; } - fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); if (!mbox->ctx_ndlp) { @@ -3345,7 +3309,7 @@ 
lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0938 %s: failed to format reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return rc; @@ -4384,11 +4348,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct lpfc_work_evt *evtp; - if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) return; - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&nlp->lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; if (!list_empty(&nlp->els_retry_evt.evt_listp)) { @@ -4397,10 +4358,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } - if (nlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&nlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) { if (vport->num_disc_nodes) { if (vport->port_state < LPFC_VPORT_READY) { /* Check if there are more ADISCs to be sent */ @@ -4480,14 +4438,11 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) spin_lock_irq(&ndlp->lock); cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; + spin_unlock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - spin_unlock_irq(&ndlp->lock); + if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return; - } - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the @@ -5010,9 +4965,7 @@ out_retry: /* delay is specified in milliseconds */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; if ((cmd == ELS_CMD_PRLI) || @@ -5072,7 +5025,7 @@ out_retry: lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x " - "IoTag x%x nflags x%x\n", + "IoTag x%x nflags x%lx\n", cmd, did, cmdiocb->retry, ulp_status, ulp_word4, cmdiocb->iotag, (ndlp ? ndlp->nlp_flag : 0)); @@ -5239,7 +5192,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ACC to LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " - "last els x%x Data: x%x x%x x%x\n", + "last els x%x Data: x%lx x%x x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -5254,16 +5207,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); /* If came from PRLO, then PRLO_ACC is done. * Start rediscovery now. 
*/ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -5300,7 +5251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi x%x DID:%x flg:%x %d x%px " + "0006 rpi x%x DID:%x flg:%lx %d x%px " "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp, mbx_cmd, @@ -5311,11 +5262,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * first on an UNREG_LOGIN and then release the final * references. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (mbx_cmd == MBX_UNREG_LOGIN) - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); lpfc_drop_node(ndlp->vport, ndlp); } @@ -5381,23 +5330,23 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS response tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", + "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n", iotag, ulp_status, ulp_word4, tmo, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); if (mbox) { - if (ulp_status == 0 - && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + if (ulp_status == 0 && + test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { if (!lpfc_unreg_rpi(vport, ndlp) && !test_bit(FC_PT2PT, &vport->fc_flag)) { - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0314 PLOGI recov " "DID x%x " - "Data: x%x x%x x%x\n", + "Data: x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_rpi, @@ -5414,18 +5363,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out_free_mbox; mbox->vport = vport; - if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) { mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - } - else { + } else { mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; @@ -5434,12 +5382,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * set for this failed mailbox command. 
*/ lpfc_nlp_put(ndlp); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* ELS rsp: Cannot issue reg_login for <NPortid> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } @@ -5448,32 +5396,20 @@ out_free_mbox: } out: if (ndlp && shost) { - spin_lock_irq(&ndlp->lock); if (mbox) - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; - ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); } /* An SLI4 NPIV instance wants to drop the node at this point under - * these conditions and release the RPI. + * these conditions because it doesn't need the login. */ if (phba->sli_rev == LPFC_SLI_REV4 && vport && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - lpfc_drop_node(vport, ndlp); - } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && - ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { /* Drop ndlp if there is no planned or outstanding * issued PRLI. * @@ -5540,9 +5476,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } @@ -5570,7 +5504,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, pcmd += sizeof(uint32_t); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC: did:x%x flg:x%x", + "Issue ACC: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_FLOGI: @@ -5649,7 +5583,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: @@ -5687,7 +5621,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLO: did:x%x flg:x%x", + "Issue ACC PRLO: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_RDF: @@ -5732,12 +5666,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, default: return 1; } - if (ndlp->nlp_flag & NLP_LOGO_ACC) { - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) { + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) && + !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; } else { elsiocb->cmd_cmpl = 
lpfc_cmpl_els_rsp; @@ -5760,7 +5692,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -5835,13 +5767,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, /* Xmit ELS RJT <err> response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0129 Xmit ELS RJT x%x response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", rejectError, elsiocb->iotag, get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue LS_RJT: did:x%x flg:x%x err:x%x", + "Issue LS_RJT: did:x%x flg:x%lx err:x%x", ndlp->nlp_DID, ndlp->nlp_flag, rejectError); phba->fc_stat.elsXmitLSRJT++; @@ -5852,18 +5784,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 1; } - /* The NPIV instance is rejecting this unsolicited ELS. Make sure the - * node's assigned RPI gets released provided this node is not already - * registered with the transport. - */ - if (phba->sli_rev == LPFC_SLI_REV4 && - vport->port_type == LPFC_NPIV_PORT && - !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -5944,7 +5864,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_format_edc_lft_desc(phba, tlv); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + "Issue EDC ACC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5966,7 +5886,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6035,7 +5955,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit ADISC ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0130 Xmit ADISC ACC response iotag x%x xri: " - "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", + "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6051,7 +5971,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ap->DID = be32_to_cpu(vport->fc_myDID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", + "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6157,7 +6077,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq 
*oldiocb, /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6228,7 +6148,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6015 NVME issue PRLI ACC word1 x%08x " - "word4 x%08x word5 x%08x flag x%x, " + "word4 x%08x word5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", npr_nvme->word1, npr_nvme->word4, npr_nvme->word5, ndlp->nlp_flag, @@ -6243,7 +6163,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLI: did:x%x flg:x%x", + "Issue ACC PRLI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6357,7 +6277,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC RNID: did:x%x flg:x%x refcnt %d", + "Issue ACC RNID: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6414,7 +6334,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, get_job_ulpcontext(phba, iocb)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", + "Clear RRQ: did:x%x flg:x%lx exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) xri = bf_get(rrq_oxid, rrq); @@ -6491,7 +6411,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", + "Issue ACC ECHO: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6541,14 +6461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE || - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); - if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* This node was marked for ADISC but was not picked * for discovery. This is possible if the node was * missing in gidft response. 
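The recurring change in the hunks above and below is mechanical: ndlp->nlp_flag becomes an unsigned long updated with the kernel's atomic bitops, which lets the spin_lock_irq(&ndlp->lock) critical sections that previously guarded its read-modify-write updates be dropped, and moves every log format for the field from x%x to x%lx. This presumes the NLP_* flag macros are redefined elsewhere in the patch from bit masks to bit numbers, since set_bit()/test_bit() take a bit index rather than a mask. A minimal sketch of the before/after pattern, with a hypothetical flags word and bit number standing in for ndlp->nlp_flag and NLP_NPR_2B_DISC:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_FLAG_BIT	3	/* hypothetical bit number, not a mask */

/* Old pattern: the node lock makes the check-and-clear atomic. */
static bool old_check_and_clear(spinlock_t *lock, u32 *flags)
{
	bool was_set;

	spin_lock_irq(lock);
	was_set = *flags & BIT(MY_FLAG_BIT);
	*flags &= ~BIT(MY_FLAG_BIT);
	spin_unlock_irq(lock);
	return was_set;
}

/* New pattern: one atomic bitop gives the same check-and-clear semantics. */
static bool new_check_and_clear(unsigned long *flags)
{
	return test_and_clear_bit(MY_FLAG_BIT, flags);
}

Sites that only set or clear a bit without branching on its old value convert to plain set_bit()/clear_bit() with no lock at all, which is why most hunks in this file simply delete the lock/unlock pair around a one-line flag update.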
@@ -6606,9 +6524,9 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -7104,7 +7022,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2171 Xmit RDP response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8078,7 +7996,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ if (vport->port_state <= LPFC_NS_QRY) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); @@ -8108,7 +8026,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + "RCV RSCN vport: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); @@ -8145,7 +8063,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + "RCV RSCN defer: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); @@ -8201,7 +8119,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN: did:x%x/ste:x%x flg:x%x", + "RCV RSCN: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_MODE, &vport->fc_flag); @@ -8707,7 +8625,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8869,7 +8787,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, " "Data: x%x x%x x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9066,7 +8984,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0120 Xmit ELS RPL ACC response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9642,14 +9560,24 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); /* First we need to issue aborts to outstanding cmds on txcmpl */ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) - continue; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2243 iotag = 0x%x cmd_flag = 0x%x " + "ulp_command = 0x%x this_vport %x " + "sli_flag = 0x%x\n", + piocb->iotag, piocb->cmd_flag, + get_job_cmnd(phba, piocb), + (piocb->vport == vport), + phba->sli.sli_flag); if (piocb->vport != vport) continue; - if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) - continue; + if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) { + if (piocb->cmd_flag & LPFC_IO_LIBDFC) + continue; + if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) + continue; + } /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, * or GEN_REQUESTs waiting for a CQE response. @@ -10411,14 +10339,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { if (newnode) lpfc_nlp_put(ndlp); goto dropit; } - spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) @@ -10447,7 +10372,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + "RCV PLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPLOGI++; @@ -10486,9 +10411,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); @@ -10496,7 +10419,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + "RCV FLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; @@ -10523,7 +10446,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LOGO: did:x%x/ste:x%x flg:x%x", + "RCV LOGO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLOGO++; @@ -10540,7 +10463,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLO: did:x%x/ste:x%x flg:x%x", + "RCV PRLO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLO++; @@ -10569,7 +10492,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ADISC: 
did:x%x/ste:x%x flg:x%x", + "RCV ADISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_send_els_event(vport, ndlp, payload); @@ -10584,7 +10507,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PDISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PDISC: did:x%x/ste:x%x flg:x%x", + "RCV PDISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPDISC++; @@ -10598,7 +10521,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARPR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARPR: did:x%x/ste:x%x flg:x%x", + "RCV FARPR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARPR++; @@ -10606,7 +10529,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARP: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARP: did:x%x/ste:x%x flg:x%x", + "RCV FARP: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARP++; @@ -10614,7 +10537,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FAN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FAN: did:x%x/ste:x%x flg:x%x", + "RCV FAN: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFAN++; @@ -10623,7 +10546,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLI: did:x%x/ste:x%x flg:x%x", + "RCV PRLI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; @@ -10637,7 +10560,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LIRR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LIRR: did:x%x/ste:x%x flg:x%x", + "RCV LIRR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLIRR++; @@ -10648,7 +10571,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RLS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RLS: did:x%x/ste:x%x flg:x%x", + "RCV RLS: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRLS++; @@ -10659,7 +10582,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RPL: did:x%x/ste:x%x flg:x%x", + "RCV RPL: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRPL++; @@ -10670,7 +10593,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RNID: did:x%x/ste:x%x flg:x%x", + "RCV RNID: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRNID++; @@ -10681,7 +10604,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RTV: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RTV: did:x%x/ste:x%x flg:x%x", + "RCV RTV: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRTV++; lpfc_els_rcv_rtv(vport, elsiocb, ndlp); @@ -10691,7 +10614,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring 
*pring, break; case ELS_CMD_RRQ: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RRQ: did:x%x/ste:x%x flg:x%x", + "RCV RRQ: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRRQ++; @@ -10702,7 +10625,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ECHO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ECHO: did:x%x/ste:x%x flg:x%x", + "RCV ECHO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvECHO++; @@ -10718,7 +10641,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FPIN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FPIN: did:x%x/ste:x%x flg:x%x", + "RCV FPIN: did:x%x/ste:x%x " + "flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, @@ -11226,9 +11150,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; phba->pport->port_state = LPFC_FLOGI; return; @@ -11359,11 +11281,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -11566,7 +11486,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPIV LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2928 NPIV LOGO completes to NPort x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%lx x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -11582,8 +11502,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Wake up lpfc_vport_delete if waiting...*/ if (ndlp->logo_waitq) wake_up(ndlp->logo_waitq); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); } @@ -11633,13 +11554,11 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue LOGO npiv did:x%x flg:x%x", + "Issue LOGO npiv did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -11655,9 +11574,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -12138,7 +12055,7 @@ 
lpfc_sli_abts_recover_port(struct lpfc_vport *vport, lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " - "flags 0x%x\n", + "flag 0x%lx\n", shost->host_no, ndlp->nlp_DID, vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); @@ -12148,8 +12065,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, */ spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 9241075f72fa..4036a9838bb5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -100,6 +100,12 @@ lpfc_rport_invalid(struct fc_rport *rport) return -EINVAL; } + if (rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) { + pr_info("**** %s: devloss_callbk_done rport x%px SID x%x\n", + __func__, rport, rport->scsi_target_id); + return -EINVAL; + } + rdata = rport->dd_data; if (!rdata) { pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n", @@ -137,7 +143,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) ndlp = rdata->pnode; vport = ndlp->vport; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport terminate: sid:x%x did:x%x flg:x%x", + "rport terminate: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) @@ -155,6 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; + bool nvme_reg = false; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) @@ -164,11 +171,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport devlosscb: sid:x%x did:x%x flg:x%x", + "rport devlosscb: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3181 dev_loss_callbk x%06x, rport x%px flg x%x " + "3181 dev_loss_callbk x%06x, rport x%px flg x%lx " "load_flag x%lx refcnt %u state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref), @@ -177,38 +184,44 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* Don't schedule a worker thread event if the vport is going down. */ if (test_bit(FC_UNLOADING, &vport->load_flag) || !test_bit(HBA_SETUP, &phba->hba_flag)) { + spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) + nvme_reg = true; + /* The scsi_transport is done with the rport so lpfc cannot - * call to unregister. Remove the scsi transport reference - * and clean up the SCSI transport node details. + * call to unregister. */ - if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) { + if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; - /* NVME transport-registered rports need the - * NLP_XPT_REGD flag to complete an unregister. + /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, + * unregister calls were made to the scsi and nvme + * transports and refcnt was already decremented. Clear + * the NLP_XPT_REGD flag only if the NVME Rport is + * confirmed unregistered. 
*/ - if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) { ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_nlp_put(ndlp); /* may free ndlp */ + } else { + spin_unlock_irqrestore(&ndlp->lock, iflags); + } + } else { spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); - spin_lock_irqsave(&ndlp->lock, iflags); } /* Only 1 thread can drop the initial node reference. If * another thread has set NLP_DROPPED, this thread is done. */ - if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && - !(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); + if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag)) return; - } - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_DROPPED, &ndlp->nlp_flag); + lpfc_nlp_put(ndlp); return; } @@ -235,14 +248,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); + spin_lock_irqsave(&ndlp->lock, iflags); /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag. */ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* * The backend does not expect any more calls associated with this @@ -271,15 +284,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } else { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " - " rport x%px flg x%x load_flag x%lx refcnt " + " rport x%px flg x%lx load_flag x%lx refcnt " "%d\n", __func__, ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { - spin_lock_irqsave(&ndlp->lock, iflags); /* Node is in dev loss. No further transaction. 
*/ - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -412,7 +423,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " - "refcnt %d ndlp %p flag x%x " + "refcnt %d ndlp %p flag x%lx " "port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -455,7 +466,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n", + "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n", __func__, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); @@ -469,9 +480,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); return fcf_inuse; } @@ -499,7 +508,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) } break; case Fabric_Cntl_DID: - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) recovering = true; break; case FDMI_DID: @@ -527,15 +536,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) * the following lpfc_nlp_put is necessary after fabric node is * recovered. */ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -552,7 +559,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -572,7 +579,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x refcnt %d\n", + "NPort x%06x Data: x%lx x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, @@ -582,15 +589,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x\n", + "NPort x%06x Data: x%lx x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM 
event. @@ -1355,7 +1360,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device is * marked for PLOGI. */ @@ -3864,14 +3869,13 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, - "0002 rpi:%x DID:%x flg:%x %d x%px\n", + "0002 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || + if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { /* We rcvd a rscn after issuing this * mbox reg login, we may have cycled @@ -3881,16 +3885,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * there is another reg login in * process. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); /* * We cannot leave the RPI registered because * if we go thru discovery again for this ndlp * a subsequent REG_RPI will fail. */ - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } @@ -4203,7 +4205,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4334,9 +4336,7 @@ out: * reference. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } @@ -4357,11 +4357,11 @@ out: if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0003 rpi:%x DID:%x flg:%x %d x%px\n", + "0003 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -4453,8 +4453,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) __func__, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_state); - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4488,7 +4488,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport add: did:x%x flg:x%x type x%x", + "rport add: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); /* Don't add the remote port if unloading. 
*/ @@ -4556,7 +4556,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport delete: did:x%x flg:x%x type x%x", + "rport delete: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -4672,7 +4672,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "0999 %s Not regd: ndlp x%px rport x%px DID " - "x%x FLG x%x XPT x%x\n", + "x%x FLG x%lx XPT x%x\n", __func__, ndlp, ndlp->rport, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; @@ -4688,7 +4688,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } else if (!ndlp->rport) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, - "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + "1999 %s NDLP in devloss x%px DID x%x FLG x%lx" " XPT x%x refcnt %u\n", __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, @@ -4733,7 +4733,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type |= NLP_FC_NODE; fallthrough; case NLP_STE_MAPPED_NODE: - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); lpfc_nlp_reg_node(vport, ndlp); break; @@ -4744,7 +4744,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * backend, attempt it now */ case NLP_STE_NPR_NODE: - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); fallthrough; default: lpfc_nlp_unreg_node(vport, ndlp); @@ -4765,13 +4765,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (new_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FC_NODE; } if (new_state == NLP_STE_MAPPED_NODE) - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); if (new_state == NLP_STE_NPR_NODE) - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || @@ -4779,7 +4779,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* For nodes marked for ADISC, Handle unreg in ADISC cmpl * if linkup. 
In linkdown do unreg_node */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) || !lpfc_is_link_up(vport->phba)) lpfc_nlp_unreg_node(vport, ndlp); } @@ -4799,9 +4799,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } @@ -4833,7 +4831,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int state) { int old_state = ndlp->nlp_state; - int node_dropped = ndlp->nlp_flag & NLP_DROPPED; + bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag); char name1[16], name2[16]; unsigned long iflags; @@ -4849,7 +4847,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (node_dropped && old_state == NLP_STE_UNUSED_NODE && state != NLP_STE_UNUSED_NODE) { - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_get(ndlp); } @@ -4857,7 +4855,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; + clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); ndlp->nlp_type &= ~NLP_FC_NODE; } @@ -4954,14 +4952,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference from lpfc_nlp_init. If set, don't drop it again and * introduce an imbalance. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); - return; - } - spin_unlock_irq(&ndlp->lock); } /* @@ -5076,9 +5068,9 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (ndlp->nlp_flag & NLP_DELAY_TMO)) { + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return 0; - } + if (ulp_context == ndlp->nlp_rpi) return 1; } @@ -5148,7 +5140,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. */ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { if (phba->sli_rev != LPFC_SLI_REV4) lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); else @@ -5182,29 +5174,19 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_issue_els_logo(vport, ndlp, 0); /* Check to see if there are any deferred events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - /* NLP_RELEASE_RPI is only set for SLI4 ports. 
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irq(&ndlp->lock); - } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The node has an outstanding reference for the unreg. Now @@ -5224,8 +5206,6 @@ static void lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { - unsigned long iflags; - /* Driver always gets a reference on the mailbox job * in support of async jobs. */ @@ -5233,9 +5213,8 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, if (!mbox->ctx_ndlp) return; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) { mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else if (phba->sli_rev == LPFC_SLI_REV4 && !test_bit(FC_UNLOADING, &vport->load_flag) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -5243,13 +5222,6 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, (kref_read(&ndlp->kref) > 0)) { mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; } else { - if (test_bit(FC_UNLOADING, &vport->load_flag)) { - if (phba->sli_rev == LPFC_SLI_REV4) { - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - } mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } @@ -5271,13 +5243,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc, acc_plogi = 1; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || + test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) { + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " - "unregistered nlp_flag x%x " + "unregistered nlp_flag x%lx " "did x%x\n", ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID); @@ -5285,11 +5257,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -5312,27 +5284,24 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 1; } + /* Accept PLOGIs after unreg_rpi_cmpl. 
*/ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) - /* - * accept PLOGIs after unreg_rpi_cmpl - */ acc_plogi = 0; - if (((ndlp->nlp_DID & Fabric_DID_MASK) != - Fabric_DID_MASK) && - (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + + if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " - "NPort x%x deferred flg x%x " + "NPort x%x deferred flg x%lx " "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; lpfc_nlp_put(ndlp); @@ -5342,7 +5311,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LOG_NODE | LOG_DISCOVERY, "1444 Failed to allocate mempool " "unreg_rpi UNREG x%x, " - "DID x%x, flag x%x, " + "DID x%x, flag x%lx, " "ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); @@ -5352,7 +5321,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * not unloading. */ if (!test_bit(FC_UNLOADING, &vport->load_flag)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, @@ -5365,13 +5334,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) out: if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0; - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (acc_plogi) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 0; } @@ -5399,7 +5368,7 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { /* The mempool_alloc might sleep */ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); @@ -5487,7 +5456,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* Cleanup node for NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0900 Cleanup node for NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_dequeue_node(vport, ndlp); @@ -5532,9 +5501,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_els_abort(phba, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = 0; del_timer_sync(&ndlp->nlp_delayfunc); @@ -5543,10 +5510,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); - - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - return 0; } @@ -5620,7 +5583,7 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ); 
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "0929 FIND node DID " - "Data: x%px x%x x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); @@ -5667,7 +5630,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "2025 FIND node DID MAPPED " - "Data: x%px x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->active_rrqs_xri_bitmap); @@ -5701,13 +5664,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6453 Setup New Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp; } @@ -5726,7 +5687,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6455 Setup RSCN Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5744,13 +5705,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) NLP_EVT_DEVICE_RECOVERY); } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6456 Skip Setup RSCN Node x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); ndlp = NULL; @@ -5758,7 +5717,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6457 Setup Active Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5769,7 +5728,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || (!vport->phba->nvmet_support && - ndlp->nlp_flag & NLP_RCV_PLOGI)) + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))) return NULL; if (vport->phba->nvmet_support) @@ -5779,10 +5738,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * allows for rediscovery */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } return ndlp; } @@ -6153,7 +6109,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device * is marked for PLOGI. 
*/ @@ -6366,11 +6322,11 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0004 rpi:%x DID:%x flg:%x %d x%px\n", + "0004 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -6420,7 +6376,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "3185 FIND node filter %ps DID " - "ndlp x%px did x%x flg x%x st x%x " + "ndlp x%px did x%x flg x%lx st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6555,9 +6511,10 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0007 Init New ndlp x%px, rpi:x%x DID:%x " - "flg:x%x refcnt:%d\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:x%x " + "flg:x%lx refcnt:%d\n", ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6589,7 +6546,7 @@ lpfc_nlp_release(struct kref *kref) struct lpfc_vport *vport = ndlp->vport; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node release: did:x%x flg:x%x type:x%x", + "node release: did:x%x flg:x%lx type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -6601,19 +6558,12 @@ lpfc_nlp_release(struct kref *kref) lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); - /* Not all ELS transactions have registered the RPI with the port. - * In these cases the rpi usage is temporary and the node is - * released when the WQE is completed. Catch this case to free the - * RPI to the pool. Because this node is in the release path, a lock - * is unnecessary. All references are gone and the node has been - * dequeued. + /* All nodes are initialized with an RPI that needs to be released + * now. All references are gone and the node has been dequeued. 
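[Editor's note] The lpfc hunks above and below all follow one conversion: single-bit state in ndlp->nlp_flag moves from open-coded mask updates under spin_lock_irq(&ndlp->lock) to the kernel's atomic bitops (set_bit/clear_bit/test_bit), and the matching format strings change from x%x to x%lx because bitops operate on an unsigned long word. A minimal userspace model of the pattern, using C11 atomics in place of the kernel helpers; the NLP_* values here are made-up bit numbers, not the driver's real ones:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Flags are now bit numbers, not masks (hypothetical values). */
enum { NLP_UNREG_INP = 0, NLP_RPI_REGISTERED = 1, NLP_NPR_ADISC = 2 };

/* Kernel-style helpers modeled on C11 atomic fetch ops. */
static void set_bit(int nr, atomic_ulong *addr)
{
        atomic_fetch_or(addr, 1UL << nr);
}

static void clear_bit(int nr, atomic_ulong *addr)
{
        atomic_fetch_and(addr, ~(1UL << nr));
}

static bool test_bit(int nr, atomic_ulong *addr)
{
        return (atomic_load(addr) >> nr) & 1;
}

int main(void)
{
        atomic_ulong nlp_flag = 0;

        /* was: spin_lock_irq(); nlp_flag |= FLAG; spin_unlock_irq(); */
        set_bit(NLP_RPI_REGISTERED, &nlp_flag);
        clear_bit(NLP_NPR_ADISC, &nlp_flag);

        if (test_bit(NLP_RPI_REGISTERED, &nlp_flag))
                /* the word is an unsigned long now, hence the x%lx fixes */
                printf("flg x%lx\n", atomic_load(&nlp_flag));
        return 0;
}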
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && - !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - } + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } /* The node is not freed back to memory, it is released to a pool so @@ -6642,7 +6592,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp) if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node get: did:x%x flg:x%x refcnt:x%x", + "node get: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6674,7 +6624,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) { if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", + "node put: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); } else { @@ -6727,11 +6677,12 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); goto out; - } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + } else if (test_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag)) { ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "2624 RPI %x DID %x flag %x " + "2624 RPI %x DID %x flag %lx " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 0dd451009b07..7f57397d91a9 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1943,6 +1943,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, lpfc_offline_prep(phba, mbx_action); lpfc_sli_flush_io_rings(phba); + lpfc_nvmels_flush_cmd(phba); lpfc_offline(phba); /* release interrupt for possible resource change */ lpfc_sli4_disable_intr(phba); @@ -3092,7 +3093,8 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " - "refcnt:%d xflags x%x nflag x%x\n", + "refcnt:%d xflags x%x " + "nflag x%lx\n", ndlp->nlp_DID, (void *)ndlp, kref_read(&ndlp->kref), ndlp->fc4_xpt_flags, @@ -3379,7 +3381,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) } /** - * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever @@ -3387,7 +3389,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) * is to fixup the temporary rpi assignments. **/ void -lpfc_sli4_node_prep(struct lpfc_hba *phba) +lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; @@ -3397,10 +3399,10 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) return; vports = lpfc_create_vport_work_array(phba); - if (vports == NULL) + if (!vports) return; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i]; i++) { if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) continue; @@ -3409,14 +3411,20 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) nlp_listp) { rpi = lpfc_sli4_alloc_rpi(phba); if (rpi == LPFC_RPI_ALLOC_ERROR) { - /* TODO print log? 
*/ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0099 RPI alloc error for " + "ndlp x%px DID:x%06x " + "flg:x%lx\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); continue; } ndlp->nlp_rpi = rpi; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0009 Assign RPI x%x to ndlp x%px " - "DID:x%06x flg:x%x\n", + "DID:x%06x flg:x%lx\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag); } @@ -3820,35 +3828,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) &vports[i]->fc_nodes, nlp_listp) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (offline || hba_pci_err) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_UNREG_INP | - NLP_RPI_REGISTERED); - spin_unlock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - lpfc_sli_rpi_release(vports[i], - ndlp); - } else { - lpfc_unreg_rpi(vports[i], ndlp); - } - /* - * Whenever an SLI4 port goes offline, free the - * RPI. Get a new RPI when the adapter port - * comes back online. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(vports[i], KERN_INFO, - LOG_NODE | LOG_DISCOVERY, - "0011 Free RPI x%x on " - "ndlp: x%px did x%x\n", - ndlp->nlp_rpi, ndlp, - ndlp->nlp_DID); - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag); } if (ndlp->nlp_type & NLP_FABRIC) { @@ -6925,9 +6910,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -13518,6 +13501,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Disable FW logging to host memory */ lpfc_ras_stop_fwlog(phba); + lpfc_sli4_queue_unset(phba); + /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4574716c8764..4d88cfe71cae 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -65,7 +65,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { /* First, we MUST have a RPI registered */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) return 0; /* Compare the ADISC rsp WWNN / WWPN matches our internal node @@ -239,7 +239,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); /* Clean up all fabric IOs first.*/ @@ -340,7 +340,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox) /* Now process the REG_RPI cmpl */ lpfc_mbx_cmpl_reg_login(phba, login_mbox); - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); kfree(save_iocb); } @@ -404,7 +404,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "0114 PLOGI chkparm 
OK Data: x%x x%x x%lx " "x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi, vport->port_state, @@ -429,7 +429,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) break; fallthrough; case NLP_STE_REG_LOGIN_ISSUE: @@ -449,7 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); @@ -480,7 +480,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); login_mbox = NULL; link_mbox = NULL; @@ -552,13 +552,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -627,10 +627,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * this ELS request. The only way to do this is * to register, then unregister the RPI. 
*/ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | - NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); } stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; @@ -665,9 +664,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Start the ball rolling by issuing REG_LOGIN here */ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); @@ -797,7 +795,7 @@ out: */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -814,9 +812,7 @@ out: /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -835,9 +831,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); if (els_cmd == ELS_CMD_PRLO) lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else @@ -890,9 +884,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -915,14 +907,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " - "nflag x%x lastels x%x ref cnt %u", + "nflag x%lx lastels x%x ref cnt %u", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -935,9 +925,7 @@ out: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. 
Since part of processing includes an @@ -978,7 +966,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport, out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " - "state x%x flags x%x port_type: x%x " + "state x%x flags x%lx port_type: x%x " "npr->initfcn: x%x npr->tgtfcn: x%x\n", cmd, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag, vport->port_type, @@ -1020,7 +1008,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); } if (npr->Retry && ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) @@ -1057,7 +1045,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, roles |= FC_RPORT_ROLE_FCP_TARGET; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport rolechg: role:x%x did:x%x flg:x%x", + "rport rolechg: role:x%x did:x%x flg:x%lx", roles, ndlp->nlp_DID, ndlp->nlp_flag); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) @@ -1068,10 +1056,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, static uint32_t lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 0; } @@ -1081,16 +1067,12 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (test_bit(FC_RSCN_MODE, &vport->fc_flag) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && (ndlp->nlp_type & NLP_FCP_TARGET)))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 1; } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -1115,10 +1097,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. 
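[Editor's note] Several hunks above replace a single locked group update such as nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | NLP_RCV_PLOGI) with three separate set_bit() calls. That follows from the bitop API: set_bit() takes a bit number rather than a mask, so one call can only touch one flag. The group update is no longer atomic as a whole (another CPU may briefly observe one flag set before the others), which the series evidently treats as acceptable because each flag is consumed independently. A sketch using the same hypothetical helpers as in the note above:

#include <stdatomic.h>

enum { NLP_RM_DFLT_RPI = 0, NLP_ACC_REGLOGIN = 1, NLP_RCV_PLOGI = 2 };

static void set_bit(int nr, atomic_ulong *addr)
{
        atomic_fetch_or(addr, 1UL << nr);
}

/* One atomic op per flag replaces one masked OR under the node lock. */
static void mark_plogi_acc(atomic_ulong *nlp_flag)
{
        set_bit(NLP_RM_DFLT_RPI, nlp_flag);
        set_bit(NLP_ACC_REGLOGIN, nlp_flag);
        set_bit(NLP_RCV_PLOGI, nlp_flag);
}

int main(void)
{
        atomic_ulong nlp_flag = 0;

        mark_plogi_acc(&nlp_flag);
        return atomic_load(&nlp_flag) == 0x7 ? 0 : 1; /* bits 0..2 set */
}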
*/ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -1143,11 +1125,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1437 release_rpi UNREG x%x " - "on NPort x%x flg x%x\n", + "on NPort x%x flg x%lx\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -1175,7 +1157,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; @@ -1190,13 +1172,12 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * working on the same NPortID, do nothing for this thread * to stop it. */ - if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0272 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); - } return ndlp->nlp_state; } @@ -1230,9 +1211,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; @@ -1290,11 +1269,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NULL); } else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (vport->num_disc_nodes)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + vport->num_disc_nodes) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { @@ -1356,9 +1333,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1389,7 +1364,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { /* Recovery from PLOGI collision logic */ return ndlp->nlp_state; } @@ -1418,7 +1393,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, 
goto out; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0121 PLOGI chkparm OK Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) @@ -1446,14 +1421,14 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } /* @@ -1476,7 +1451,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1500,7 +1475,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1520,7 +1495,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; break; default: - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } @@ -1535,8 +1510,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* decrement node reference count to the failed mbox * command */ @@ -1544,7 +1518,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } else { @@ -1552,7 +1526,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } @@ -1605,18 +1579,15 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1636,9 +1607,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - 
spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp->nlp_state; } @@ -1656,10 +1626,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { if (vport->num_disc_nodes) lpfc_more_adisc(vport); } @@ -1748,9 +1715,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; @@ -1789,18 +1754,15 @@ static uint32_t lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding ADISC */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding ADISC */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1820,9 +1782,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -1856,7 +1817,7 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, * transition to UNMAPPED provided the RPI has completed * registration. 
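[Editor's note] Where the old code tested a flag and then cleared it in two steps, as in the NLP_NPR_2B_DISC check in lpfc_rcv_plogi_adisc_issue above, the conversion uses test_and_clear_bit(): a single atomic read-modify-write that returns the previous bit value, so there is no window in which two threads can both see the bit set and both act on it. Modeled in userspace:

#include <stdatomic.h>
#include <stdbool.h>

static bool test_and_clear_bit(int nr, atomic_ulong *addr)
{
        unsigned long mask = 1UL << nr;

        /* fetch_and returns the old word; mask out the old bit value */
        return atomic_fetch_and(addr, ~mask) & mask;
}

int main(void)
{
        atomic_ulong nlp_flag = 1UL << 5; /* pretend bit 5 is NLP_NPR_2B_DISC */

        /* first caller wins (and would run lpfc_more_adisc()); later
         * callers see the bit already cleared and skip the work */
        if (!test_and_clear_bit(5, &nlp_flag))
                return 1;
        return test_and_clear_bit(5, &nlp_flag) ? 1 : 0;
}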
*/ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } else { @@ -1895,7 +1856,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1906,7 +1867,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; @@ -1976,9 +1937,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); @@ -1989,7 +1948,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* Only if we are not a fabric nport do we issue PRLI */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -2061,15 +2020,12 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -2084,17 +2040,16 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); /* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed we don't need to ignore it. */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || !vport->phba->nvmet_support) - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2228,7 +2183,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, + &ndlp->nlp_flag); } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; @@ -2272,7 +2228,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Both sides support FB. The target's first * burst size is a 512 byte encoded value. 
*/ - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, nvpr); @@ -2287,7 +2243,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6029 NVME PRLI Cmpl w1 x%08x " - "w4 x%08x w5 x%08x flag x%x, " + "w4 x%08x w5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", be32_to_cpu(nvpr->word1), be32_to_cpu(nvpr->word4), @@ -2299,9 +2255,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; @@ -2353,18 +2307,15 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } @@ -2401,9 +2352,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2442,9 +2392,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } @@ -2483,9 +2431,8 @@ lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2591,8 +2538,9 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport, { ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); @@ -2653,9 +2601,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Send PRLO_ACC */ - 
spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR. @@ -2665,7 +2611,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, - "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n", + "3422 DID x%06x nflag x%lx lastels x%x ref cnt %u\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -2685,8 +2631,9 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -2699,16 +2646,16 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); - } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + } else if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* send PLOGI immediately, move to PLOGI issue state */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2729,14 +2676,14 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2767,15 +2714,15 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. 
*/ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2790,24 +2737,18 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); } return ndlp->nlp_state; } @@ -2844,7 +2785,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2877,7 +2818,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2896,12 +2837,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, /* SLI4 ports have preallocated logical rpis. 
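[Editor's note] The conversion is deliberately selective. In the device-recovery handlers above, the single-bit flags move out from under the lock, but multi-bit fields such as nlp_fc4_type keep their spin_lock_irq(&ndlp->lock) protection, since a masked read-modify-write of a plain word is not atomic. A rough shape of that split, with a pthread mutex standing in for the node spinlock; names and bit numbers are illustrative only:

#include <pthread.h>
#include <stdatomic.h>

#define NLP_FC4_FCP   0x1
#define NLP_FC4_NVME  0x2

struct node {
        atomic_ulong flags;        /* single-bit state: lockless bitops */
        unsigned int nlp_fc4_type; /* multi-bit field: still lock-protected */
        pthread_mutex_t lock;
};

static void device_recov(struct node *n)
{
        /* bit flags cleared without taking the lock ... */
        atomic_fetch_and(&n->flags, ~(1UL << 3)); /* e.g. NLP_NODEV_REMOVE */
        atomic_fetch_and(&n->flags, ~(1UL << 4)); /* e.g. NLP_NPR_2B_DISC */

        /* ... but the masked update of the wider field stays locked */
        pthread_mutex_lock(&n->lock);
        n->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
        pthread_mutex_unlock(&n->lock);
}

int main(void)
{
        struct node n = { .nlp_fc4_type = NLP_FC4_FCP | NLP_FC4_NVME,
                          .lock = PTHREAD_MUTEX_INITIALIZER };

        device_recov(&n);
        return n.nlp_fc4_type; /* 0 on success */
}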
*/ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - if (ndlp->nlp_flag & NLP_LOGO_ACC) { + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); - } } else { - if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + if (test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2913,10 +2853,8 @@ static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); @@ -2932,8 +2870,9 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -3146,7 +3085,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " - "state %d rpi x%x Data: x%x x%x\n", + "state %d rpi x%x Data: x%lx x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, ndlp->nlp_flag, data1); @@ -3163,12 +3102,12 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " - "rpi x%x Data: x%x x%x\n", + "rpi x%x Data: x%lx x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, - "DSM out: ste:%d did:x%x flg:x%x", + "DSM out: ste:%d did:x%x flg:x%lx", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index fec23c723730..49dd78ed8a9a 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1232,7 +1232,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ if ((phba->cfg_nvme_enable_fb) && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { req_len = lpfc_ncmd->nvmeCmd->payload_length; if (req_len < pnode->nvme_fb_size) wqe->fcp_iwrite.initial_xfer_len = @@ -2231,6 +2231,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hdw_queue *qp; int abts_scsi, abts_nvme; + u16 nvmels_cnt; /* Host transport has to clean up and confirm requiring an indefinite * wait. 
Print a message if a 10 second wait expires and renew the @@ -2243,6 +2244,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, pending = 0; abts_scsi = 0; abts_nvme = 0; + nvmels_cnt = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; if (!vport->localport || !qp || !qp->io_wq) @@ -2255,6 +2257,11 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, abts_scsi += qp->abts_scsi_io_bufs; abts_nvme += qp->abts_nvme_io_bufs; } + if (phba->sli4_hba.nvmels_wq) { + pring = phba->sli4_hba.nvmels_wq->pring; + if (pring) + nvmels_cnt = pring->txcmplq_cnt; + } if (!vport->localport || test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || phba->link_state == LPFC_HBA_ERROR || @@ -2263,10 +2270,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6176 Lport x%px Localport x%px wait " - "timed out. Pending %d [%d:%d]. " + "timed out. Pending %d [%d:%d:%d]. " "Renewing.\n", lport, vport->localport, pending, - abts_scsi, abts_nvme); + abts_scsi, abts_nvme, nvmels_cnt); continue; } break; @@ -2644,14 +2651,11 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference. Check if another thread has set * NLP_DROPPED. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, + &ndlp->nlp_flag)) { lpfc_nlp_put(ndlp); return; } - spin_unlock_irq(&ndlp->lock); } } } @@ -2841,3 +2845,43 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); #endif } + +/** + * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port + * @phba: Pointer to HBA context object. + * + **/ +void +lpfc_nvmels_flush_cmd(struct lpfc_hba *phba) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + LIST_HEAD(cancel_list); + struct lpfc_sli_ring *pring = NULL; + struct lpfc_iocbq *piocb, *tmp_iocb; + unsigned long iflags; + + if (phba->sli4_hba.nvmels_wq) + pring = phba->sli4_hba.nvmels_wq->pring; + + if (unlikely(!pring)) + return; + + spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock(&pring->ring_lock); + list_splice_init(&pring->txq, &cancel_list); + pring->txq_cnt = 0; + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + if (piocb->cmd_flag & LPFC_IO_NVME_LS) { + list_move_tail(&piocb->list, &cancel_list); + pring->txcmplq_cnt--; + piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; + } + } + spin_unlock(&pring->ring_lock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + if (!list_empty(&cancel_list)) + lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); +#endif +} diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 55c3e2c2bf8f..e6c9112a8862 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2854,7 +2854,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { - if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag)) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 11c974bffa72..905026a4782c 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4629,7 +4629,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = 
PARM_READ_CHECK; if (vport->cfg_first_burst_size && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); @@ -5829,7 +5829,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %llu " - "rpi x%x nlp_flag x%x Data: x%x x%x\n", + "rpi x%x nlp_flag x%lx Data: x%x x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, iocbq->cmd_flag); @@ -6094,8 +6094,8 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { + clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag); spin_lock_irqsave(&pnode->lock, flags); - pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; spin_unlock_irqrestore(&pnode->lock, flags); } @@ -6124,7 +6124,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) !pnode->logo_waitq) { pnode->logo_waitq = &waitq; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag); pnode->save_flags |= NLP_WAIT_FOR_LOGO; spin_unlock_irqrestore(&pnode->lock, flags); lpfc_unreg_rpi(vport, pnode); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2ec6e55771b4..874644b31a3e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1932,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) union lpfc_wqe128 *wqe; struct lpfc_iocbq *sync_buf; unsigned long iflags; - u32 ret_val; + u32 ret_val, cgn_sig_freq; u32 atot, wtot, max; u8 warn_sync_period = 0; @@ -1987,8 +1987,10 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) } else if (wtot) { if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + cgn_sig_freq = phba->cgn_sig_freq ? phba->cgn_sig_freq : + lpfc_fabric_cgn_frequency; /* We hit an Signal warning condition */ - max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * + max = LPFC_SEC_TO_MSEC / cgn_sig_freq * lpfc_acqe_cgn_frequency; bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); @@ -2842,27 +2844,6 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) return; } -static void -__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - unsigned long iflags; - - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - ndlp->nlp_flag &= ~NLP_UNREG_INP; -} - -void -lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - __lpfc_sli_rpi_release(vport, ndlp); -} - /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. 
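[Editor's note] The NLP_DROPPED change in lpfc_nvme_unregister_port above shows the third bitop idiom in this series: test_and_set_bit() makes the "drop the node reference exactly once" logic race-free, replacing a test-then-set sequence under the node lock. A userspace sketch of the idea; the bit number and refcount are stand-ins:

#include <stdatomic.h>
#include <stdbool.h>

static bool test_and_set_bit(int nr, atomic_ulong *addr)
{
        unsigned long mask = 1UL << nr;

        return atomic_fetch_or(addr, mask) & mask; /* old value of the bit */
}

static int refcnt = 1;               /* stand-in for the ndlp kref */

static void node_put(void) { refcnt--; }

int main(void)
{
        atomic_ulong nlp_flag = 0;

        /* only the thread that flips the bit 0 -> 1 drops the reference,
         * so the reference cannot be put twice for the same drop event */
        if (!test_and_set_bit(6, &nlp_flag)) /* pretend bit 6 is NLP_DROPPED */
                node_put();
        if (!test_and_set_bit(6, &nlp_flag)) /* second caller: no-op */
                node_put();
        return refcnt;               /* 0 on success */
}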
@@ -2932,18 +2913,16 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " - "on NPort x%x Data: x%x x%x x%px x%lx x%x\n", + "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp, vport->load_flag, kref_read(&ndlp->kref)); - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } /* The unreg_login mailbox is complete and had a @@ -2991,6 +2970,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; + bool unreg_inp; ndlp = pmb->ctx_ndlp; if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { @@ -3003,20 +2983,26 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, "0010 UNREG_LOGIN vpi:x%x " - "rpi:%x DID:%x defer x%x flg x%x " + "rpi:%x DID:%x defer x%x flg x%lx " "x%px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + + /* Cleanup the nlp_flag now that the UNREG RPI + * has completed. + */ + unreg_inp = test_and_clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); /* Check to see if there are any deferred * events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != - NLP_EVT_NOTHING_PENDING)) { + if (unreg_inp && + ndlp->nlp_defer_did != + NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, @@ -3025,14 +3011,12 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } + lpfc_nlp_put(ndlp); } } @@ -5291,6 +5275,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) "0296 Restart HBA Data: x%x x%x\n", phba->pport->port_state, psli->sli_flag); + lpfc_sli4_queue_unset(phba); + rc = lpfc_sli4_brdreset(phba); if (rc) { phba->link_state = LPFC_HBA_ERROR; @@ -8748,6 +8734,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli_config_mbox_opcode_get( phba, mboxq), rc, dd); + /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. @@ -8760,6 +8747,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + lpfc_sli4_node_rpi_restore(phba); + lpfc_set_host_data(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -8947,7 +8936,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_iocblist; } - lpfc_sli4_node_prep(phba); if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { @@ -14352,9 +14340,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) * an unsolicited PLOGI from the same NPortId from * starting another mailbox transaction. 
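[Editor's note] The lpfc_eq_destroy change just above, and the cq/mq/wq/rq hunks that follow, all add the same guard: when LPFC_SLI_ACTIVE is clear the mailbox interface is unusable, so the destroy helpers skip the mailbox round-trip and jump straight to the list bookkeeping. The mempool_free() call also moves above the new label, so the skip path never frees a mailbox that was never allocated. The control flow, reduced to a sketch with stand-in types and a stub mailbox call:

#include <errno.h>
#include <stddef.h>

#define LPFC_SLI_ACTIVE 0x200                /* illustrative value */

struct queue { int linked; };

static int issue_destroy_mbox(struct queue *q)
{
        (void)q;                             /* alloc mbox, issue, free mbox */
        return 0;
}

static void list_del_init_stub(struct queue *q) { q->linked = 0; }

static int queue_destroy(unsigned int sli_flag, struct queue *q)
{
        int status = 0;

        if (!q)
                return -ENODEV;

        /* no SLI: skip the mailbox entirely, but still unlink the queue */
        if (!(sli_flag & LPFC_SLI_ACTIVE))
                goto list_remove;

        status = issue_destroy_mbox(q);

list_remove:
        list_del_init_stub(q);               /* remove queue from any list */
        return status;
}

int main(void)
{
        struct queue q = { .linked = 1 };

        return queue_destroy(0, &q);         /* SLI inactive: -> list_remove */
}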
*/ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_UNREG_INP; - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; @@ -17625,6 +17611,9 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) if (!eq) return -ENODEV; + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17651,10 +17640,12 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, eq->phba->mbox_mem_pool); +list_remove: /* Remove eq from any list */ list_del_init(&eq->list); - mempool_free(mbox, eq->phba->mbox_mem_pool); + return status; } @@ -17682,6 +17673,10 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) /* sanity check on queue memory */ if (!cq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17707,9 +17702,11 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, cq->phba->mbox_mem_pool); + +list_remove: /* Remove cq from any list */ list_del_init(&cq->list); - mempool_free(mbox, cq->phba->mbox_mem_pool); return status; } @@ -17737,6 +17734,10 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) /* sanity check on queue memory */ if (!mq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17762,9 +17763,11 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, mq->phba->mbox_mem_pool); + +list_remove: /* Remove mq from any list */ list_del_init(&mq->list); - mempool_free(mbox, mq->phba->mbox_mem_pool); return status; } @@ -17792,6 +17795,10 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) /* sanity check on queue memory */ if (!wq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17816,11 +17823,13 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, wq->phba->mbox_mem_pool); + +list_remove: /* Remove wq from any list */ list_del_init(&wq->list); kfree(wq->pring); wq->pring = NULL; - mempool_free(mbox, wq->phba->mbox_mem_pool); return status; } @@ -17850,6 +17859,10 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, /* sanity check on queue memory */ if (!hrq || !drq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17890,9 +17903,11 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, hrq->phba->mbox_mem_pool); + +list_remove: list_del_init(&hrq->list); list_del_init(&drq->list); - mempool_free(mbox, hrq->phba->mbox_mem_pool); return status; } @@ -19074,9 +19089,9 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * to free ndlp when transmit completes */ if 
(ndlp->nlp_state == NLP_STE_UNUSED_NODE && - !(ndlp->nlp_flag & NLP_DROPPED) && + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { - ndlp->nlp_flag |= NLP_DROPPED; + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21094,11 +21109,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; restart_loop = 1; - spin_unlock_irq(&phba->hbalock); - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); - spin_lock_irq(&phba->hbalock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); break; } } @@ -21113,9 +21124,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) ndlp = mb->ctx_ndlp; mb->ctx_ndlp = NULL; if (ndlp) { - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21124,9 +21133,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Release the ndlp with the cleaned-up active mailbox command */ if (act_mbx_ndlp) { - spin_lock(&act_mbx_ndlp->lock); - act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&act_mbx_ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag); lpfc_nlp_put(act_mbx_ndlp); } } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index e70f163fab90..61fe1220f8ad 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.5" +#define LPFC_DRIVER_VERSION "14.4.0.6" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7a4d4d8e2ad5..9e0e35763377 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -496,7 +496,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) !ndlp->logo_waitq) { ndlp->logo_waitq = &waitq; ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); ndlp->save_flags |= NLP_WAIT_FOR_LOGO; } spin_unlock_irq(&ndlp->lock); @@ -515,8 +515,8 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } /* Error - clean up node flags. */ + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); @@ -708,7 +708,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, "1829 DA_ID issue status %d. 
" - "SFlag x%x NState x%x, NFlag x%x " + "SFlag x%x NState x%x, NFlag x%lx " "Rpi x%x\n", rc, ndlp->save_flags, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 187ae0a65d40..ff0253d47a0e 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -432,7 +432,7 @@ static void esp_mac_remove(struct platform_device *dev) static struct platform_driver esp_mac_driver = { .probe = esp_mac_probe, - .remove_new = esp_mac_remove, + .remove = esp_mac_remove, .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index f225bb20aa22..a86bd839d08e 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -547,7 +547,7 @@ static void __exit mac_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. */ static struct platform_driver mac_scsi_driver __refdata = { - .remove_new = __exit_p(mac_scsi_remove), + .remove = __exit_p(mac_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c index e08a38e2a442..9b19d5205e50 100644 --- a/drivers/scsi/mvme16x_scsi.c +++ b/drivers/scsi/mvme16x_scsi.c @@ -127,7 +127,7 @@ static struct platform_driver mvme16x_scsi_driver = { .name = "mvme16x-scsi", }, .probe = mvme16x_probe, - .remove_new = mvme16x_device_remove, + .remove = mvme16x_device_remove, }; static int __init mvme16x_scsi_init(void) diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 501b574239e8..7871e29a820a 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -92,8 +92,11 @@ enum port_type { #define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ #define PM8001_RESERVE_SLOT 8 -#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528 -#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG +#define PM8001_SECTOR_SIZE 512 +#define PM8001_PAGE_SIZE_4K 4096 +#define PM8001_MAX_IO_SIZE (4 * 1024 * 1024) +#define PM8001_MAX_DMA_SG (PM8001_MAX_IO_SIZE / PM8001_PAGE_SIZE_4K) +#define PM8001_MAX_SECTORS (PM8001_MAX_IO_SIZE / PM8001_SECTOR_SIZE) enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 33e1eba62ca1..f8c81e53e93f 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -68,6 +68,10 @@ static bool pm8001_read_wwn = true; module_param_named(read_wwn, pm8001_read_wwn, bool, 0444); MODULE_PARM_DESC(zoned, "Get WWN from the controller. 
Default: true"); +uint pcs_event_log_severity = 0x03; +module_param(pcs_event_log_severity, int, 0644); +MODULE_PARM_DESC(pcs_event_log_severity, "PCS event log severity level"); + static struct scsi_transport_template *pm8001_stt; static int pm8001_init_ccb_tag(struct pm8001_hba_info *); @@ -117,6 +121,7 @@ static const struct scsi_host_template pm8001_sht = { .scan_start = pm8001_scan_start, .can_queue = 1, .sg_tablesize = PM8001_MAX_DMA_SG, + .max_sectors = PM8001_MAX_SECTORS, .shost_groups = pm8001_host_groups, .sdev_groups = pm8001_sdev_groups, .track_queue_depth = 1, @@ -447,9 +452,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, } for (i = 0; i < PM8001_MAX_DEVICES; i++) { pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; - pm8001_ha->devices[i].id = i; - pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; - atomic_set(&pm8001_ha->devices[i].running_req, 0); } pm8001_ha->flags = PM8001F_INIT_TIME; return 0; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index ee2da8e49d4c..d80cffd25a6e 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -572,6 +572,13 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, pm8001_ccb_free(pm8001_ha, ccb); } +static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id) +{ + pm8001_dev->id = id; + pm8001_dev->device_id = PM8001_MAX_DEVICES; + atomic_set(&pm8001_dev->running_req, 0); +} + /** * pm8001_alloc_dev - find a empty pm8001_device * @pm8001_ha: our hba card information @@ -580,9 +587,11 @@ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { - pm8001_ha->devices[dev].id = dev; - return &pm8001_ha->devices[dev]; + struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev]; + + if (pm8001_dev->dev_type == SAS_PHY_UNUSED) { + pm8001_init_dev(pm8001_dev, dev); + return pm8001_dev; } } if (dev == PM8001_MAX_DEVICES) { @@ -613,9 +622,7 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, void pm8001_free_dev(struct pm8001_device *pm8001_dev) { - u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); - pm8001_dev->id = id; pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index ced6721380a8..42c7b3f7afbf 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -96,6 +96,8 @@ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; extern const struct pm8001_dispatch pm8001_80xx_dispatch; +extern uint pcs_event_log_severity; + struct pm8001_hba_info; struct pm8001_ccb_info; struct pm8001_device; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index a9869cd8c4c0..e65951dd2024 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -763,7 +763,8 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].phys_addr_lo; pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = + pcs_event_log_severity; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; /* Enable higher IQs and OQs, 32 to 63, 
bit 16 */ diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index cf13148ba281..d2f47dc31dbf 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2738,6 +2738,7 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDF_ERR(&qedf->dbg_ctx, "Status block initialization failed (0x%x) for id = %d.\n", ret, sb_id); @@ -4018,11 +4019,6 @@ void qedf_stag_change_work(struct work_struct *work) struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); - if (!qedf) { - QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL"); - return; - } - if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Already is in recovery, hence not calling software context reset.\n"); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index c5aec26019d6..628d59dda20c 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -369,6 +369,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDI_ERR(&qedi->dbg_ctx, "Status block initialization failed for id = %d.\n", sb_id); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 6177f4798f3a..74866b9f2b14 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1463,7 +1463,7 @@ static struct platform_driver qpti_sbus_driver = { .of_match_table = qpti_match, }, .probe = qpti_sbus_probe, - .remove_new = qpti_sbus_remove, + .remove = qpti_sbus_remove, }; module_platform_driver(qpti_sbus_driver); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index f86be197fedd..84334ab39c81 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -307,10 +307,6 @@ sg_open(struct inode *inode, struct file *filp) if (retval) goto sg_put; - retval = scsi_autopm_get_device(device); - if (retval) - goto sdp_put; - /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. 
*/ @@ -318,7 +314,7 @@ sg_open(struct inode *inode, struct file *filp) scsi_block_when_processing_errors(device))) { retval = -ENXIO; /* we are in error recovery for this device */ - goto error_out; + goto sdp_put; } mutex_lock(&sdp->open_rel_lock); @@ -371,8 +367,6 @@ out_undo: } error_mutex_locked: mutex_unlock(&sdp->open_rel_lock); -error_out: - scsi_autopm_put_device(device); sdp_put: kref_put(&sdp->d_ref, sg_device_destroy); scsi_device_put(device); @@ -392,7 +386,6 @@ sg_release(struct inode *inode, struct file *filp) SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); mutex_lock(&sdp->open_rel_lock); - scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index b0bef83db7b6..6594661db5f4 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -306,7 +306,7 @@ static void sgiwd93_remove(struct platform_device *pdev) static struct platform_driver sgiwd93_driver = { .probe = sgiwd93_probe, - .remove_new = sgiwd93_remove, + .remove = sgiwd93_remove, .driver = { .name = "sgiwd93", } diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 9df1c90a24c1..d1d2556c8fc4 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -119,7 +119,7 @@ static void snirm710_driver_remove(struct platform_device *dev) static struct platform_driver snirm710_driver = { .probe = snirm710_probe, - .remove_new = snirm710_driver_remove, + .remove = snirm710_driver_remove, .driver = { .name = "snirm_53c710", }, diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index beb88f25dbb9..e8ef27d7ef61 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -991,7 +991,10 @@ static int test_ready(struct scsi_tape *STp, int do_wait) scode = cmdstatp->sense_hdr.sense_key; if (scode == UNIT_ATTENTION) { /* New media? 
*/ - new_session = 1; + if (cmdstatp->sense_hdr.asc == 0x28) { /* New media */ + new_session = 1; + DEBC_printk(STp, "New tape session."); + } if (attentions < MAX_ATTENTIONS) { attentions++; continue; @@ -3506,6 +3509,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; + bool cmd_mtiocget; struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; @@ -3619,6 +3623,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) */ if (mtc.mt_op != MTREW && mtc.mt_op != MTOFFL && + mtc.mt_op != MTLOAD && mtc.mt_op != MTRETEN && mtc.mt_op != MTERASE && mtc.mt_op != MTSEEK && @@ -3732,17 +3737,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) goto out; } + cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET); + if ((i = flush_buffer(STp, 0)) < 0) { - retval = i; - goto out; - } - if (STp->can_partitions && - (i = switch_partition(STp)) < 0) { - retval = i; - goto out; + if (cmd_mtiocget && STp->pos_unknown) { + /* flush fails -> modify status accordingly */ + reset_state(STp); + STp->pos_unknown = 1; + } else { /* return error */ + retval = i; + goto out; + } + } else { /* flush_buffer succeeds */ + if (STp->can_partitions) { + i = switch_partition(STp); + if (i < 0) { + retval = i; + goto out; + } + } } - if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { + if (cmd_mtiocget) { struct mtget mt_status; if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { @@ -3756,7 +3772,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); mt_status.mt_blkno = STps->drv_block; mt_status.mt_fileno = STps->drv_file; - if (STp->block_size != 0) { + if (STp->block_size != 0 && mt_status.mt_blkno >= 0) { if (STps->rw == ST_WRITING) mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index fffc0fa52594..ca9cd691cc32 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -656,8 +656,14 @@ static void __exit sun3_scsi_remove(struct platform_device *pdev) iounmap(ioaddr); } -static struct platform_driver sun3_scsi_driver = { - .remove_new = __exit_p(sun3_scsi_remove), +/* + * sun3_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver sun3_scsi_driver __refdata = { + .remove = __exit_p(sun3_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index e20f314cf3e7..365406885b8e 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -265,7 +265,7 @@ static void esp_sun3x_remove(struct platform_device *dev) static struct platform_driver esp_sun3x_driver = { .probe = esp_sun3x_probe, - .remove_new = esp_sun3x_remove, + .remove = esp_sun3x_remove, .driver = { .name = "sun3x_esp", }, diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 5ce6c9d19d1e..aa430501f0c7 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -603,7 +603,7 @@ static struct platform_driver esp_sbus_driver = { .of_match_table = esp_match, }, .probe = esp_sbus_probe, - .remove_new = esp_sbus_remove, + .remove = esp_sbus_remove, }; module_platform_driver(esp_sbus_driver); diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index 4d8f3b7024ae..f66f869dff2e 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -6,6 +6,7 @@ menuconfig SOUNDWIRE tristate "SoundWire support" depends on ACPI || OF + depends on SND_SOC_SDCA_OPTIONAL help SoundWire is a 2-Pin interface with data and clock line ratified by the MIPI Alliance. SoundWire is used for transporting data diff --git a/drivers/soundwire/amd_init.c b/drivers/soundwire/amd_init.c index db040f435059..d11b60efda33 100644 --- a/drivers/soundwire/amd_init.c +++ b/drivers/soundwire/amd_init.c @@ -121,6 +121,7 @@ static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res) sdw_pdata[index].instance = index; sdw_pdata[index].acp_sdw_lock = res->acp_lock; + sdw_pdata[index].acp_rev = res->acp_rev; pdevinfo[index].name = "amd_sdw_manager"; pdevinfo[index].id = index; pdevinfo[index].parent = res->parent; @@ -177,7 +178,7 @@ EXPORT_SYMBOL_NS(sdw_amd_probe, SOUNDWIRE_AMD_INIT); void sdw_amd_exit(struct sdw_amd_ctx *ctx) { sdw_amd_cleanup(ctx); - kfree(ctx->ids); + kfree(ctx->peripherals); kfree(ctx); } EXPORT_SYMBOL_NS(sdw_amd_exit, SOUNDWIRE_AMD_INIT); @@ -204,10 +205,11 @@ int sdw_amd_get_slave_info(struct sdw_amd_ctx *ctx) num_slaves++; } - ctx->ids = kcalloc(num_slaves, sizeof(*ctx->ids), GFP_KERNEL); - if (!ctx->ids) + ctx->peripherals = kmalloc(struct_size(ctx->peripherals, array, num_slaves), + GFP_KERNEL); + if (!ctx->peripherals) return -ENOMEM; - ctx->num_slaves = num_slaves; + ctx->peripherals->num_peripherals = num_slaves; for (index = 0; index < ctx->count; index++) { if (!(ctx->link_mask & BIT(index))) continue; @@ -215,8 +217,7 @@ int sdw_amd_get_slave_info(struct sdw_amd_ctx *ctx) if (amd_manager) { bus = &amd_manager->bus; list_for_each_entry(slave, &bus->slaves, node) { - ctx->ids[i].id = slave->id; - ctx->ids[i].link_id = bus->link_id; + ctx->peripherals->array[i] = slave; i++; } } diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index 0d01849c3586..5a4bfaef65fb 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -433,12 +433,18 @@ static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_pa u32 frame_fmt_reg, dpn_frame_fmt; dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num); - switch (amd_manager->instance) { - case ACP_SDW0: - frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg; - break; - case ACP_SDW1: - frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg; + switch 
(amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + frame_fmt_reg = acp63_sdw0_dp_reg[p_params->num].frame_fmt_reg; + break; + case ACP_SDW1: + frame_fmt_reg = acp63_sdw1_dp_reg[p_params->num].frame_fmt_reg; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -465,20 +471,28 @@ static int amd_sdw_transport_params(struct sdw_bus *bus, u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg; u32 offset_reg, lane_ctrl_ch_en_reg; - switch (amd_manager->instance) { - case ACP_SDW0: - frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg; - sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg; - hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg; - offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg; - lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg; - break; - case ACP_SDW1: - frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg; - sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg; - hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg; - offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg; - lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg; + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + frame_fmt_reg = acp63_sdw0_dp_reg[params->port_num].frame_fmt_reg; + sample_int_reg = acp63_sdw0_dp_reg[params->port_num].sample_int_reg; + hctrl_dp0_reg = acp63_sdw0_dp_reg[params->port_num].hctrl_dp0_reg; + offset_reg = acp63_sdw0_dp_reg[params->port_num].offset_reg; + lane_ctrl_ch_en_reg = + acp63_sdw0_dp_reg[params->port_num].lane_ctrl_ch_en_reg; + break; + case ACP_SDW1: + frame_fmt_reg = acp63_sdw1_dp_reg[params->port_num].frame_fmt_reg; + sample_int_reg = acp63_sdw1_dp_reg[params->port_num].sample_int_reg; + hctrl_dp0_reg = acp63_sdw1_dp_reg[params->port_num].hctrl_dp0_reg; + offset_reg = acp63_sdw1_dp_reg[params->port_num].offset_reg; + lane_ctrl_ch_en_reg = + acp63_sdw1_dp_reg[params->port_num].lane_ctrl_ch_en_reg; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -520,12 +534,20 @@ static int amd_sdw_port_enable(struct sdw_bus *bus, u32 dpn_ch_enable; u32 lane_ctrl_ch_en_reg; - switch (amd_manager->instance) { - case ACP_SDW0: - lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; - break; - case ACP_SDW1: - lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + lane_ctrl_ch_en_reg = + acp63_sdw0_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; + break; + case ACP_SDW1: + lane_ctrl_ch_en_reg = + acp63_sdw1_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -910,6 +932,7 @@ static int amd_sdw_manager_probe(struct platform_device *pdev) amd_manager->mmio = amd_manager->acp_mmio + (amd_manager->instance * SDW_MANAGER_REG_OFFSET); amd_manager->acp_sdw_lock = pdata->acp_sdw_lock; + amd_manager->acp_rev = pdata->acp_rev; amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS); amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS); amd_manager->dev = dev; @@ -926,15 +949,21 @@ static int amd_sdw_manager_probe(struct platform_device *pdev) * information. 
*/ amd_manager->bus.controller_id = 0; - - switch (amd_manager->instance) { - case ACP_SDW0: - amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS; - amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS; - break; - case ACP_SDW1: - amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS; - amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS; + dev_dbg(dev, "acp_rev:0x%x\n", amd_manager->acp_rev); + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + amd_manager->num_dout_ports = AMD_ACP63_SDW0_MAX_TX_PORTS; + amd_manager->num_din_ports = AMD_ACP63_SDW0_MAX_RX_PORTS; + break; + case ACP_SDW1: + amd_manager->num_dout_ports = AMD_ACP63_SDW1_MAX_TX_PORTS; + amd_manager->num_din_ports = AMD_ACP63_SDW1_MAX_RX_PORTS; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h index 707065468e05..cc2170e4521e 100644 --- a/drivers/soundwire/amd_manager.h +++ b/drivers/soundwire/amd_manager.h @@ -155,12 +155,12 @@ #define AMD_SDW_IRQ_MASK_8TO11 0x000c7777 #define AMD_SDW_IRQ_ERROR_MASK 0xff #define AMD_SDW_MAX_FREQ_NUM 1 -#define AMD_SDW0_MAX_TX_PORTS 3 -#define AMD_SDW0_MAX_RX_PORTS 3 -#define AMD_SDW1_MAX_TX_PORTS 1 -#define AMD_SDW1_MAX_RX_PORTS 1 -#define AMD_SDW0_MAX_DAI 6 -#define AMD_SDW1_MAX_DAI 2 +#define AMD_ACP63_SDW0_MAX_TX_PORTS 3 +#define AMD_ACP63_SDW0_MAX_RX_PORTS 3 +#define AMD_ACP63_SDW1_MAX_TX_PORTS 1 +#define AMD_ACP63_SDW1_MAX_RX_PORTS 1 +#define AMD_ACP63_SDW0_MAX_DAI 6 +#define AMD_ACP63_SDW1_MAX_DAI 2 #define AMD_SDW_SLAVE_0_ATTACHED 5 #define AMD_SDW_SSP_COUNTER_VAL 3 @@ -222,7 +222,7 @@ struct sdw_manager_dp_reg { * in SoundWire DMA driver. */ -static struct sdw_manager_dp_reg sdw0_manager_dp_reg[AMD_SDW0_MAX_DAI] = { +static struct sdw_manager_dp_reg acp63_sdw0_dp_reg[AMD_ACP63_SDW0_MAX_DAI] = { {ACP_SW_AUDIO0_TX_FRAME_FORMAT, ACP_SW_AUDIO0_TX_SAMPLEINTERVAL, ACP_SW_AUDIO0_TX_HCTRL_DP0, ACP_SW_AUDIO0_TX_OFFSET_DP0, ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP0}, {ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL, @@ -237,7 +237,7 @@ static struct sdw_manager_dp_reg sdw0_manager_dp_reg[AMD_SDW0_MAX_DAI] = { ACP_SW_AUDIO2_RX_OFFSET, ACP_SW_AUDIO2_RX_CHANNEL_ENABLE_DP0}, }; -static struct sdw_manager_dp_reg sdw1_manager_dp_reg[AMD_SDW1_MAX_DAI] = { +static struct sdw_manager_dp_reg acp63_sdw1_dp_reg[AMD_ACP63_SDW1_MAX_DAI] = { {ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL, ACP_SW_AUDIO1_TX_OFFSET, ACP_SW_AUDIO1_TX_CHANNEL_ENABLE_DP0}, {ACP_SW_AUDIO1_RX_FRAME_FORMAT, ACP_SW_AUDIO1_RX_SAMPLEINTERVAL, ACP_SW_AUDIO1_RX_HCTRL, diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 263ca32f0c5c..d1dc62c34f1c 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -112,7 +112,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, /* Set higher order bits */ *bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM); - /* Set enumuration device number and broadcast device number */ + /* Set enumeration device number and broadcast device number */ set_bit(SDW_ENUM_DEV_NUM, bus->assigned); set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned); diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 05652e983539..f367670ea991 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -1378,6 +1378,31 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns) } /** + 
* sdw_cdns_soft_reset() - Cadence soft-reset + * @cdns: Cadence instance + */ +int sdw_cdns_soft_reset(struct sdw_cdns *cdns) +{ + int ret; + + cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_SOFT_RST, + CDNS_MCP_CONTROL_SOFT_RST); + + ret = cdns_config_update(cdns); + if (ret < 0) { + dev_err(cdns->dev, "%s: config update failed\n", __func__); + return ret; + } + + ret = cdns_set_wait(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_SOFT_RST, 0); + if (ret < 0) + dev_err(cdns->dev, "%s: Soft Reset timed out\n", __func__); + + return ret; +} +EXPORT_SYMBOL(sdw_cdns_soft_reset); + +/** * sdw_cdns_init() - Cadence initialization * @cdns: Cadence instance */ @@ -1400,6 +1425,11 @@ int sdw_cdns_init(struct sdw_cdns *cdns) cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT, CDNS_IP_MCP_CONTROL_CMD_ACCEPT); + /* disable wakeup */ + cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, + CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP, + 0); + /* Configure mcp config */ val = cdns_readl(cdns, CDNS_MCP_CONFIG); diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index e1d7969ba48a..c34fb050fe4f 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -168,6 +168,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns); irqreturn_t sdw_cdns_irq(int irq, void *dev_id); irqreturn_t sdw_cdns_thread(int irq, void *dev_id); +int sdw_cdns_soft_reset(struct sdw_cdns *cdns); int sdw_cdns_init(struct sdw_cdns *cdns); int sdw_cdns_pdi_init(struct sdw_cdns *cdns, struct sdw_cdns_stream_config config); diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c index 4f3dd70d6a1a..a005b63582e9 100644 --- a/drivers/soundwire/intel_ace2x.c +++ b/drivers/soundwire/intel_ace2x.c @@ -175,6 +175,9 @@ static int intel_link_power_up(struct sdw_intel *sdw) __func__, ret); goto out; } + + hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true, + AZX_REG_ML_LEPTR_ID_SDW, true); } *shim_mask |= BIT(link_id); @@ -201,6 +204,10 @@ static int intel_link_power_down(struct sdw_intel *sdw) *shim_mask &= ~BIT(link_id); + if (!*shim_mask) + hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true, + AZX_REG_ML_LEPTR_ID_SDW, false); + ret = hdac_bus_eml_sdw_power_down_unlocked(sdw->link_res->hbus, link_id); if (ret < 0) { dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_down failed: %d\n", diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index ae689d5d1ab9..599954d92752 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -41,6 +41,10 @@ static int md_flags; module_param_named(sdw_md_flags, md_flags, int, 0444); MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)"); +static int mclk_divider; +module_param_named(sdw_mclk_divider, mclk_divider, int, 0444); +MODULE_PARM_DESC(sdw_mclk_divider, "SoundWire Intel mclk divider"); + struct wake_capable_part { const u16 mfg_id; const u16 part_id; @@ -142,8 +146,12 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus) "intel-sdw-ip-clock", &prop->mclk_freq); - /* the values reported by BIOS are the 2x clock, not the bus clock */ - prop->mclk_freq /= 2; + if (mclk_divider) + /* use kernel parameter for BIOS or board work-arounds */ + prop->mclk_freq /= mclk_divider; + else + /* the values reported by BIOS are the 2x clock, not the bus clock */ + prop->mclk_freq /= 2; fwnode_property_read_u32(link, "intel-quirk-mask", diff --git a/drivers/soundwire/intel_bus_common.c 
b/drivers/soundwire/intel_bus_common.c index d3ff6c65b64c..ad1f8ebdbfc9 100644 --- a/drivers/soundwire/intel_bus_common.c +++ b/drivers/soundwire/intel_bus_common.c @@ -16,6 +16,12 @@ int intel_start_bus(struct sdw_intel *sdw) struct sdw_bus *bus = &cdns->bus; int ret; + ret = sdw_cdns_soft_reset(cdns); + if (ret < 0) { + dev_err(dev, "%s: unable to soft-reset Cadence IP: %d\n", __func__, ret); + return ret; + } + /* * follow recommended programming flows to avoid timeouts when * gsync is enabled diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c index a09134b97cd6..12e7a98f319f 100644 --- a/drivers/soundwire/intel_init.c +++ b/drivers/soundwire/intel_init.c @@ -252,17 +252,16 @@ static struct sdw_intel_ctx num_slaves++; } - ctx->ids = kcalloc(num_slaves, sizeof(*ctx->ids), GFP_KERNEL); - if (!ctx->ids) + ctx->peripherals = kmalloc(struct_size(ctx->peripherals, array, num_slaves), + GFP_KERNEL); + if (!ctx->peripherals) goto err; - - ctx->num_slaves = num_slaves; + ctx->peripherals->num_peripherals = num_slaves; i = 0; list_for_each_entry(link, &ctx->link_list, list) { bus = &link->cdns->bus; list_for_each_entry(slave, &bus->slaves, node) { - ctx->ids[i].id = slave->id; - ctx->ids[i].link_id = bus->link_id; + ctx->peripherals->array[i] = slave; i++; } } @@ -371,7 +370,7 @@ void sdw_intel_exit(struct sdw_intel_ctx *ctx) } sdw_intel_cleanup(ctx); - kfree(ctx->ids); + kfree(ctx->peripherals); kfree(ctx->ldev); kfree(ctx); } diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c index e5d9df26d4dc..9d59f486edbe 100644 --- a/drivers/soundwire/mipi_disco.c +++ b/drivers/soundwire/mipi_disco.c @@ -23,6 +23,26 @@ #include <linux/soundwire/sdw.h> #include "bus.h" +static bool mipi_fwnode_property_read_bool(const struct fwnode_handle *fwnode, + const char *propname) +{ + int ret; + u8 val; + + if (!fwnode_property_present(fwnode, propname)) + return false; + ret = fwnode_property_read_u8_array(fwnode, propname, &val, 1); + if (ret < 0) + return false; + return !!val; +} + +static bool mipi_device_property_read_bool(const struct device *dev, + const char *propname) +{ + return mipi_fwnode_property_read_bool(dev_fwnode(dev), propname); +} + /** * sdw_master_read_prop() - Read Master properties * @bus: SDW bus instance @@ -31,8 +51,11 @@ int sdw_master_read_prop(struct sdw_bus *bus) { struct sdw_master_prop *prop = &bus->prop; struct fwnode_handle *link; + const char *scales_prop; char name[32]; - int nval, i; + int nval; + int ret; + int i; device_property_read_u32(bus->dev, "mipi-sdw-sw-interface-revision", @@ -48,11 +71,11 @@ int sdw_master_read_prop(struct sdw_bus *bus) return -EIO; } - if (fwnode_property_read_bool(link, + if (mipi_fwnode_property_read_bool(link, "mipi-sdw-clock-stop-mode0-supported")) prop->clk_stop_modes |= BIT(SDW_CLK_STOP_MODE0); - if (fwnode_property_read_bool(link, + if (mipi_fwnode_property_read_bool(link, "mipi-sdw-clock-stop-mode1-supported")) prop->clk_stop_modes |= BIT(SDW_CLK_STOP_MODE1); @@ -71,9 +94,11 @@ int sdw_master_read_prop(struct sdw_bus *bus) return -ENOMEM; } - fwnode_property_read_u32_array(link, + ret = fwnode_property_read_u32_array(link, "mipi-sdw-clock-frequencies-supported", prop->clk_freq, prop->num_clk_freq); + if (ret < 0) + return ret; } /* @@ -88,7 +113,12 @@ int sdw_master_read_prop(struct sdw_bus *bus) } } - nval = fwnode_property_count_u32(link, "mipi-sdw-supported-clock-gears"); + scales_prop = "mipi-sdw-supported-clock-scales"; + nval = fwnode_property_count_u32(link, scales_prop); + if (nval == 
0) { + scales_prop = "mipi-sdw-supported-clock-gears"; + nval = fwnode_property_count_u32(link, scales_prop); + } if (nval > 0) { prop->num_clk_gears = nval; prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears, @@ -99,10 +129,12 @@ int sdw_master_read_prop(struct sdw_bus *bus) return -ENOMEM; } - fwnode_property_read_u32_array(link, - "mipi-sdw-supported-clock-gears", + ret = fwnode_property_read_u32_array(link, + scales_prop, prop->clk_gears, prop->num_clk_gears); + if (ret < 0) + return ret; } fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate", @@ -114,7 +146,7 @@ int sdw_master_read_prop(struct sdw_bus *bus) fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size", &prop->default_col); - prop->dynamic_frame = fwnode_property_read_bool(link, + prop->dynamic_frame = mipi_fwnode_property_read_bool(link, "mipi-sdw-dynamic-frame-shape"); fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold", @@ -131,6 +163,7 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave, struct sdw_dp0_prop *dp0) { int nval; + int ret; fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength", &dp0->max_word); @@ -148,20 +181,38 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave, if (!dp0->words) return -ENOMEM; - fwnode_property_read_u32_array(port, + ret = fwnode_property_read_u32_array(port, "mipi-sdw-port-wordlength-configs", dp0->words, dp0->num_words); + if (ret < 0) + return ret; } - dp0->BRA_flow_controlled = fwnode_property_read_bool(port, + dp0->BRA_flow_controlled = mipi_fwnode_property_read_bool(port, "mipi-sdw-bra-flow-controlled"); - dp0->simple_ch_prep_sm = fwnode_property_read_bool(port, + dp0->simple_ch_prep_sm = mipi_fwnode_property_read_bool(port, "mipi-sdw-simplified-channel-prepare-sm"); - dp0->imp_def_interrupts = fwnode_property_read_bool(port, + dp0->imp_def_interrupts = mipi_fwnode_property_read_bool(port, "mipi-sdw-imp-def-dp0-interrupts-supported"); + nval = fwnode_property_count_u32(port, "mipi-sdw-lane-list"); + if (nval > 0) { + dp0->num_lanes = nval; + dp0->lane_list = devm_kcalloc(&slave->dev, + dp0->num_lanes, sizeof(*dp0->lane_list), + GFP_KERNEL); + if (!dp0->lane_list) + return -ENOMEM; + + ret = fwnode_property_read_u32_array(port, + "mipi-sdw-lane-list", + dp0->lane_list, dp0->num_lanes); + if (ret < 0) + return ret; + } + return 0; } @@ -171,9 +222,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, { struct fwnode_handle *node; u32 bit, i = 0; - int nval; unsigned long addr; char name[40]; + int nval; + int ret; addr = ports; /* valid ports are 1 to 14 so apply mask */ @@ -208,9 +260,11 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, return -ENOMEM; } - fwnode_property_read_u32_array(node, + ret = fwnode_property_read_u32_array(node, "mipi-sdw-port-wordlength-configs", dpn[i].words, dpn[i].num_words); + if (ret < 0) + return ret; } fwnode_property_read_u32(node, "mipi-sdw-data-port-type", @@ -220,7 +274,7 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, "mipi-sdw-max-grouping-supported", &dpn[i].max_grouping); - dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node, + dpn[i].simple_ch_prep_sm = mipi_fwnode_property_read_bool(node, "mipi-sdw-simplified-channelprepare-sm"); fwnode_property_read_u32(node, @@ -249,9 +303,11 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, return -ENOMEM; } - fwnode_property_read_u32_array(node, + ret = fwnode_property_read_u32_array(node, "mipi-sdw-channel-number-list", dpn[i].channels, dpn[i].num_channels); + if (ret < 0) + return ret; } nval = 
fwnode_property_count_u32(node, "mipi-sdw-channel-combination-list"); @@ -266,10 +322,12 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, return -ENOMEM; } - fwnode_property_read_u32_array(node, + ret = fwnode_property_read_u32_array(node, "mipi-sdw-channel-combination-list", dpn[i].ch_combinations, dpn[i].num_ch_combinations); + if (ret < 0) + return ret; } fwnode_property_read_u32(node, @@ -278,13 +336,27 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer", &dpn[i].max_async_buffer); - dpn[i].block_pack_mode = fwnode_property_read_bool(node, + dpn[i].block_pack_mode = mipi_fwnode_property_read_bool(node, "mipi-sdw-block-packing-mode"); fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type", &dpn[i].port_encoding); - /* TODO: Read audio mode */ + nval = fwnode_property_count_u32(node, "mipi-sdw-lane-list"); + if (nval > 0) { + dpn[i].num_lanes = nval; + dpn[i].lane_list = devm_kcalloc(&slave->dev, + dpn[i].num_lanes, sizeof(*dpn[i].lane_list), + GFP_KERNEL); + if (!dpn[i].lane_list) + return -ENOMEM; + + ret = fwnode_property_read_u32_array(node, + "mipi-sdw-lane-list", + dpn[i].lane_list, dpn[i].num_lanes); + if (ret < 0) + return ret; + } fwnode_handle_put(node); @@ -304,42 +376,46 @@ int sdw_slave_read_prop(struct sdw_slave *slave) struct device *dev = &slave->dev; struct fwnode_handle *port; int nval; + int ret; device_property_read_u32(dev, "mipi-sdw-sw-interface-revision", &prop->mipi_revision); - prop->wake_capable = device_property_read_bool(dev, + prop->wake_capable = mipi_device_property_read_bool(dev, "mipi-sdw-wake-up-unavailable"); prop->wake_capable = !prop->wake_capable; - prop->test_mode_capable = device_property_read_bool(dev, + prop->test_mode_capable = mipi_device_property_read_bool(dev, "mipi-sdw-test-mode-supported"); prop->clk_stop_mode1 = false; - if (device_property_read_bool(dev, + if (mipi_device_property_read_bool(dev, "mipi-sdw-clock-stop-mode1-supported")) prop->clk_stop_mode1 = true; - prop->simple_clk_stop_capable = device_property_read_bool(dev, + prop->simple_clk_stop_capable = mipi_device_property_read_bool(dev, "mipi-sdw-simplified-clockstopprepare-sm-supported"); device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout", &prop->clk_stop_timeout); - device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout", - &prop->ch_prep_timeout); + ret = device_property_read_u32(dev, "mipi-sdw-peripheral-channelprepare-timeout", + &prop->ch_prep_timeout); + if (ret < 0) + device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout", + &prop->ch_prep_timeout); device_property_read_u32(dev, "mipi-sdw-clockstopprepare-hard-reset-behavior", &prop->reset_behave); - prop->high_PHY_capable = device_property_read_bool(dev, + prop->high_PHY_capable = mipi_device_property_read_bool(dev, "mipi-sdw-highPHY-capable"); - prop->paging_support = device_property_read_bool(dev, + prop->paging_support = mipi_device_property_read_bool(dev, "mipi-sdw-paging-support"); - prop->bank_delay_support = device_property_read_bool(dev, + prop->bank_delay_support = mipi_device_property_read_bool(dev, "mipi-sdw-bank-delay-support"); device_property_read_u32(dev, @@ -354,7 +430,17 @@ int sdw_slave_read_prop(struct sdw_slave *slave) device_property_read_u32(dev, "mipi-sdw-sink-port-list", &prop->sink_ports); - /* Read dp0 properties */ + device_property_read_u32(dev, "mipi-sdw-sdca-interrupt-register-list", + &prop->sdca_interrupt_register_list); + + prop->commit_register_supported = 
mipi_device_property_read_bool(dev, + "mipi-sdw-commit-register-supported"); + + /* + * Read dp0 properties - we don't rely on the 'mipi-sdw-dp-0-supported' + * property since the 'mipi-sdw-dp0-subproperties' property is logically + * equivalent. + */ port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties"); if (!port) { dev_dbg(dev, "DP0 node not found!!\n"); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index aed57002fd0e..2b403b14066c 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1173,7 +1173,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl, else sconfig.direction = SDW_DATA_DIR_RX; - /* hw parameters wil be ignored as we only support PDM */ + /* hw parameters will be ignored as we only support PDM */ sconfig.ch_count = 1; sconfig.frame_rate = params_rate(params); sconfig.type = stream->type; diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index f1a4df6cfebd..97cf8bcca047 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -5,6 +5,7 @@ #include <linux/of.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_type.h> +#include <sound/sdca.h> #include "bus.h" #include "sysfs_local.h" @@ -70,6 +71,17 @@ int sdw_slave_add(struct sdw_bus *bus, list_add_tail(&slave->node, &bus->slaves); mutex_unlock(&bus->bus_lock); + /* + * The Soundwire driver probe may optionally register SDCA + * sub-devices, one per Function. This means the information + * on the SDCA revision and the number/type of Functions need + * to be extracted from platform firmware before the SoundWire + * driver probe, and as a consequence before the SoundWire + * device_register() below. + */ + sdca_lookup_interface_revision(slave); + sdca_lookup_functions(slave); + ret = device_register(&slave->dev); if (ret) { dev_err(bus->dev, "Failed to add slave: ret %d\n", ret); @@ -259,3 +271,5 @@ int sdw_of_find_slaves(struct sdw_bus *bus) return 0; } + +MODULE_IMPORT_NS(SND_SOC_SDCA); diff --git a/drivers/soundwire/sysfs_slave.c b/drivers/soundwire/sysfs_slave.c index f4259710dd0f..c5c22d1708ec 100644 --- a/drivers/soundwire/sysfs_slave.c +++ b/drivers/soundwire/sysfs_slave.c @@ -215,7 +215,7 @@ const struct attribute_group *sdw_attr_groups[] = { /* * the status is shown in capital letters for UNATTACHED and RESERVED - * on purpose, to highligh users to the fact that these status values + * on purpose, to highlight users to the fact that these status values * are not expected. 
*/ static const char *const slave_status[] = { diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 91108ddfaef2..316bce577081 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -183,7 +183,7 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) case QSPI_MR: return "MR"; case QSPI_RD: - return "MR"; + return "RD"; case QSPI_TD: return "TD"; case QSPI_SR: diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 0b6b0151b3a3..eeb7d082c247 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1685,7 +1685,7 @@ static unsigned int spi_imx_transfer_estimate_time_us(struct spi_transfer *trans words = DIV_ROUND_UP(transfer->len * BITS_PER_BYTE, transfer->bits_per_word); word_delay_us = DIV_ROUND_CLOSEST(spi_delay_to_ns(&transfer->word_delay, transfer), NSEC_PER_USEC); - result += words * word_delay_us; + result += (u64)words * word_delay_us; } return min(result, U32_MAX); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 17b8baf749e6..abc6792e738c 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -172,6 +172,9 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, if (!spi_mem_controller_is_capable(ctlr, dtr)) return false; + if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16)) + return false; + if (op->cmd.nbytes != 2) return false; } else { diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c index 2166a56aeb02..fdbea9dffb62 100644 --- a/drivers/spi/spi-mtk-snfi.c +++ b/drivers/spi/spi-mtk-snfi.c @@ -776,7 +776,7 @@ static int mtk_snand_ecc_finish_io_req(struct nand_device *nand, return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips; } -static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = { +static const struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = { .init_ctx = mtk_snand_ecc_init_ctx, .cleanup_ctx = mtk_snand_ecc_cleanup_ctx, .prepare_io_req = mtk_snand_ecc_prepare_io_req, diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 71edeb8dfc5f..809767d3145c 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -294,7 +294,8 @@ static void mxic_spi_hw_init(struct mxic_spi *mxic) mxic->regs + HC_CFG); } -static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags) +static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags, + bool swap16) { int nio = 1; @@ -305,6 +306,11 @@ static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags) else if (spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL)) nio = 2; + if (swap16) + flags &= ~HC_CFG_DATA_PASS; + else + flags |= HC_CFG_DATA_PASS; + return flags | HC_CFG_NIO(nio) | HC_CFG_TYPE(spi_get_chipselect(spi, 0), HC_CFG_TYPE_SPI_NOR) | HC_CFG_SLV_ACT(spi_get_chipselect(spi, 0)) | HC_CFG_IDLE_SIO_LVL(1); @@ -397,7 +403,8 @@ static ssize_t mxic_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, if (WARN_ON(offs + desc->info.offset + len > U32_MAX)) return -EINVAL; - writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG); + writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0, desc->info.op_tmpl.data.swap16), + mxic->regs + HC_CFG); writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len), mxic->regs + LRD_CFG); @@ -441,7 +448,8 @@ static ssize_t mxic_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, if (WARN_ON(offs + desc->info.offset + len > U32_MAX)) return -EINVAL; - writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG); + writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0, desc->info.op_tmpl.data.swap16), + 
mxic->regs + HC_CFG); writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len), mxic->regs + LWR_CFG); @@ -518,7 +526,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem, if (ret) return ret; - writel(mxic_spi_prep_hc_cfg(mem->spi, HC_CFG_MAN_CS_EN), + writel(mxic_spi_prep_hc_cfg(mem->spi, HC_CFG_MAN_CS_EN, op->data.swap16), mxic->regs + HC_CFG); writel(HC_EN_BIT, mxic->regs + HC_EN); @@ -573,6 +581,7 @@ static const struct spi_controller_mem_ops mxic_spi_mem_ops = { static const struct spi_controller_mem_caps mxic_spi_mem_caps = { .dtr = true, .ecc = true, + .swap16 = true, }; static void mxic_spi_set_cs(struct spi_device *spi, bool lvl) @@ -640,7 +649,7 @@ static int mxic_spi_transfer_one(struct spi_controller *host, /* ECC wrapper */ static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand) { - struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); struct mxic_spi *mxic = nand->ecc.engine->priv; mxic->ecc.use_pipelined_conf = true; @@ -650,7 +659,7 @@ static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand) static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand) { - struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); struct mxic_spi *mxic = nand->ecc.engine->priv; mxic->ecc.use_pipelined_conf = false; @@ -661,7 +670,7 @@ static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand) static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req) { - struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); return ops->prepare_io_req(nand, req); } @@ -669,12 +678,12 @@ static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand, static int mxic_spi_mem_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req) { - struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); return ops->finish_io_req(nand, req); } -static struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = { +static const struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = { .init_ctx = mxic_spi_mem_ecc_init_ctx, .cleanup_ctx = mxic_spi_mem_ecc_cleanup_ctx, .prepare_io_req = mxic_spi_mem_ecc_prepare_io_req, diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index 316e3db88492..69d0f2175568 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -503,7 +503,7 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op rockchip_sfc_adjust_op_work((struct spi_mem_op *)op); rockchip_sfc_xfer_setup(sfc, mem, op, len); if (len) { - if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) { + if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD && !(len & 0x3)) { init_completion(&sfc->cp); rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA); ret = rockchip_sfc_xfer_data_dma(sfc, op, len); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 460a49d9a0de..ff1add2ecb91 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -424,6 +424,16 @@ static int spi_probe(struct device *dev) spi->irq = 0; } + if (has_acpi_companion(dev) && spi->irq < 0) { + struct acpi_device *adev = to_acpi_device_node(dev->fwnode); + + spi->irq = acpi_dev_gpio_irq_get(adev, 0); + if (spi->irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + if 
(spi->irq < 0) + spi->irq = 0; + } + ret = dev_pm_domain_attach(dev, true); if (ret) return ret; @@ -2866,9 +2876,6 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, sizeof(spi->modalias)); - if (spi->irq < 0) - spi->irq = acpi_dev_gpio_irq_get(adev, 0); - acpi_device_set_enumerated(adev); adev->power.flags.ignore_parent = true; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 440e07b1d5cd..287ac5b0495f 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -369,7 +369,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) bdev_file = bdev_file_open_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL); if (IS_ERR(bdev_file)) { - pr_err("pSCSI: bdev_open_by_path() failed\n"); + pr_err("pSCSI: bdev_file_open_by_path() failed\n"); scsi_device_put(sd); return PTR_ERR(bdev_file); } diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 61e7ae524b1f..d3f9686e26e7 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -220,6 +220,15 @@ config DEVFREQ_THERMAL If you want this support, you should say Y here. +config PCIE_THERMAL + bool "PCIe cooling support" + depends on PCIEPORTBUS + help + This implements PCIe cooling mechanism through bandwidth reduction + for PCIe devices. + + If you want this support, you should say Y here. + config THERMAL_EMULATION bool "Thermal emulation mode support" help diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 1e1559bb971e..9abf43a74f2b 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -32,6 +32,8 @@ thermal_sys-$(CONFIG_CPU_IDLE_THERMAL) += cpuidle_cooling.o # devfreq cooling thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o +thermal_sys-$(CONFIG_PCIE_THERMAL) += pcie_cooling.o + obj-$(CONFIG_K3_THERMAL) += k3_bandgap.o k3_j72xx_bandgap.o # platform thermal drivers obj-y += broadcom/ diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c index cd4776aa805e..3c5f7dbddf2c 100644 --- a/drivers/thermal/amlogic_thermal.c +++ b/drivers/thermal/amlogic_thermal.c @@ -333,7 +333,7 @@ static struct platform_driver amlogic_thermal_driver = { .of_match_table = of_amlogic_thermal_match, }, .probe = amlogic_thermal_probe, - .remove_new = amlogic_thermal_remove, + .remove = amlogic_thermal_remove, }; module_platform_driver(amlogic_thermal_driver); diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index fdcb077cfd54..9bff21068721 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -970,7 +970,7 @@ static void armada_thermal_exit(struct platform_device *pdev) static struct platform_driver armada_thermal_driver = { .probe = armada_thermal_probe, - .remove_new = armada_thermal_exit, + .remove = armada_thermal_exit, .driver = { .name = "armada_thermal", .of_match_table = armada_thermal_id_table, diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 7d61493082b5..7fbba2233c4c 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -268,7 +268,7 @@ static void bcm2835_thermal_remove(struct platform_device *pdev) static struct platform_driver bcm2835_thermal_driver = { .probe = bcm2835_thermal_probe, - .remove_new = bcm2835_thermal_remove, + .remove = bcm2835_thermal_remove, .driver = { .name = 
"bcm2835_thermal", .of_match_table = bcm2835_thermal_of_match_table, diff --git a/drivers/thermal/broadcom/ns-thermal.c b/drivers/thermal/broadcom/ns-thermal.c index 5eaf79c490f0..8b5b32f749ee 100644 --- a/drivers/thermal/broadcom/ns-thermal.c +++ b/drivers/thermal/broadcom/ns-thermal.c @@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(of, ns_thermal_of_match); static struct platform_driver ns_thermal_driver = { .probe = ns_thermal_probe, - .remove_new = ns_thermal_remove, + .remove = ns_thermal_remove, .driver = { .name = "ns-thermal", .of_match_table = ns_thermal_of_match, diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c index a27aff88cd96..2077e85ef5ca 100644 --- a/drivers/thermal/da9062-thermal.c +++ b/drivers/thermal/da9062-thermal.c @@ -250,10 +250,10 @@ static void da9062_thermal_remove(struct platform_device *pdev) static struct platform_driver da9062_thermal_driver = { .probe = da9062_thermal_probe, - .remove_new = da9062_thermal_remove, + .remove = da9062_thermal_remove, .driver = { - .name = "da9062-thermal", - .of_match_table = da9062_compatible_reg_id_table, + .name = "da9062-thermal", + .of_match_table = da9062_compatible_reg_id_table, }, }; diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index ac30de3c0a5f..f9157a47156b 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c @@ -170,7 +170,7 @@ MODULE_DEVICE_TABLE(of, dove_thermal_id_table); static struct platform_driver dove_thermal_driver = { .probe = dove_thermal_probe, - .remove_new = dove_thermal_exit, + .remove = dove_thermal_exit, .driver = { .name = "dove_thermal", .of_match_table = dove_thermal_id_table, diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index ac6fa6b8f99f..3b644de3292e 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -588,10 +588,15 @@ static void allow_maximum_power(struct thermal_zone_device *tz) static int check_power_actors(struct thermal_zone_device *tz, struct power_allocator_params *params) { - const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max); + const struct thermal_trip_desc *td; struct thermal_instance *instance; int ret = 0; + if (!params->trip_max) + return 0; + + td = trip_to_trip_desc(params->trip_max); + list_for_each_entry(instance, &td->thermal_instances, trip_node) { if (!cdev_is_power_actor(instance->cdev)) { dev_warn(&tz->device, "power_allocator: %s is not a power actor\n", diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index f1fe0f8ab04f..7e918bd3f100 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -637,10 +637,10 @@ static struct platform_driver hisi_thermal_driver = { .driver = { .name = "hisi_thermal", .pm = pm_sleep_ptr(&hisi_thermal_pm_ops), - .of_match_table = of_hisi_thermal_match, + .of_match_table = of_hisi_thermal_match, }, .probe = hisi_thermal_probe, - .remove_new = hisi_thermal_remove, + .remove = hisi_thermal_remove, }; module_platform_driver(hisi_thermal_driver); diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index d74ed6ce2974..719d71f5b235 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -399,7 +399,7 @@ static struct platform_driver imx8mm_tmu = { .of_match_table = imx8mm_tmu_table, }, .probe = imx8mm_tmu_probe, - .remove_new = imx8mm_tmu_remove, + .remove = imx8mm_tmu_remove, }; module_platform_driver(imx8mm_tmu); diff --git 
a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index b8e85a405351..bab52e6b3b15 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -861,7 +861,7 @@ static struct platform_driver imx_thermal = { .of_match_table = of_imx_thermal_match, }, .probe = imx_thermal_probe, - .remove_new = imx_thermal_remove, + .remove = imx_thermal_remove, }; module_platform_driver(imx_thermal); diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index b0c0f0ffdcb0..8660ef2175be 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -75,11 +75,6 @@ struct odvp_attr { static BIN_ATTR_SIMPLE_RO(data_vault); -static struct bin_attribute *data_attributes[] = { - &bin_attr_data_vault, - NULL, -}; - static ssize_t imok_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -108,10 +103,6 @@ static const struct attribute_group imok_attribute_group = { .attrs = imok_attr, }; -static const struct attribute_group data_attribute_group = { - .bin_attrs = data_attributes, -}; - static ssize_t available_uuids_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -137,7 +128,7 @@ static ssize_t current_uuid_show(struct device *dev, struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i, length = 0; - if (priv->current_uuid_index > 0) + if (priv->current_uuid_index >= 0) return sprintf(buf, "%s\n", int3400_thermal_uuids[priv->current_uuid_index]); @@ -624,8 +615,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) } if (!ZERO_OR_NULL_PTR(priv->data_vault)) { - result = sysfs_create_group(&pdev->dev.kobj, - &data_attribute_group); + result = device_create_bin_file(&pdev->dev, &bin_attr_data_vault); if (result) goto free_uuid; } @@ -648,7 +638,7 @@ free_notify: free_sysfs: cleanup_odvp(priv); if (!ZERO_OR_NULL_PTR(priv->data_vault)) { - sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); + device_remove_bin_file(&pdev->dev, &bin_attr_data_vault); kfree(priv->data_vault); } free_uuid: @@ -683,7 +673,7 @@ static void int3400_thermal_remove(struct platform_device *pdev) acpi_thermal_rel_misc_device_remove(priv->adev->handle); if (!ZERO_OR_NULL_PTR(priv->data_vault)) - sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); + device_remove_bin_file(&pdev->dev, &bin_attr_data_vault); sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group); thermal_zone_device_unregister(priv->thermal); @@ -707,7 +697,7 @@ MODULE_DEVICE_TABLE(acpi, int3400_thermal_match); static struct platform_driver int3400_thermal_driver = { .probe = int3400_thermal_probe, - .remove_new = int3400_thermal_remove, + .remove = int3400_thermal_remove, .driver = { .name = "int3400 thermal", .acpi_match_table = ACPI_PTR(int3400_thermal_match), diff --git a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c index 193645a73861..96d6277a5a8c 100644 --- a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c @@ -60,7 +60,7 @@ static SIMPLE_DEV_PM_OPS(int3401_proc_thermal_pm, int3401_thermal_suspend, static struct platform_driver int3401_driver = { .probe = int3401_add, - .remove_new = int3401_remove, + .remove = int3401_remove, .driver = { .name = "int3401 thermal", .acpi_match_table = int3401_device_ids, 
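
Note: the int3400 hunks above replace a one-element bin_attribute group with direct device_create_bin_file()/device_remove_bin_file() calls, dropping the attribute-array and attribute_group boilerplate. A minimal sketch of the idiom, using a hypothetical attribute name and buffer (BIN_ATTR_SIMPLE_RO() serves its .private buffer of .size bytes, much as the driver does with its data vault):

#include <linux/device.h>
#include <linux/sysfs.h>

static BIN_ATTR_SIMPLE_RO(fw_blob);	/* hypothetical attribute */

static int example_publish_blob(struct device *dev, void *buf, size_t len)
{
	bin_attr_fw_blob.private = buf;
	bin_attr_fw_blob.size = len;
	/* one call replaces sysfs_create_group() on a single-entry group */
	return device_create_bin_file(dev, &bin_attr_fw_blob);
}

static void example_unpublish_blob(struct device *dev)
{
	device_remove_bin_file(dev, &bin_attr_fw_blob);
}
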
diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c index ab8bfb5a3946..543b03960e99 100644 --- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c @@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(acpi, int3402_thermal_match); static struct platform_driver int3402_thermal_driver = { .probe = int3402_thermal_probe, - .remove_new = int3402_thermal_remove, + .remove = int3402_thermal_remove, .driver = { .name = "int3402 thermal", .acpi_match_table = int3402_thermal_match, diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index c094a422ded3..04aa0afb3b1d 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -281,7 +281,7 @@ MODULE_DEVICE_TABLE(acpi, int3403_device_ids); static struct platform_driver int3403_driver = { .probe = int3403_add, - .remove_new = int3403_remove, + .remove = int3403_remove, .driver = { .name = "int3403 thermal", .acpi_match_table = int3403_device_ids, diff --git a/drivers/thermal/intel/int340x_thermal/int3406_thermal.c b/drivers/thermal/intel/int340x_thermal/int3406_thermal.c index 1c266493c1aa..e21fcbccf4ba 100644 --- a/drivers/thermal/intel/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3406_thermal.c @@ -195,7 +195,7 @@ MODULE_DEVICE_TABLE(acpi, int3406_thermal_match); static struct platform_driver int3406_thermal_driver = { .probe = int3406_thermal_probe, - .remove_new = int3406_thermal_remove, + .remove = int3406_thermal_remove, .driver = { .name = "int3406 thermal", .acpi_match_table = int3406_thermal_match, diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c index 2a703770fc91..678d6ed711b5 100644 --- a/drivers/thermal/k3_bandgap.c +++ b/drivers/thermal/k3_bandgap.c @@ -250,7 +250,7 @@ MODULE_DEVICE_TABLE(of, of_k3_bandgap_match); static struct platform_driver k3_bandgap_sensor_driver = { .probe = k3_bandgap_probe, - .remove_new = k3_bandgap_remove, + .remove = k3_bandgap_remove, .driver = { .name = "k3-soc-thermal", .of_match_table = of_k3_bandgap_match, diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c index 9bc279ac131a..70de6dbf99c5 100644 --- a/drivers/thermal/k3_j72xx_bandgap.c +++ b/drivers/thermal/k3_j72xx_bandgap.c @@ -238,7 +238,7 @@ static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata, K3_VTM_TS_STAT_DTEMP_MASK; dtemp = vtm_get_best_value(s0, s1, s2); - if (dtemp < 0 || dtemp >= TABLE_SIZE) + if (dtemp >= TABLE_SIZE) return -EINVAL; *temp = derived_table[dtemp]; @@ -594,7 +594,7 @@ MODULE_DEVICE_TABLE(of, of_k3_j72xx_bandgap_match); static struct platform_driver k3_j72xx_bandgap_sensor_driver = { .probe = k3_j72xx_bandgap_probe, - .remove_new = k3_j72xx_bandgap_remove, + .remove = k3_j72xx_bandgap_remove, .driver = { .name = "k3-j72xx-soc-thermal", .of_match_table = of_k3_j72xx_bandgap_match, diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c index a18158ebe65f..7c2265231668 100644 --- a/drivers/thermal/kirkwood_thermal.c +++ b/drivers/thermal/kirkwood_thermal.c @@ -102,7 +102,7 @@ MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table); static struct platform_driver kirkwood_thermal_driver = { .probe = kirkwood_thermal_probe, - .remove_new = kirkwood_thermal_exit, + .remove = kirkwood_thermal_exit, .driver = { .name = 
"kirkwood_thermal", .of_match_table = kirkwood_thermal_id_table, diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 1997e91bb3be..07f7f3b7a2fb 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -329,7 +329,7 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp) static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl) { - u32 masks[] = { + static const u32 masks[] = { LVTS_MONINT_OFFSET_SENSOR0, LVTS_MONINT_OFFSET_SENSOR1, LVTS_MONINT_OFFSET_SENSOR2, @@ -424,7 +424,7 @@ static irqreturn_t lvts_ctrl_irq_handler(struct lvts_ctrl *lvts_ctrl) { irqreturn_t iret = IRQ_NONE; u32 value; - u32 masks[] = { + static const u32 masks[] = { LVTS_INT_SENSOR0, LVTS_INT_SENSOR1, LVTS_INT_SENSOR2, @@ -1788,7 +1788,7 @@ static const struct dev_pm_ops lvts_pm_ops = { static struct platform_driver lvts_driver = { .probe = lvts_probe, - .remove_new = lvts_remove, + .remove = lvts_remove, .driver = { .name = "mtk-lvts-thermal", .of_match_table = lvts_of_match, diff --git a/drivers/thermal/pcie_cooling.c b/drivers/thermal/pcie_cooling.c new file mode 100644 index 000000000000..a876d64f1582 --- /dev/null +++ b/drivers/thermal/pcie_cooling.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PCIe cooling device + * + * Copyright (C) 2023-2024 Intel Corporation + */ + +#include <linux/build_bug.h> +#include <linux/cleanup.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pci-bwctrl.h> +#include <linux/slab.h> +#include <linux/sprintf.h> +#include <linux/thermal.h> + +#define COOLING_DEV_TYPE_PREFIX "PCIe_Port_Link_Speed_" + +static int pcie_cooling_get_max_level(struct thermal_cooling_device *cdev, unsigned long *state) +{ + struct pci_dev *port = cdev->devdata; + + /* cooling state 0 is same as the maximum PCIe speed */ + *state = port->subordinate->max_bus_speed - PCIE_SPEED_2_5GT; + + return 0; +} + +static int pcie_cooling_get_cur_level(struct thermal_cooling_device *cdev, unsigned long *state) +{ + struct pci_dev *port = cdev->devdata; + + /* cooling state 0 is same as the maximum PCIe speed */ + *state = cdev->max_state - (port->subordinate->cur_bus_speed - PCIE_SPEED_2_5GT); + + return 0; +} + +static int pcie_cooling_set_cur_level(struct thermal_cooling_device *cdev, unsigned long state) +{ + struct pci_dev *port = cdev->devdata; + enum pci_bus_speed speed; + + /* cooling state 0 is same as the maximum PCIe speed */ + speed = (cdev->max_state - state) + PCIE_SPEED_2_5GT; + + return pcie_set_target_speed(port, speed, true); +} + +static struct thermal_cooling_device_ops pcie_cooling_ops = { + .get_max_state = pcie_cooling_get_max_level, + .get_cur_state = pcie_cooling_get_cur_level, + .set_cur_state = pcie_cooling_set_cur_level, +}; + +struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port) +{ + char *name __free(kfree) = + kasprintf(GFP_KERNEL, COOLING_DEV_TYPE_PREFIX "%s", pci_name(port)); + if (!name) + return ERR_PTR(-ENOMEM); + + return thermal_cooling_device_register(name, port, &pcie_cooling_ops); +} + +void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev) +{ + thermal_cooling_device_unregister(cdev); +} + +/* For bus_speed <-> state arithmetic */ +static_assert(PCIE_SPEED_2_5GT + 1 == PCIE_SPEED_5_0GT); +static_assert(PCIE_SPEED_5_0GT + 1 == PCIE_SPEED_8_0GT); +static_assert(PCIE_SPEED_8_0GT + 1 == PCIE_SPEED_16_0GT); +static_assert(PCIE_SPEED_16_0GT + 1 == PCIE_SPEED_32_0GT); 
+static_assert(PCIE_SPEED_32_0GT + 1 == PCIE_SPEED_64_0GT); + +MODULE_AUTHOR("Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>"); +MODULE_DESCRIPTION("PCIe cooling driver"); diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 5e94a45eba3e..d7f2e6ca92c2 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -938,7 +938,6 @@ static const struct adc_tm5_data adc_tm5_gen2_data_pmic = { static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct device_node *node) { struct adc_tm5_channel *channels; - struct device_node *child; u32 value; int ret; struct device *dev = adc_tm->dev; @@ -982,12 +981,10 @@ static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct device_node * adc_tm->avg_samples = VADC_DEF_AVG_SAMPLES; } - for_each_available_child_of_node(node, child) { + for_each_available_child_of_node_scoped(node, child) { ret = adc_tm5_get_dt_channel_data(adc_tm, channels, child); - if (ret) { - of_node_put(child); + if (ret) return ret; - } channels++; } diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index dc1c4ae2d8b0..1a7874676f68 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -162,28 +162,35 @@ struct tsens_plat_data data_tsens_v1 = { .fields = tsens_v1_regfields, }; -static const struct tsens_ops ops_8956 = { - .init = init_8956, +static const struct tsens_ops ops_common = { + .init = init_common, .calibrate = tsens_calibrate_common, .get_temp = get_temp_tsens_valid, }; -struct tsens_plat_data data_8956 = { +struct tsens_plat_data data_8937 = { .num_sensors = 11, - .ops = &ops_8956, + .ops = &ops_common, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, }; -static const struct tsens_ops ops_8976 = { - .init = init_common, +static const struct tsens_ops ops_8956 = { + .init = init_8956, .calibrate = tsens_calibrate_common, .get_temp = get_temp_tsens_valid, }; +struct tsens_plat_data data_8956 = { + .num_sensors = 11, + .ops = &ops_8956, + .feat = &tsens_v1_feat, + .fields = tsens_v1_regfields, +}; + struct tsens_plat_data data_8976 = { .num_sensors = 11, - .ops = &ops_8976, + .ops = &ops_common, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, }; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 0b4421bf4785..3aa3736181aa 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -1120,6 +1120,9 @@ static const struct of_device_id tsens_table[] = { .compatible = "qcom,msm8916-tsens", .data = &data_8916, }, { + .compatible = "qcom,msm8937-tsens", + .data = &data_8937, + }, { .compatible = "qcom,msm8939-tsens", .data = &data_8939, }, { @@ -1360,7 +1363,7 @@ static void tsens_remove(struct platform_device *pdev) static struct platform_driver tsens_driver = { .probe = tsens_probe, - .remove_new = tsens_remove, + .remove = tsens_remove, .driver = { .name = "qcom-tsens", .pm = &tsens_pm_ops, diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index cab39de045b1..7b36a0318fa6 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -647,7 +647,7 @@ extern struct tsens_plat_data data_8960; extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8974, data_9607; /* TSENS v1 targets */ -extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956; +extern struct tsens_plat_data data_tsens_v1, data_8937, data_8976, data_8956; /* TSENS v2 targets */ extern struct tsens_plat_data data_8996, 
data_ipq8074, data_tsens_v2; diff --git a/drivers/thermal/renesas/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c index 810f86677461..1ec169aeacfc 100644 --- a/drivers/thermal/renesas/rcar_gen3_thermal.c +++ b/drivers/thermal/renesas/rcar_gen3_thermal.c @@ -603,7 +603,7 @@ static struct platform_driver rcar_gen3_thermal_driver = { .of_match_table = rcar_gen3_thermal_dt_ids, }, .probe = rcar_gen3_thermal_probe, - .remove_new = rcar_gen3_thermal_remove, + .remove = rcar_gen3_thermal_remove, }; module_platform_driver(rcar_gen3_thermal_driver); diff --git a/drivers/thermal/renesas/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c index ddc8341e5c3f..00a66ee0a5b0 100644 --- a/drivers/thermal/renesas/rcar_thermal.c +++ b/drivers/thermal/renesas/rcar_thermal.c @@ -579,7 +579,7 @@ static struct platform_driver rcar_thermal_driver = { .of_match_table = rcar_thermal_dt_ids, }, .probe = rcar_thermal_probe, - .remove_new = rcar_thermal_remove, + .remove = rcar_thermal_remove, }; module_platform_driver(rcar_thermal_driver); diff --git a/drivers/thermal/renesas/rzg2l_thermal.c b/drivers/thermal/renesas/rzg2l_thermal.c index 0e1cb9045ee6..b588be628640 100644 --- a/drivers/thermal/renesas/rzg2l_thermal.c +++ b/drivers/thermal/renesas/rzg2l_thermal.c @@ -240,7 +240,7 @@ static struct platform_driver rzg2l_thermal_driver = { .of_match_table = rzg2l_thermal_dt_ids, }, .probe = rzg2l_thermal_probe, - .remove_new = rzg2l_thermal_remove, + .remove = rzg2l_thermal_remove, }; module_platform_driver(rzg2l_thermal_driver); diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 086ed42dd16c..f551df48eef9 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1689,7 +1689,7 @@ static struct platform_driver rockchip_thermal_driver = { .of_match_table = of_rockchip_thermal_match, }, .probe = rockchip_thermal_probe, - .remove_new = rockchip_thermal_remove, + .remove = rockchip_thermal_remove, }; module_platform_driver(rockchip_thermal_driver); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 96cffb2c44ba..47a99b3c5395 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1164,7 +1164,7 @@ static struct platform_driver exynos_tmu_driver = { .of_match_table = exynos_tmu_match, }, .probe = exynos_tmu_probe, - .remove_new = exynos_tmu_remove, + .remove = exynos_tmu_remove, }; module_platform_driver(exynos_tmu_driver); diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index 60a871998b07..bb96be947521 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c @@ -173,7 +173,7 @@ MODULE_DEVICE_TABLE(of, spear_thermal_id_table); static struct platform_driver spear_thermal_driver = { .probe = spear_thermal_probe, - .remove_new = spear_thermal_exit, + .remove = spear_thermal_exit, .driver = { .name = "spear_thermal", .pm = &spear_thermal_pm_ops, diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c index dfd1d529c410..e546067c9621 100644 --- a/drivers/thermal/sprd_thermal.c +++ b/drivers/thermal/sprd_thermal.c @@ -534,7 +534,7 @@ static const struct dev_pm_ops sprd_thermal_pm_ops = { static struct platform_driver sprd_thermal_driver = { .probe = sprd_thm_probe, - .remove_new = sprd_thm_remove, + .remove = sprd_thm_remove, .driver = { .name = "sprd-thermal", .pm = &sprd_thermal_pm_ops, diff --git a/drivers/thermal/st/st_thermal_memmap.c 
b/drivers/thermal/st/st_thermal_memmap.c index 97493d2b2f49..8f76e50ea567 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ b/drivers/thermal/st/st_thermal_memmap.c @@ -174,7 +174,7 @@ static struct platform_driver st_mmap_thermal_driver = { .of_match_table = st_mmap_thermal_of_match, }, .probe = st_mmap_probe, - .remove_new = st_mmap_remove, + .remove = st_mmap_remove, }; module_platform_driver(st_mmap_thermal_driver); diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index ffd988600ed6..6e90eb9f414d 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c @@ -582,7 +582,7 @@ static struct platform_driver stm_thermal_driver = { .of_match_table = stm_thermal_of_match, }, .probe = stm_thermal_probe, - .remove_new = stm_thermal_remove, + .remove = stm_thermal_remove, }; module_platform_driver(stm_thermal_driver); diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index 3203d8bd13a8..22674790629a 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -9,6 +9,7 @@ */ #include <linux/bitmap.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/interrupt.h> @@ -348,19 +349,18 @@ static void sun8i_ths_reset_control_assert(void *data) static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node) { - struct device_node *sram_node; struct platform_device *sram_pdev; struct regmap *regmap = NULL; - sram_node = of_parse_phandle(node, "allwinner,sram", 0); + struct device_node *sram_node __free(device_node) = + of_parse_phandle(node, "allwinner,sram", 0); if (!sram_node) return ERR_PTR(-ENODEV); sram_pdev = of_find_device_by_node(sram_node); if (!sram_pdev) { /* platform device might not be probed yet */ - regmap = ERR_PTR(-EPROBE_DEFER); - goto out_put_node; + return ERR_PTR(-EPROBE_DEFER); } /* If no regmap is found then the other device driver is at fault */ @@ -369,8 +369,7 @@ static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node) regmap = ERR_PTR(-EINVAL); platform_device_put(sram_pdev); -out_put_node: - of_node_put(sram_node); + return regmap; } diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index a023c948afbd..2c5ddf0db40c 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -1651,7 +1651,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct tegra_soctherm *ts = dev_get_drvdata(dev); - struct device_node *np_stc, *np_stcc; + struct device_node *np_stc; const char *name; int i; @@ -1668,7 +1668,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) return; } - for_each_child_of_node(np_stc, np_stcc) { + for_each_child_of_node_scoped(np_stc, np_stcc) { struct soctherm_throt_cfg *stc; struct thermal_cooling_device *tcd; int err; @@ -1683,7 +1683,6 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) if (stc->init) { dev_err(dev, "throttle-cfg: %s: redefined!\n", name); - of_node_put(np_stcc); break; } @@ -2269,7 +2268,7 @@ static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume); static struct platform_driver tegra_soctherm_driver = { .probe = tegra_soctherm_probe, - .remove_new = tegra_soctherm_remove, + .remove = tegra_soctherm_remove, .driver = { .name = "tegra_soctherm", .pm = &tegra_soctherm_pm, diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c index 
72ce14c980cd..997d77ce30d9 100644 --- a/drivers/thermal/tegra/tegra-bpmp-thermal.c +++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c @@ -315,7 +315,7 @@ MODULE_DEVICE_TABLE(of, tegra_bpmp_thermal_of_match); static struct platform_driver tegra_bpmp_thermal_driver = { .probe = tegra_bpmp_thermal_probe, - .remove_new = tegra_bpmp_thermal_remove, + .remove = tegra_bpmp_thermal_remove, .driver = { .name = "tegra-bpmp-thermal", .of_match_table = tegra_bpmp_thermal_of_match, diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 07e09897165f..fab11b98ca49 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -95,13 +95,11 @@ static int thermal_of_populate_trip(struct device_node *np, static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips) { - struct thermal_trip *tt; - struct device_node *trips; int ret, count; *ntrips = 0; - trips = of_get_child_by_name(np, "trips"); + struct device_node *trips __free(device_node) = of_get_child_by_name(np, "trips"); if (!trips) return NULL; @@ -109,39 +107,27 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n if (!count) return NULL; - tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL); - if (!tt) { - ret = -ENOMEM; - goto out_of_node_put; - } - - *ntrips = count; + struct thermal_trip *tt __free(kfree) = kzalloc(sizeof(*tt) * count, GFP_KERNEL); + if (!tt) + return ERR_PTR(-ENOMEM); count = 0; for_each_child_of_node_scoped(trips, trip) { ret = thermal_of_populate_trip(trip, &tt[count++]); if (ret) - goto out_kfree; + return ERR_PTR(ret); } - of_node_put(trips); - - return tt; - -out_kfree: - kfree(tt); -out_of_node_put: - of_node_put(trips); + *ntrips = count; - return ERR_PTR(ret); + return no_free_ptr(tt); } static struct device_node *of_thermal_zone_find(struct device_node *sensor, int id) { - struct device_node *np, *tz; struct of_phandle_args sensor_specs; - np = of_find_node_by_name(NULL, "thermal-zones"); + struct device_node *np __free(device_node) = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { pr_debug("No thermal zones description\n"); return ERR_PTR(-ENODEV); @@ -159,8 +145,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int "#thermal-sensor-cells"); if (count <= 0) { pr_err("%pOFn: missing thermal sensor\n", child); - tz = ERR_PTR(-EINVAL); - goto out; + return ERR_PTR(-EINVAL); } for (i = 0; i < count; i++) { @@ -172,22 +157,18 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int i, &sensor_specs); if (ret < 0) { pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", child, ret); - tz = ERR_PTR(ret); - goto out; + return ERR_PTR(ret); } if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? 
sensor_specs.args[0] : 0)) { pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child); - tz = no_free_ptr(child); - goto out; + return no_free_ptr(child); } } } - tz = ERR_PTR(-ENODEV); -out: - of_node_put(np); - return tz; + + return ERR_PTR(-ENODEV); } static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdelay) @@ -297,7 +278,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, struct cooling_spec *c) { - struct device_node *tz_np, *cm_np, *child; + struct device_node *tz_np, *cm_np; bool result = false; tz_np = thermal_of_zone_get_by_name(tz); @@ -311,7 +292,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, goto out; /* Look up the trip and the cdev in the cooling maps. */ - for_each_child_of_node(cm_np, child) { + for_each_child_of_node_scoped(cm_np, child) { struct device_node *tr_np; int count, i; @@ -330,7 +311,6 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, break; } - of_node_put(child); break; } diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index caadfc61be93..ba43399d0b38 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -1281,7 +1281,7 @@ MODULE_DEVICE_TABLE(of, of_ti_bandgap_match); static struct platform_driver ti_bandgap_sensor_driver = { .probe = ti_bandgap_probe, - .remove_new = ti_bandgap_remove, + .remove = ti_bandgap_remove, .driver = { .name = "ti-soc-thermal", .pm = DEV_PM_OPS, diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c index 0325b7195136..1a04294effea 100644 --- a/drivers/thermal/uniphier_thermal.c +++ b/drivers/thermal/uniphier_thermal.c @@ -371,7 +371,7 @@ MODULE_DEVICE_TABLE(of, uniphier_tm_dt_ids); static struct platform_driver uniphier_tm_driver = { .probe = uniphier_tm_probe, - .remove_new = uniphier_tm_remove, + .remove = uniphier_tm_remove, .driver = { .name = "uniphier-thermal", .of_match_table = uniphier_tm_dt_ids, diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index 8bab2aedc499..6d99a02dd439 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -698,7 +698,6 @@ static int rp2_probe(struct pci_dev *pdev, const struct firmware *fw; struct rp2_card *card; struct rp2_uart_port *ports; - void __iomem * const *bars; int rc; card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); @@ -711,13 +710,16 @@ static int rp2_probe(struct pci_dev *pdev, if (rc) return rc; - rc = pcim_iomap_regions_request_all(pdev, 0x03, DRV_NAME); + rc = pcim_request_all_regions(pdev, DRV_NAME); if (rc) return rc; - bars = pcim_iomap_table(pdev); - card->bar0 = bars[0]; - card->bar1 = bars[1]; + card->bar0 = pcim_iomap(pdev, 0, 0); + if (!card->bar0) + return -ENOMEM; + card->bar1 = pcim_iomap(pdev, 1, 0); + if (!card->bar1) + return -ENOMEM; card->pdev = pdev; rp2_decode_cap(id, &card->n_ports, &card->smpte); diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index dba935c712d6..240ce135bbfb 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -417,13 +417,6 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational); -void ufshcd_mcq_enable_esi(struct ufs_hba *hba) -{ - ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, - REG_UFS_MEM_CFG); -} -EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); - void ufshcd_mcq_enable(struct ufs_hba *hba) { 
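	/*
	 * Illustrative note: ufshcd_rmwl() is the driver's read-modify-write
	 * register helper; setting the MCQ_MODE_SELECT bit of REG_UFS_MEM_CFG
	 * below moves the controller from legacy single-doorbell (SDB)
	 * operation to multi-circular-queue (MCQ) operation.
	 */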
ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG); @@ -437,6 +430,13 @@ void ufshcd_mcq_disable(struct ufs_hba *hba) hba->mcq_enabled = false; } +void ufshcd_mcq_enable_esi(struct ufs_hba *hba) +{ + ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, + REG_UFS_MEM_CFG); +} +EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); + void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg) { ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA); @@ -539,7 +539,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) struct scsi_cmnd *cmd = lrbp->cmd; struct ufs_hw_queue *hwq; void __iomem *reg, *opr_sqd_base; - u32 nexus, id, val, rtc; + u32 nexus, id, val; int err; if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) @@ -573,14 +573,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU, opr_sqd_base + REG_SQRTC); - /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */ + /* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */ reg = opr_sqd_base + REG_SQRTS; err = read_poll_timeout(readl, val, val & SQ_CUS, 20, MCQ_POLL_US, false, reg); - rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)); - if (err || rtc) - dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%d\n", - __func__, id, task_tag, err, rtc); + if (err) + dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n", + __func__, id, task_tag, err); + else + dev_info(hba->dev, + "%s, hwq %d: cleanup return code (RTC) %ld\n", + __func__, id, + FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); if (ufshcd_mcq_sq_start(hba, hwq)) err = -ETIMEDOUT; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index abbe7135a977..6a2685333076 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -298,6 +298,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -349,18 +350,6 @@ static void ufshcd_configure_wb(struct ufs_hba *hba) ufshcd_wb_toggle_buf_flush(hba, true); } -static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) -{ - if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) - scsi_unblock_requests(hba->host); -} - -static void ufshcd_scsi_block_requests(struct ufs_hba *hba) -{ - if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) - scsi_block_requests(hba->host); -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, enum ufs_trace_str_t str_t) { @@ -739,25 +728,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us); * Return: -ETIMEDOUT on error, zero on success. 
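 *
 * (Sketch, for orientation: the rewritten body below delegates to the
 *  generic iopoll helper, whose pattern is
 *	read_poll_timeout(op, val, cond, sleep_us, timeout_us,
 *			  sleep_before_read, args...)
 *  i.e. repeatedly evaluate val = op(args...) until cond holds or
 *  timeout_us elapses, returning 0 on success or -ETIMEDOUT.)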
*/ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, - unsigned long timeout_ms) + u32 val, unsigned long interval_us, + unsigned long timeout_ms) { - int err = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - - /* ignore bits that we don't intend to wait on */ - val = val & mask; + u32 v; - while ((ufshcd_readl(hba, reg) & mask) != val) { - usleep_range(interval_us, interval_us + 50); - if (time_after(jiffies, timeout)) { - if ((ufshcd_readl(hba, reg) & mask) != val) - err = -ETIMEDOUT; - break; - } - } + val &= mask; /* ignore bits that we don't intend to wait on */ - return err; + return read_poll_timeout(ufshcd_readl, v, (v & mask) == val, + interval_us, timeout_ms * 1000, false, hba, reg); } /** @@ -1255,11 +1234,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, static u32 ufshcd_pending_cmds(struct ufs_hba *hba) { const struct scsi_device *sdev; + unsigned long flags; u32 pending = 0; - lockdep_assert_held(hba->host->host_lock); + spin_lock_irqsave(hba->host->host_lock, flags); __shost_for_each_device(sdev, hba->host) pending += sbitmap_weight(&sdev->budget_map); + spin_unlock_irqrestore(hba->host->host_lock, flags); return pending; } @@ -1273,7 +1254,6 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us) { - unsigned long flags; int ret = 0; u32 tm_doorbell; u32 tr_pending; @@ -1281,7 +1261,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ktime_t start; ufshcd_hold(hba); - spin_lock_irqsave(hba->host->host_lock, flags); /* * Wait for all the outstanding tasks/transfer requests. * Verify by checking the doorbell registers are clear. @@ -1302,7 +1281,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); io_schedule_timeout(msecs_to_jiffies(20)); if (ktime_to_us(ktime_sub(ktime_get(), start)) > wait_timeout_us) { @@ -1314,7 +1292,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, */ do_last_check = true; } - spin_lock_irqsave(hba->host->host_lock, flags); } while (tm_doorbell || tr_pending); if (timeout) { @@ -1324,7 +1301,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ret = -EBUSY; } out: - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_release(hba); return ret; } @@ -2411,8 +2387,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) int err; hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; @@ -2551,13 +2525,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result * @hba: per adapter instance * @uic_cmd: UIC command - * @completion: initialize the completion only if this is set to true * * Return: 0 only if success. 
*/ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, - bool completion) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { lockdep_assert_held(&hba->uic_cmd_mutex); @@ -2567,8 +2539,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, return -EIO; } - if (completion) - init_completion(&uic_cmd->done); + init_completion(&uic_cmd->done); uic_cmd->cmd_active = 1; ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -2594,7 +2565,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2775,7 +2746,6 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); - memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); @@ -2878,6 +2848,26 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); } +static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag) +{ + memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr)); + + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = lun; + ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp); +} + +static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + struct scsi_cmnd *cmd, u8 lun, int tag) +{ + __ufshcd_setup_cmd(lrbp, cmd, lun, tag); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + lrbp->req_abort_skip = false; + + ufshcd_comp_scsi_upiu(hba, lrbp); +} + /** * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID * @upiu_wlun_id: UPIU W-LUN id @@ -3010,16 +3000,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_hold(hba); lrbp = &hba->lrb[tag]; - lrbp->cmd = cmd; - lrbp->task_tag = tag; - lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); - ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); - - lrbp->req_abort_skip = false; - - ufshcd_comp_scsi_upiu(hba, lrbp); + ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag); err = ufshcd_map_sg(hba, lrbp); if (err) { @@ -3047,11 +3029,8 @@ out: static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, u8 lun, int tag) { - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = lun; + __ufshcd_setup_cmd(lrbp, NULL, lun, tag); lrbp->intr_cmd = true; /* No interrupt aggregation */ - ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; } @@ -3083,7 +3062,6 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { u32 mask; - unsigned long flags; int err; if (hba->mcq_enabled) { @@ -3103,9 +3081,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) mask = 1U << task_tag; /* clear outstanding transaction before retry */ - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utrl_clear(hba, mask); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. 
@@ -4288,7 +4264,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) reenable_intr = true; } spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, cmd, false); + ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", @@ -4539,6 +4515,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) return -EINVAL; } + if (pwr_info->lane_rx != pwr_info->lane_tx) { + dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. @@ -4822,51 +4806,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop); */ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { - int retry_outer = 3; - int retry_inner; + int retry; -start: - if (ufshcd_is_hba_active(hba)) - /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); + for (retry = 3; retry > 0; retry--) { + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); - /* UniPro link is disabled at this point */ - ufshcd_set_link_off(hba); + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); - ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); - /* start controller initialization sequence */ - ufshcd_hba_start(hba); + /* start controller initialization sequence */ + ufshcd_hba_start(hba); - /* - * To initialize a UFS host controller HCE bit must be set to 1. - * During initialization the HCE bit value changes from 1->0->1. - * When the host controller completes initialization sequence - * it sets the value of HCE bit to 1. The same HCE bit is read back - * to check if the controller has completed initialization sequence. - * So without this delay the value HCE = 1, set in the previous - * instruction might be read back. - * This delay can be changed based on the controller. - */ - ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
+ */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); - /* wait for the host controller to complete initialization */ - retry_inner = 50; - while (!ufshcd_is_hba_active(hba)) { - if (retry_inner) { - retry_inner--; - } else { - dev_err(hba->dev, - "Controller enable failed\n"); - if (retry_outer) { - retry_outer--; - goto start; - } - return -EIO; - } - usleep_range(1000, 1100); + /* wait for the host controller to complete initialization */ + if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, + CONTROLLER_ENABLE, 1000, 50)) + break; + + dev_err(hba->dev, "Enabling the controller failed\n"); } + if (!retry) + return -EIO; + /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); @@ -5258,6 +5235,9 @@ static int ufshcd_device_configure(struct scsi_device *sdev, */ sdev->silence_suspend = 1; + if (hba->vops && hba->vops->config_scsi_dev) + hba->vops->config_scsi_dev(sdev); + ufshcd_crypto_register(hba, q); return 0; @@ -5478,32 +5458,37 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; + struct uic_command *cmd; spin_lock(hba->host->host_lock); + cmd = hba->active_uic_cmd; + if (WARN_ON_ONCE(!cmd)) + goto unlock; + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); - if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { - hba->active_uic_cmd->argument2 |= - ufshcd_get_uic_cmd_result(hba); - hba->active_uic_cmd->argument3 = - ufshcd_get_dme_attr_val(hba); + if (intr_status & UIC_COMMAND_COMPL) { + cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); + cmd->argument3 = ufshcd_get_dme_attr_val(hba); if (!hba->uic_async_done) - hba->active_uic_cmd->cmd_active = 0; - complete(&hba->active_uic_cmd->done); + cmd->cmd_active = 0; + complete(&cmd->done); retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { - hba->active_uic_cmd->cmd_active = 0; + if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) { + cmd->cmd_active = 0; complete(hba->uic_async_done); retval = IRQ_HANDLED; } if (retval == IRQ_HANDLED) - ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, - UFS_CMD_COMP); + ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); + +unlock: spin_unlock(hba->host->host_lock); + return retval; } @@ -6196,12 +6181,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work) u32 status = 0; hba = container_of(work, struct ufs_hba, eeh_work); - ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", __func__, err); - goto out; + return; } trace_ufshcd_exception_event(dev_name(hba->dev), status); @@ -6213,8 +6197,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_temp_exception_event_handler(hba, status); ufs_debugfs_exception_event(hba, status); -out: - ufshcd_scsi_unblock_requests(hba); } /* Complete requests that have door-bell cleared */ @@ -6380,15 +6362,14 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } - ufshcd_scsi_block_requests(hba); /* Wait for ongoing ufshcd_queuecommand() calls to finish. 
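 * (Illustrative note: blk_mq_quiesce_tagset() quiesces every queue in
 * the tagset and waits for in-flight ->queue_rq() calls to drain, which
 * is what lets this series drop the driver-private
 * ufshcd_scsi_block_requests() refcounting; the matching
 * blk_mq_unquiesce_tagset() in the unprepare path resumes dispatch.)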
*/ - blk_mq_wait_quiesce_done(&hba->host->tag_set); + blk_mq_quiesce_tagset(&hba->host->tag_set); cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); @@ -7002,14 +6983,11 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; u32 mask = 1 << tag; - unsigned long flags; if (!test_bit(tag, &hba->outstanding_tasks)) goto out; - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utmrl_clear(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, @@ -7052,12 +7030,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); - /* send command to the controller */ __set_bit(task_tag, &hba->outstanding_tasks); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); spin_unlock_irqrestore(host->host_lock, flags); + /* send command to the controller */ + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); /* wait until the task management command is completed */ @@ -7473,10 +7452,9 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) { struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - int err = 0; + int err; int poll_cnt; u8 resp = 0xF; - u32 reg; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, @@ -7491,46 +7469,27 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) * cmd not pending in the device, check if it is * in transition. */ - dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + dev_info( + hba->dev, + "%s: cmd with tag %d not pending in the device.\n", __func__, tag); - if (hba->mcq_enabled) { - /* MCQ mode */ - if (ufshcd_cmd_inflight(lrbp->cmd)) { - /* sleep for max. 200us same delay as in SDB mode */ - usleep_range(100, 200); - continue; - } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", - __func__, tag); - goto out; - } - - /* Single Doorbell Mode */ - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - if (reg & (1 << tag)) { - /* sleep for max. 200us to stabilize */ - usleep_range(100, 200); - continue; + if (!ufshcd_cmd_inflight(lrbp->cmd)) { + dev_info(hba->dev, + "%s: cmd with tag=%d completed.\n", + __func__, tag); + return 0; } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", - __func__, tag); - goto out; + usleep_range(100, 200); } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", __func__, tag, err); - if (!err) - err = resp; /* service response error */ - goto out; + return err ? : resp; } } - if (!poll_cnt) { - err = -EBUSY; - goto out; - } + if (!poll_cnt) + return -EBUSY; err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_ABORT_TASK, &resp); @@ -7540,7 +7499,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: issued. 
tag = %d, err %d\n", __func__, tag, err); } - goto out; + return err; } err = ufshcd_clear_cmd(hba, tag); @@ -7548,7 +7507,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); -out: return err; } @@ -7672,6 +7630,29 @@ release: } /** + * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result. + * @hba: UFS host controller instance. + * @probe_start: time when the ufshcd_probe_hba() call started. + * @ret: ufshcd_probe_hba() return value. + */ +static void ufshcd_process_probe_result(struct ufs_hba *hba, + ktime_t probe_start, int ret) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), probe_start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); +} + +/** * ufshcd_host_reset_and_restore - reset and restore host controller * @hba: per-adapter instance * @@ -7700,8 +7681,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) err = ufshcd_hba_enable(hba); /* Establish the link again and restore the device */ - if (!err) - err = ufshcd_probe_hba(hba, false); + if (!err) { + ktime_t probe_start = ktime_get(); + + err = ufshcd_device_init(hba, /*init_dev_params=*/false); + if (!err) + err = ufshcd_probe_hba(hba, false); + ufshcd_process_probe_result(hba, probe_start, err); + } if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); @@ -8727,10 +8714,43 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) hba->nutrs); } +static int ufshcd_post_device_init(struct ufs_hba *hba) +{ + int ret; + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + ufshcd_set_timestamp_attr(hba); + + if (!hba->max_pwr_info.is_valid) + return 0; + + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + /* Gear up to HS gear. 
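 * (Illustrative: bRefClkFreq has to describe the actual reference clock
 *  before the gear switch, because the device times its HS operation
 *  against it; that ordering is why ufshcd_set_dev_ref_clk() runs just
 *  above.)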
*/ + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + return ret; + } + + return 0; +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; - struct Scsi_Host *host = hba->host; + + WARN_ON_ONCE(!hba->scsi_host_added); hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -8771,56 +8791,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ret = ufshcd_device_params_init(hba); if (ret) return ret; - if (is_mcq_supported(hba) && !hba->scsi_host_added) { - ufshcd_mcq_enable(hba); - ret = ufshcd_alloc_mcq(hba); - if (!ret) { - ufshcd_config_mcq(hba); - } else { - /* Continue with SDB mode */ - ufshcd_mcq_disable(hba); - use_mcq_mode = false; - dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", - ret); - } - ret = scsi_add_host(host, hba->dev); - if (ret) { - dev_err(hba->dev, "scsi_add_host failed\n"); - return ret; - } - hba->scsi_host_added = true; - } else if (is_mcq_supported(hba)) { - /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ + if (is_mcq_supported(hba) && + hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { ufshcd_config_mcq(hba); ufshcd_mcq_enable(hba); } } - ufshcd_tune_unipro_params(hba); - - /* UFS device is also active now */ - ufshcd_set_ufs_dev_active(hba); - ufshcd_force_reset_auto_bkops(hba); - - ufshcd_set_timestamp_attr(hba); - - /* Gear up to HS gear if supported */ - if (hba->max_pwr_info.is_valid) { - /* - * Set the right value to bRefClkFreq before attempting to - * switch to HS gears. - */ - if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) - ufshcd_set_dev_ref_clk(hba); - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - if (ret) { - dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", - __func__, ret); - return ret; - } - } - - return 0; + return ufshcd_post_device_init(hba); } /** @@ -8834,14 +8812,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) */ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) { - ktime_t start = ktime_get(); - unsigned long flags; int ret; - ret = ufshcd_device_init(hba, init_dev_params); - if (ret) - goto out; - if (!hba->pm_op_in_progress && (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ @@ -8854,13 +8826,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto out; + return ret; } /* Reinit the device */ ret = ufshcd_device_init(hba, init_dev_params); if (ret) - goto out; + return ret; } ufshcd_print_pwr_info(hba); @@ -8880,18 +8852,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) ufshcd_write_ee_control(hba); ufshcd_configure_auto_hibern8(hba); -out: - spin_lock_irqsave(hba->host->host_lock, flags); - if (ret) - hba->ufshcd_state = UFSHCD_STATE_ERROR; - else if (hba->ufshcd_state == UFSHCD_STATE_RESET) - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - spin_unlock_irqrestore(hba->host->host_lock, flags); - - trace_ufshcd_init(dev_name(hba->dev), ret, - ktime_to_us(ktime_sub(ktime_get(), start)), - hba->curr_dev_pwr_mode, hba->uic_link_state); - return ret; + return 0; } /** @@ -8902,11 +8863,14 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + ktime_t probe_start; int ret; 
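	/*
	 * Illustrative note: probe duration is now measured at the call
	 * sites; ufshcd_process_probe_result() applies the resulting
	 * ufshcd_state transition and emits the ufshcd_init trace event,
	 * work that ufshcd_probe_hba() previously did itself.
	 */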
down(&hba->host_sem); /* Initialize hba, detect and initialize UFS device */ + probe_start = ktime_get(); ret = ufshcd_probe_hba(hba, true); + ufshcd_process_probe_result(hba, probe_start, ret); up(&hba->host_sem); if (ret) goto out; @@ -10309,6 +10273,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); */ static int ufshcd_set_dma_mask(struct ufs_hba *hba) { + if (hba->vops && hba->vops->set_dma_mask) + return hba->vops->set_dma_mask(hba); if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) return 0; @@ -10372,6 +10338,74 @@ static const struct blk_mq_ops ufshcd_tmf_ops = { .queue_rq = ufshcd_queue_tmf, }; +static int ufshcd_add_scsi_host(struct ufs_hba *hba) +{ + int err; + + if (is_mcq_supported(hba)) { + ufshcd_mcq_enable(hba); + err = ufshcd_alloc_mcq(hba); + if (!err) { + ufshcd_config_mcq(hba); + } else { + /* Continue with SDB mode */ + ufshcd_mcq_disable(hba); + use_mcq_mode = false; + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", + err); + } + } + if (!is_mcq_supported(hba) && !hba->lsdb_sup) { + dev_err(hba->dev, + "%s: failed to initialize (legacy doorbell mode not supported)\n", + __func__); + return -EINVAL; + } + + err = scsi_add_host(hba->host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + return err; + } + hba->scsi_host_added = true; + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto remove_scsi_host; + hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, + sizeof(*hba->tmf_rqs), GFP_KERNEL); + if (!hba->tmf_rqs) { + err = -ENOMEM; + goto free_tmf_queue; + } + + return 0; + +free_tmf_queue: + blk_mq_destroy_queue(hba->tmf_queue); + blk_put_queue(hba->tmf_queue); + +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); + +remove_scsi_host: + if (hba->scsi_host_added) + scsi_remove_host(hba->host); + + return err; +} + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -10503,42 +10537,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->is_irq_enabled = true; } - if (!is_mcq_supported(hba)) { - if (!hba->lsdb_sup) { - dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n", - __func__); - err = -EINVAL; - goto out_disable; - } - err = scsi_add_host(host, hba->dev); - if (err) { - dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; - } - hba->scsi_host_added = true; - } - - hba->tmf_tag_set = (struct blk_mq_tag_set) { - .nr_hw_queues = 1, - .queue_depth = hba->nutmrs, - .ops = &ufshcd_tmf_ops, - .flags = BLK_MQ_F_NO_SCHED, - }; - err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); - if (err < 0) - goto out_remove_scsi_host; - hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); - if (IS_ERR(hba->tmf_queue)) { - err = PTR_ERR(hba->tmf_queue); - goto free_tmf_tag_set; - } - hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, - sizeof(*hba->tmf_rqs), GFP_KERNEL); - if (!hba->tmf_rqs) { - err = -ENOMEM; - goto free_tmf_queue; - } - /* Reset the attached device */ ufshcd_device_reset(hba); @@ -10550,7 +10548,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable 
failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto free_tmf_queue; + goto out_disable; } /* @@ -10576,7 +10574,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); - atomic_set(&hba->scsi_block_reqs_cnt, 0); + /* * We are assuming that device wasn't put in sleep/power-down * state exclusively during the boot stage before kernel. @@ -10585,6 +10583,49 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ ufshcd_set_ufs_dev_active(hba); + /* Initialize hba, detect and initialize UFS device */ + ktime_t probe_start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + err = ufshcd_link_startup(hba); + if (err) + goto out_disable; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto initialized; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + err = ufshcd_verify_dev_init(hba); + if (err) + goto out_disable; + + /* Initiate UFS initialization, and waiting until completion */ + err = ufshcd_complete_dev_init(hba); + if (err) + goto out_disable; + + err = ufshcd_device_params_init(hba); + if (err) + goto out_disable; + + err = ufshcd_post_device_init(hba); + +initialized: + ufshcd_process_probe_result(hba, probe_start, err); + if (err) + goto out_disable; + + err = ufshcd_add_scsi_host(hba); + if (err) + goto out_disable; + async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); @@ -10592,14 +10633,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_pm_qos_init(hba); return 0; -free_tmf_queue: - blk_mq_destroy_queue(hba->tmf_queue); - blk_put_queue(hba->tmf_queue); -free_tmf_tag_set: - blk_mq_free_tag_set(&hba->tmf_tag_set); -out_remove_scsi_host: - if (hba->scsi_host_added) - scsi_remove_host(hba->host); out_disable: hba->is_irq_enabled = false; ufshcd_hba_exit(hba); diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c index 876781fd6861..0167d8bef71a 100644 --- a/drivers/ufs/host/tc-dwc-g210-pci.c +++ b/drivers/ufs/host/tc-dwc-g210-pci.c @@ -80,14 +80,12 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); - if (err < 0) { + mmio_base = pcim_iomap_region(pdev, 0, UFSHCD); + if (IS_ERR(mmio_base)) { dev_err(&pdev->dev, "request and iomap failed\n"); - return err; + return PTR_ERR(mmio_base); } - mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_alloc_host(&pdev->dev, &hba); if (err) { dev_err(&pdev->dev, "Allocation failed\n"); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 5867e6338562..6548f7a8562f 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -48,6 +48,8 @@ #define HCI_UNIPRO_APB_CLK_CTRL 0x68 #define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) #define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define WLU_EN BIT(31) +#define WLU_BURST_LEN(x) ((x) << 27 | ((x) & 0xF)) #define HCI_GPIO_OUT 0x70 #define HCI_ERR_EN_PA_LAYER 0x78 #define HCI_ERR_EN_DL_LAYER 0x7C @@ -74,6 +76,10 @@ #define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ UNIPRO_PCLK_CTRL_EN |\ UNIPRO_MCLK_CTRL_EN) + +#define HCI_IOP_ACG_DISABLE 0x100 +#define HCI_IOP_ACG_DISABLE_EN BIT(0) + /* Device fatal error */ #define DFES_ERR_EN BIT(31) #define 
DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ @@ -198,15 +204,8 @@ static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) exynos_ufs_ctrl_clkstop(ufs, false); } -static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) -{ - return 0; -} - -static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +static int exynos_ufs_shareability(struct exynos_ufs *ufs) { - struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - /* IO Coherency setting */ if (ufs->sysreg) { return regmap_update_bits(ufs->sysreg, @@ -214,11 +213,32 @@ static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) UFS_SHARABLE, UFS_SHARABLE); } - attr->tx_dif_p_nsec = 3200000; - return 0; } +static int gs101_ufs_drv_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 reg; + + /* Enable WriteBooster */ + hba->caps |= UFSHCD_CAP_WB_EN; + + /* Enable clock gating and hibern8 */ + hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + + /* set ACG to be controlled by UFS_ACG_DISABLE */ + reg = hci_readl(ufs, HCI_IOP_ACG_DISABLE); + hci_writel(ufs, reg & (~HCI_IOP_ACG_DISABLE_EN), HCI_IOP_ACG_DISABLE); + + return exynos_ufs_shareability(ufs); +} + +static int exynosauto_ufs_drv_init(struct exynos_ufs *ufs) +{ + return exynos_ufs_shareability(ufs); +} + static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; @@ -546,6 +566,9 @@ static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR) + return; + t_cfg->tx_linereset_p = exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); t_cfg->tx_linereset_n = @@ -724,6 +747,9 @@ static void exynos_ufs_config_smu(struct exynos_ufs *ufs) { u32 reg, val; + if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE) + return; + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); /* make encryption disabled by default */ @@ -771,6 +797,21 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, exynos_ufs_disable_ov_tm(hba); } +#define UFS_HW_VER_MAJOR_MASK GENMASK(15, 8) + +static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba) +{ + u8 major; + + major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, hba->ufs_version); + + if (major >= 3) + return UFS_HS_G4; + + /* Default is HS-G3 */ + return UFS_HS_G3; +} + static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) @@ -788,6 +829,10 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, ufshcd_init_host_params(&host_params); + /* This driver only support symmetric gear setting e.g. 
hs_tx_gear == hs_rx_gear */ + host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); + host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); + ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); if (ret) { pr_err("%s: failed to determine capabilities\n", __func__); @@ -1429,7 +1474,7 @@ static int exynos_ufs_init(struct ufs_hba *hba) exynos_ufs_fmp_init(hba, ufs); if (ufs->drv_data->drv_init) { - ret = ufs->drv_data->drv_init(dev, ufs); + ret = ufs->drv_data->drv_init(ufs); if (ret) { dev_err(dev, "failed to init drv-data\n"); goto out; @@ -1440,8 +1485,8 @@ static int exynos_ufs_init(struct ufs_hba *hba) if (ret) goto out; exynos_ufs_specify_phy_time_attr(ufs); - if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) - exynos_ufs_config_smu(ufs); + + exynos_ufs_config_smu(ufs); hba->host->dma_alignment = DATA_UNIT_SIZE - 1; return 0; @@ -1484,12 +1529,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); } -static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - if (!enter) { + if (cmd == UIC_CMD_DME_HIBER_EXIT) { if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) exynos_ufs_disable_auto_ctrl_hcc(ufs); exynos_ufs_ungate_clks(ufs); @@ -1517,30 +1562,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) } } -static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); - if (!enter) { - u32 cur_mode = 0; - u32 pwrmode; - - if (ufshcd_is_hs_mode(&ufs->dev_req_params)) - pwrmode = FAST_MODE; - else - pwrmode = SLOW_MODE; - - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); - if (cur_mode != (pwrmode << 4 | pwrmode)) { - dev_warn(hba->dev, "%s: power mode change\n", __func__); - hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; - hba->pwr_info.pwr_tx = cur_mode & 0xf; - ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - } - - if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) - exynos_ufs_establish_connt(ufs); - } else { + if (cmd == UIC_CMD_DME_HIBER_ENTER) { ufs->entry_hibern8_t = ktime_get(); exynos_ufs_gate_clks(ufs); if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) @@ -1627,15 +1653,15 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, } static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, - enum uic_cmd_dme enter, + enum uic_cmd_dme cmd, enum ufs_notify_change_status notify) { switch ((u8)notify) { case PRE_CHANGE: - exynos_ufs_pre_hibern8(hba, enter); + exynos_ufs_pre_hibern8(hba, cmd); break; case POST_CHANGE: - exynos_ufs_post_hibern8(hba, enter); + exynos_ufs_post_hibern8(hba, cmd); break; } } @@ -1891,6 +1917,12 @@ static int gs101_ufs_post_link(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; + /* + * Enable Write Line Unique. This field has to be 0x3 + * to support Write Line Unique transaction on gs101. 
+ */ + hci_writel(ufs, WLU_EN | WLU_BURST_LEN(3), HCI_AXIDMA_RWDATA_BURST_LEN); + exynos_ufs_enable_dbg_mode(hba); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0x3e8); exynos_ufs_disable_dbg_mode(hba); @@ -2036,7 +2068,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, - .drv_init = exynos7_ufs_drv_init, .pre_link = exynos7_ufs_pre_link, .post_link = exynos7_ufs_post_link, .pre_pwr_change = exynos7_ufs_pre_pwr_change, @@ -2045,26 +2076,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { static struct exynos_ufs_uic_attr gs101_uic_attr = { .tx_trailingclks = 0xff, - .tx_dif_p_nsec = 3000000, /* unit: ns */ - .tx_dif_n_nsec = 1000000, /* unit: ns */ - .tx_high_z_cnt_nsec = 20000, /* unit: ns */ - .tx_base_unit_nsec = 100000, /* unit: ns */ - .tx_gran_unit_nsec = 4000, /* unit: ns */ - .tx_sleep_cnt = 1000, /* unit: ns */ - .tx_min_activatetime = 0xa, - .rx_filler_enable = 0x2, - .rx_dif_p_nsec = 1000000, /* unit: ns */ - .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ - .rx_base_unit_nsec = 100000, /* unit: ns */ - .rx_gran_unit_nsec = 4000, /* unit: ns */ - .rx_sleep_cnt = 1280, /* unit: ns */ - .rx_stall_cnt = 320, /* unit: ns */ - .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), - .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), - .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), .pa_dbg_opt_suite1_val = 0x90913C1C, .pa_dbg_opt_suite1_off = PA_GS101_DBG_OPTION_SUITE1, .pa_dbg_opt_suite2_val = 0xE01C115F, @@ -2122,11 +2133,10 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = { UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, - .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | - EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + .opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | EXYNOS_UFS_OPT_UFSPR_SECURE | EXYNOS_UFS_OPT_TIMER_TICK_SELECT, - .drv_init = exynosauto_ufs_drv_init, + .drv_init = gs101_ufs_drv_init, .pre_link = gs101_ufs_pre_link, .post_link = gs101_ufs_post_link, .pre_pwr_change = gs101_ufs_pre_pwr_change, diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h index 1646c4a9bb08..9670dc138d1e 100644 --- a/drivers/ufs/host/ufs-exynos.h +++ b/drivers/ufs/host/ufs-exynos.h @@ -182,7 +182,7 @@ struct exynos_ufs_drv_data { unsigned int quirks; unsigned int opts; /* SoC's specific operations */ - int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*drv_init)(struct exynos_ufs *ufs); int (*pre_link)(struct exynos_ufs *ufs); int (*post_link)(struct exynos_ufs *ufs); int (*pre_pwr_change)(struct exynos_ufs *ufs, diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 9a5919434c4e..06ab1e5e8b6f 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -1780,6 +1780,15 @@ static int ufs_mtk_config_esi(struct ufs_hba *hba) return ufs_mtk_config_mcq(hba, true); } +static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev) +{ + struct ufs_hba *hba = shost_priv(sdev->host); + + dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun); + if (sdev->lun == 2) + blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue); +} + /* * struct ufs_hba_mtk_vops - UFS MTK specific variant operations * @@ -1809,6 +1818,7 @@ static const struct 
ufs_hba_variant_ops ufs_hba_mtk_vops = { .op_runtime_config = ufs_mtk_op_runtime_config, .mcq_config_resource = ufs_mtk_mcq_config_resource, .config_esi = ufs_mtk_config_esi, + .config_scsi_dev = ufs_mtk_config_scsi_dev, }; /** diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index ecdfff2456e3..3b592492e152 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -828,12 +828,28 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); - if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) - hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; - return err; } +/* UFS device-specific quirks */ +static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = { + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, + { .wmanufacturerid = UFS_VENDOR_WDC, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE }, + {} +}; + +static void ufs_qcom_fixup_dev_quirks(struct ufs_hba *hba) +{ + ufshcd_fixup_dev_quirks(hba, ufs_qcom_dev_fixups); +} + static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { return ufshci_version(2, 0); @@ -858,7 +874,8 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->hw_ver.major > 0x3) hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; - if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc")) + if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc") || + of_device_is_compatible(hba->dev->of_node, "qcom,sm8650-ufshc")) hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; } @@ -1801,6 +1818,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .link_startup_notify = ufs_qcom_link_startup_notify, .pwr_change_notify = ufs_qcom_pwr_change_notify, .apply_dev_quirks = ufs_qcom_apply_dev_quirks, + .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 8711e5cbc968..3ff97112e1f6 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba) return -ENOMEM; ufshcd_set_variant(hba, priv); - hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO; return 0; } +static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) +{ + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + static const struct ufs_hba_variant_ops ufs_renesas_vops = { .name = "renesas", .init = ufs_renesas_init, + .set_dma_mask = ufs_renesas_set_dma_mask, .setup_clocks = ufs_renesas_setup_clocks, .hce_enable_notify = ufs_renesas_hce_enable_notify, .dbg_register_dump = ufs_renesas_dbg_register_dump, diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 54e0cc0653a2..ea39c5d5b8cf 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -588,14 +588,12 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - err = 
pcim_iomap_regions(pdev, 1 << 0, UFSHCD); - if (err < 0) { + mmio_base = pcim_iomap_region(pdev, 0, UFSHCD); + if (IS_ERR(mmio_base)) { dev_err(&pdev->dev, "request and iomap failed\n"); - return err; + return PTR_ERR(mmio_base); } - mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_alloc_host(&pdev->dev, &hba); if (err) { dev_err(&pdev->dev, "Allocation failed\n"); diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c index cf719307b3f6..60b2766a69bf 100644 --- a/drivers/usb/typec/tcpm/wcove.c +++ b/drivers/usb/typec/tcpm/wcove.c @@ -621,10 +621,6 @@ static int wcove_typec_probe(struct platform_device *pdev) if (irq < 0) return irq; - irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq); - if (irq < 0) - return irq; - ret = guid_parse(WCOVE_DSM_UUID, &wcove->guid); if (ret) return ret; diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 7d0c83b5b071..8455f08f5d40 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -368,7 +368,6 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr unsigned long lgcd = 0; int log_entity_size; unsigned long size; - u64 start = 0; int err; struct page *pg; unsigned int nsg; @@ -379,10 +378,9 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr struct device *dma = mvdev->vdev.dma_dev; for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); - map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) { + map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { size = maplen(map, mr); lgcd = gcd(lgcd, size); - start += size; } log_entity_size = ilog2(lgcd); diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 0d632ba5d2a3..451c639299eb 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -486,31 +486,11 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, return 0; } -static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, - struct hisi_acc_vf_migration_file *migf) +static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data) { - struct acc_vf_data *vf_data = &migf->vf_data; - struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; struct device *dev = &vf_qm->pdev->dev; int ret; - if (unlikely(qm_wait_dev_not_ready(vf_qm))) { - /* Update state and return with match data */ - vf_data->vf_qm_state = QM_NOT_READY; - hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; - migf->total_length = QM_MATCH_SIZE; - return 0; - } - - vf_data->vf_qm_state = QM_READY; - hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; - - ret = vf_qm_cache_wb(vf_qm); - if (ret) { - dev_err(dev, "failed to writeback QM Cache!\n"); - return ret; - } - ret = qm_get_regs(vf_qm, vf_data); if (ret) return -EINVAL; @@ -536,6 +516,38 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, return -EINVAL; } + return 0; +} + +static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + struct device *dev = &vf_qm->pdev->dev; + int ret; + + if (unlikely(qm_wait_dev_not_ready(vf_qm))) { + /* Update state and return with match data */ + vf_data->vf_qm_state = QM_NOT_READY; + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + migf->total_length = QM_MATCH_SIZE; + return 0; + } + + 
vf_data->vf_qm_state = QM_READY; + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + + ret = vf_qm_cache_wb(vf_qm); + if (ret) { + dev_err(dev, "failed to writeback QM Cache!\n"); + return ret; + } + + ret = vf_qm_read_data(vf_qm, vf_data); + if (ret) + return -EINVAL; + migf->total_length = sizeof(struct acc_vf_data); return 0; } @@ -615,21 +627,43 @@ static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf) mutex_unlock(&migf->lock); } +static void +hisi_acc_debug_migf_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *src_migf) +{ + struct hisi_acc_vf_migration_file *dst_migf = hisi_acc_vdev->debug_migf; + + if (!dst_migf) + return; + + dst_migf->total_length = src_migf->total_length; + memcpy(&dst_migf->vf_data, &src_migf->vf_data, + sizeof(struct acc_vf_data)); +} + static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev) { if (hisi_acc_vdev->resuming_migf) { + hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->resuming_migf); hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf); fput(hisi_acc_vdev->resuming_migf->filp); hisi_acc_vdev->resuming_migf = NULL; } if (hisi_acc_vdev->saving_migf) { + hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->saving_migf); hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf); fput(hisi_acc_vdev->saving_migf->filp); hisi_acc_vdev->saving_migf = NULL; } } +static struct hisi_acc_vf_core_device *hisi_acc_get_vf_dev(struct vfio_device *vdev) +{ + return container_of(vdev, struct hisi_acc_vf_core_device, + core_device.vdev); +} + static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev) { hisi_acc_vdev->vf_qm_state = QM_NOT_READY; @@ -1031,8 +1065,7 @@ static struct file * hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev, enum vfio_device_mig_state new_state) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); enum vfio_device_mig_state next_state; struct file *res = NULL; int ret; @@ -1073,8 +1106,7 @@ static int hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev, enum vfio_device_mig_state *curr_state) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); mutex_lock(&hisi_acc_vdev->state_mutex); *curr_state = hisi_acc_vdev->mig_state; @@ -1276,10 +1308,132 @@ static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int return vfio_pci_core_ioctl(core_vdev, cmd, arg); } +static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + int ret; + + lockdep_assert_held(&hisi_acc_vdev->open_mutex); + /* + * When the device is not opened, the io_base is not mapped. + * The driver cannot perform device read and write operations. 
+ */ + if (!hisi_acc_vdev->dev_opened) { + seq_puts(seq, "device not opened!\n"); + return -EINVAL; + } + + ret = qm_wait_dev_not_ready(vf_qm); + if (ret) { + seq_puts(seq, "VF device not ready!\n"); + return -EBUSY; + } + + return 0; +} + +static int hisi_acc_vf_debug_cmd(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + u64 value; + int ret; + + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_debug_check(seq, vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + return ret; + } + + value = readl(vf_qm->io_base + QM_MB_CMD_SEND_BASE); + if (value == QM_MB_CMD_NOT_READY) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + seq_puts(seq, "mailbox cmd channel not ready!\n"); + return -EINVAL; + } + mutex_unlock(&hisi_acc_vdev->open_mutex); + seq_puts(seq, "mailbox cmd channel ready!\n"); + + return 0; +} + +static int hisi_acc_vf_dev_read(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + size_t vf_data_sz = offsetofend(struct acc_vf_data, padding); + struct acc_vf_data *vf_data; + int ret; + + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_debug_check(seq, vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + return ret; + } + + mutex_lock(&hisi_acc_vdev->state_mutex); + vf_data = kzalloc(sizeof(*vf_data), GFP_KERNEL); + if (!vf_data) { + ret = -ENOMEM; + goto mutex_release; + } + + vf_data->vf_qm_state = hisi_acc_vdev->vf_qm_state; + ret = vf_qm_read_data(&hisi_acc_vdev->vf_qm, vf_data); + if (ret) + goto migf_err; + + seq_hex_dump(seq, "Dev Data:", DUMP_PREFIX_OFFSET, 16, 1, + (const void *)vf_data, vf_data_sz, false); + + seq_printf(seq, + "guest driver load: %u\n" + "data size: %lu\n", + hisi_acc_vdev->vf_qm_state, + sizeof(struct acc_vf_data)); + +migf_err: + kfree(vf_data); +mutex_release: + mutex_unlock(&hisi_acc_vdev->state_mutex); + mutex_unlock(&hisi_acc_vdev->open_mutex); + + return ret; +} + +static int hisi_acc_vf_migf_read(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + size_t vf_data_sz = offsetofend(struct acc_vf_data, padding); + struct hisi_acc_vf_migration_file *debug_migf = hisi_acc_vdev->debug_migf; + + /* Check whether the live migration operation has been performed */ + if (debug_migf->total_length < QM_MATCH_SIZE) { + seq_puts(seq, "device not migrated!\n"); + return -EAGAIN; + } + + seq_hex_dump(seq, "Mig Data:", DUMP_PREFIX_OFFSET, 16, 1, + (const void *)&debug_migf->vf_data, vf_data_sz, false); + seq_printf(seq, "migrate data length: %lu\n", debug_migf->total_length); + + return 0; +} + static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); struct vfio_pci_core_device *vdev = 
&hisi_acc_vdev->core_device; int ret; @@ -1288,12 +1442,16 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) return ret; if (core_vdev->mig_ops) { + mutex_lock(&hisi_acc_vdev->open_mutex); ret = hisi_acc_vf_qm_init(hisi_acc_vdev); if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); vfio_pci_core_disable(vdev); return ret; } hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + hisi_acc_vdev->dev_opened = true; + mutex_unlock(&hisi_acc_vdev->open_mutex); } vfio_pci_core_finish_enable(vdev); @@ -1302,11 +1460,13 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + mutex_lock(&hisi_acc_vdev->open_mutex); + hisi_acc_vdev->dev_opened = false; iounmap(vf_qm->io_base); + mutex_unlock(&hisi_acc_vdev->open_mutex); vfio_pci_core_close_device(core_vdev); } @@ -1318,8 +1478,7 @@ static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = { static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); struct pci_dev *pdev = to_pci_dev(core_vdev->dev); struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev); @@ -1327,6 +1486,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) hisi_acc_vdev->pf_qm = pf_qm; hisi_acc_vdev->vf_dev = pdev; mutex_init(&hisi_acc_vdev->state_mutex); + mutex_init(&hisi_acc_vdev->open_mutex); core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY; core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops; @@ -1372,6 +1532,47 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = { .detach_ioas = vfio_iommufd_physical_detach_ioas, }; +static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct vfio_device *vdev = &hisi_acc_vdev->core_device.vdev; + struct hisi_acc_vf_migration_file *migf; + struct dentry *vfio_dev_migration; + struct dentry *vfio_hisi_acc; + struct device *dev = vdev->dev; + + if (!debugfs_initialized() || + !IS_ENABLED(CONFIG_VFIO_DEBUGFS)) + return; + + if (vdev->ops != &hisi_acc_vfio_pci_migrn_ops) + return; + + vfio_dev_migration = debugfs_lookup("migration", vdev->debug_root); + if (!vfio_dev_migration) { + dev_err(dev, "failed to lookup migration debugfs file!\n"); + return; + } + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return; + hisi_acc_vdev->debug_migf = migf; + + vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration); + debugfs_create_devm_seqfile(dev, "dev_data", vfio_hisi_acc, + hisi_acc_vf_dev_read); + debugfs_create_devm_seqfile(dev, "migf_data", vfio_hisi_acc, + hisi_acc_vf_migf_read); + debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc, + hisi_acc_vf_debug_cmd); +} + +static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + kfree(hisi_acc_vdev->debug_migf); + hisi_acc_vdev->debug_migf = NULL; +} + static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_acc_vf_core_device *hisi_acc_vdev; @@ -1398,6 +1599,8 @@ static int 
hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device); if (ret) goto out_put_vdev; + + hisi_acc_vfio_debug_init(hisi_acc_vdev); return 0; out_put_vdev: @@ -1410,6 +1613,7 @@ static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev) struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev); vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device); + hisi_acc_vf_debugfs_exit(hisi_acc_vdev); vfio_put_device(&hisi_acc_vdev->core_device.vdev); } diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h index 5bab46602fad..245d7537b2bc 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h @@ -32,6 +32,7 @@ #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) #define QM_SQC_VFT_NUM_SHIFT_V2 45 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) +#define QM_MB_CMD_NOT_READY 0xffffffff /* RW regs */ #define QM_REGS_MAX_LEN 7 @@ -99,6 +100,13 @@ struct hisi_acc_vf_migration_file { struct hisi_acc_vf_core_device { struct vfio_pci_core_device core_device; u8 match_done; + /* + * io_base is only valid when dev_opened is true, + * which is protected by open_mutex. + */ + bool dev_opened; + /* Ensure the accuracy of dev_opened operation */ + struct mutex open_mutex; /* For migration state */ struct mutex state_mutex; @@ -107,9 +115,20 @@ struct hisi_acc_vf_core_device { struct pci_dev *vf_dev; struct hisi_qm *pf_qm; struct hisi_qm vf_qm; + /* + * vf_qm_state represents the QM_VF_STATE register value. + * It is set by Guest driver for the ACC VF dev indicating + * the driver has loaded and configured the dev correctly. + */ u32 vf_qm_state; int vf_id; struct hisi_acc_vf_migration_file *resuming_migf; struct hisi_acc_vf_migration_file *saving_migf; + + /* + * It holds migration data corresponding to the last migration + * and is used by the debugfs interface to report it. 
+ */ + struct hisi_acc_vf_migration_file *debug_migf; }; #endif /* HISI_ACC_VFIO_PCI_H */ diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index 41a4b0cf4297..7527e277c898 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -423,6 +423,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, unsigned long filled; unsigned int to_fill; int ret; + int i; to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list)); page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT); @@ -443,7 +444,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, GFP_KERNEL_ACCOUNT); if (ret) - goto err; + goto err_append; buf->allocated_length += filled * PAGE_SIZE; /* clean input for another bulk allocation */ memset(page_list, 0, filled * sizeof(*page_list)); @@ -454,6 +455,9 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, kvfree(page_list); return 0; +err_append: + for (i = filled - 1; i >= 0; i--) + __free_page(page_list[i]); err: kvfree(page_list); return ret; diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 242c23eef452..8833e60d42f5 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -640,14 +640,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track) O_RDONLY); if (IS_ERR(migf->filp)) { ret = PTR_ERR(migf->filp); - goto end; + kfree(migf); + return ERR_PTR(ret); } migf->mvdev = mvdev; - ret = mlx5vf_cmd_alloc_pd(migf); - if (ret) - goto out_free; - stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); init_waitqueue_head(&migf->poll_wait); @@ -663,6 +660,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track) INIT_LIST_HEAD(&migf->buf_list); INIT_LIST_HEAD(&migf->avail_list); spin_lock_init(&migf->list_lock); + + ret = mlx5vf_cmd_alloc_pd(migf); + if (ret) + goto out; + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, &full_size, 0); if (ret) goto out_pd; @@ -692,10 +694,8 @@ out_save: mlx5vf_free_data_buffer(buf); out_pd: mlx5fv_cmd_clean_migf_resources(migf); -out_free: +out: fput(migf->filp); -end: - kfree(migf); return ERR_PTR(ret); } @@ -1016,13 +1016,19 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) O_WRONLY); if (IS_ERR(migf->filp)) { ret = PTR_ERR(migf->filp); - goto end; + kfree(migf); + return ERR_PTR(ret); } + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); migf->mvdev = mvdev; ret = mlx5vf_cmd_alloc_pd(migf); if (ret) - goto out_free; + goto out; buf = mlx5vf_alloc_data_buffer(migf, 0, DMA_TO_DEVICE); if (IS_ERR(buf)) { @@ -1041,20 +1047,13 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) migf->buf_header[0] = buf; migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER; - stream_open(migf->filp->f_inode, migf->filp); - mutex_init(&migf->lock); - INIT_LIST_HEAD(&migf->buf_list); - INIT_LIST_HEAD(&migf->avail_list); - spin_lock_init(&migf->list_lock); return migf; out_buf: mlx5vf_free_data_buffer(migf->buf[0]); out_pd: mlx5vf_cmd_dealloc_pd(migf); -out_free: +out: fput(migf->filp); -end: - kfree(migf); return ERR_PTR(ret); } diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index a7fd018aa548..a467085038f0 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -866,6 
+866,8 @@ static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = { { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) }, /* GH200 480GB */ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) }, + /* GH200 SKU */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) }, {} }; diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c index be3644ced17b..c78cb6de9390 100644 --- a/drivers/vfio/pci/qat/main.c +++ b/drivers/vfio/pci/qat/main.c @@ -304,7 +304,7 @@ static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf, offs = &filp->f_pos; if (*offs < 0 || - check_add_overflow((loff_t)len, *offs, &end)) + check_add_overflow(len, *offs, &end)) return -EOVERFLOW; if (end > mig_dev->state_size) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 97422aafaa7b..ea2745c1ac5e 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -313,6 +313,10 @@ static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos, return count; } +static struct perm_bits direct_ro_perms = { + .readfn = vfio_direct_config_read, +}; + /* Default capability regions to read-only, no-virtualization */ static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } @@ -1897,9 +1901,17 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user cap_start = *ppos; } else { if (*ppos >= PCI_CFG_SPACE_SIZE) { - WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); + /* + * We can get a cap_id that exceeds PCI_EXT_CAP_ID_MAX + * if we're hiding an unknown capability at the start + * of the extended capability list. Use default, ro + * access, which will virtualize the id and next values. + */ + if (cap_id > PCI_EXT_CAP_ID_MAX) + perm = &direct_ro_perms; + else + perm = &ecap_perms[cap_id]; - perm = &ecap_perms[cap_id]; cap_start = vfio_find_cap_start(vdev, *ppos); } else { WARN_ON(cap_id > PCI_CAP_ID_MAX); diff --git a/drivers/vfio/pci/virtio/Kconfig b/drivers/vfio/pci/virtio/Kconfig index bd80eca4a196..2770f7eb702c 100644 --- a/drivers/vfio/pci/virtio/Kconfig +++ b/drivers/vfio/pci/virtio/Kconfig @@ -1,15 +1,31 @@ # SPDX-License-Identifier: GPL-2.0-only config VIRTIO_VFIO_PCI - tristate "VFIO support for VIRTIO NET PCI devices" - depends on VIRTIO_PCI && VIRTIO_PCI_ADMIN_LEGACY - select VFIO_PCI_CORE - help - This provides support for exposing VIRTIO NET VF devices which support - legacy IO access, using the VFIO framework that can work with a legacy - virtio driver in the guest. - Based on PCIe spec, VFs do not support I/O Space. - As of that this driver emulates I/O BAR in software to let a VF be - seen as a transitional device by its users and let it work with - a legacy driver. - - If you don't know what to do here, say N. + tristate "VFIO support for VIRTIO NET PCI VF devices" + depends on VIRTIO_PCI + select VFIO_PCI_CORE + help + This provides migration support for VIRTIO NET PCI VF devices + using the VFIO framework. Migration support requires the + SR-IOV PF device to support specific VIRTIO extensions; + otherwise this driver provides no additional functionality + beyond vfio-pci. + + Migration support in this driver relies on dirty page tracking + provided by the IOMMU hardware and exposed through IOMMUFD; any + other use case is not recommended. + + If you don't know what to do here, say N. 
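[Editor's note: the Kconfig hunk above splits optional legacy I/O support out of the tristate driver into a bool sub-option, added next. As a general sketch of this pattern -- hypothetical names, not part of this patch -- such an option typically gates an extra object file in the Makefile plus a config-guarded API, optionally with inline stubs so callers need no #ifdef:

/* foo_feature.h -- illustrative sketch only */
struct foo_dev;

#ifdef CONFIG_FOO_FEATURE
/* Real implementations live in foo_feature.c, built only when set. */
int foo_feature_init(struct foo_dev *fdev);
void foo_feature_release(struct foo_dev *fdev);
#else
/* Stubs compile away entirely when the option is disabled. */
static inline int foo_feature_init(struct foo_dev *fdev) { return 0; }
static inline void foo_feature_release(struct foo_dev *fdev) { }
#endif

The common.h introduced later in this patch takes the other route: it declares the legacy I/O prototypes only under CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY and keeps explicit #ifdef blocks at the call sites.]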
+ +config VIRTIO_VFIO_PCI_ADMIN_LEGACY + bool "Legacy I/O support for VIRTIO NET PCI VF devices" + depends on VIRTIO_VFIO_PCI && VIRTIO_PCI_ADMIN_LEGACY + default y + help + This extends the virtio-vfio-pci driver to support legacy I/O + access, allowing use of legacy virtio drivers with VIRTIO NET + PCI VF devices. Legacy I/O support requires the SR-IOV PF + device to support and enable specific VIRTIO extensions; + otherwise this driver provides no additional functionality + beyond vfio-pci. + + If you don't know what to do here, say N. diff --git a/drivers/vfio/pci/virtio/Makefile b/drivers/vfio/pci/virtio/Makefile index 7171105baf33..d9b0bb40d6b3 100644 --- a/drivers/vfio/pci/virtio/Makefile +++ b/drivers/vfio/pci/virtio/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio-vfio-pci.o -virtio-vfio-pci-y := main.o +virtio-vfio-pci-y := main.o migrate.o +virtio-vfio-pci-$(CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY) += legacy_io.o diff --git a/drivers/vfio/pci/virtio/common.h b/drivers/vfio/pci/virtio/common.h new file mode 100644 index 000000000000..c7d7e27af386 --- /dev/null +++ b/drivers/vfio/pci/virtio/common.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef VIRTIO_VFIO_COMMON_H +#define VIRTIO_VFIO_COMMON_H + +#include <linux/kernel.h> +#include <linux/virtio.h> +#include <linux/vfio_pci_core.h> +#include <linux/virtio_pci.h> + +enum virtiovf_migf_state { + VIRTIOVF_MIGF_STATE_ERROR = 1, + VIRTIOVF_MIGF_STATE_PRECOPY = 2, + VIRTIOVF_MIGF_STATE_COMPLETE = 3, +}; + +enum virtiovf_load_state { + VIRTIOVF_LOAD_STATE_READ_HEADER, + VIRTIOVF_LOAD_STATE_PREP_HEADER_DATA, + VIRTIOVF_LOAD_STATE_READ_HEADER_DATA, + VIRTIOVF_LOAD_STATE_PREP_CHUNK, + VIRTIOVF_LOAD_STATE_READ_CHUNK, + VIRTIOVF_LOAD_STATE_LOAD_CHUNK, +}; + +struct virtiovf_data_buffer { + struct sg_append_table table; + loff_t start_pos; + u64 length; + u64 allocated_length; + struct list_head buf_elm; + u8 include_header_object:1; + struct virtiovf_migration_file *migf; + /* Optimize virtiovf_get_migration_page() for sequential access */ + struct scatterlist *last_offset_sg; + unsigned int sg_last_entry; + unsigned long last_offset; +}; + +enum virtiovf_migf_header_flags { + VIRTIOVF_MIGF_HEADER_FLAGS_TAG_MANDATORY = 0, + VIRTIOVF_MIGF_HEADER_FLAGS_TAG_OPTIONAL = 1 << 0, +}; + +enum virtiovf_migf_header_tag { + VIRTIOVF_MIGF_HEADER_TAG_DEVICE_DATA = 0, +}; + +struct virtiovf_migration_header { + __le64 record_size; + /* For future use in case we may need to change the kernel protocol */ + __le32 flags; /* Use virtiovf_migf_header_flags */ + __le32 tag; /* Use virtiovf_migf_header_tag */ + __u8 data[]; /* Its size is given by record_size */ +}; + +struct virtiovf_migration_file { + struct file *filp; + /* synchronize access to the file state */ + struct mutex lock; + loff_t max_pos; + u64 pre_copy_initial_bytes; + struct ratelimit_state pre_copy_rl_state; + u64 record_size; + u32 record_tag; + u8 has_obj_id:1; + u32 obj_id; + enum virtiovf_migf_state state; + enum virtiovf_load_state load_state; + /* synchronize access to the lists */ + spinlock_t list_lock; + struct list_head buf_list; + struct list_head avail_list; + struct virtiovf_data_buffer *buf; + struct virtiovf_data_buffer *buf_header; + struct virtiovf_pci_core_device *virtvdev; +}; + +struct virtiovf_pci_core_device { + struct vfio_pci_core_device core_device; +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + u8 *bar0_virtual_buf; + /* synchronize access to the virtual buf */ + struct mutex bar_mutex; + 
void __iomem *notify_addr; + u64 notify_offset; + __le32 pci_base_addr_0; + __le16 pci_cmd; + u8 bar0_virtual_buf_size; + u8 notify_bar; +#endif + + /* LM related */ + u8 migrate_cap:1; + u8 deferred_reset:1; + /* protect migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + /* protect the reset_done flow */ + spinlock_t reset_lock; + struct virtiovf_migration_file *resuming_migf; + struct virtiovf_migration_file *saving_migf; +}; + +void virtiovf_set_migratable(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_open_migration(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_close_migration(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_migration_reset_done(struct pci_dev *pdev); + +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY +int virtiovf_open_legacy_io(struct virtiovf_pci_core_device *virtvdev); +long virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg); +int virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg); +ssize_t virtiovf_pci_core_write(struct vfio_device *core_vdev, + const char __user *buf, size_t count, + loff_t *ppos); +ssize_t virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf, + size_t count, loff_t *ppos); +bool virtiovf_support_legacy_io(struct pci_dev *pdev); +int virtiovf_init_legacy_io(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_release_legacy_io(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_legacy_io_reset_done(struct pci_dev *pdev); +#endif + +#endif /* VIRTIO_VFIO_COMMON_H */ diff --git a/drivers/vfio/pci/virtio/legacy_io.c b/drivers/vfio/pci/virtio/legacy_io.c new file mode 100644 index 000000000000..20382ee15fac --- /dev/null +++ b/drivers/vfio/pci/virtio/legacy_io.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/vfio_pci_core.h> +#include <linux/virtio_pci.h> +#include <linux/virtio_net.h> +#include <linux/virtio_pci_admin.h> + +#include "common.h" + +static int +virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev, + loff_t pos, char __user *buf, + size_t count, bool read) +{ + bool msix_enabled = + (virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX); + struct pci_dev *pdev = virtvdev->core_device.pdev; + u8 *bar0_buf = virtvdev->bar0_virtual_buf; + bool common; + u8 offset; + int ret; + + common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled); + /* offset within the relevant configuration area */ + offset = common ? 
pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled); + mutex_lock(&virtvdev->bar_mutex); + if (read) { + if (common) + ret = virtio_pci_admin_legacy_common_io_read(pdev, offset, + count, bar0_buf + pos); + else + ret = virtio_pci_admin_legacy_device_io_read(pdev, offset, + count, bar0_buf + pos); + if (ret) + goto out; + if (copy_to_user(buf, bar0_buf + pos, count)) + ret = -EFAULT; + } else { + if (copy_from_user(bar0_buf + pos, buf, count)) { + ret = -EFAULT; + goto out; + } + + if (common) + ret = virtio_pci_admin_legacy_common_io_write(pdev, offset, + count, bar0_buf + pos); + else + ret = virtio_pci_admin_legacy_device_io_write(pdev, offset, + count, bar0_buf + pos); + } +out: + mutex_unlock(&virtvdev->bar_mutex); + return ret; +} + +static int +virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev, + loff_t pos, char __user *buf, + size_t count, bool read) +{ + struct vfio_pci_core_device *core_device = &virtvdev->core_device; + struct pci_dev *pdev = core_device->pdev; + u16 queue_notify; + int ret; + + if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO)) + return -EIO; + + if (pos + count > virtvdev->bar0_virtual_buf_size) + return -EINVAL; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) { + pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret); + return -EIO; + } + + switch (pos) { + case VIRTIO_PCI_QUEUE_NOTIFY: + if (count != sizeof(queue_notify)) { + ret = -EINVAL; + goto end; + } + if (read) { + ret = vfio_pci_core_ioread16(core_device, true, &queue_notify, + virtvdev->notify_addr); + if (ret) + goto end; + if (copy_to_user(buf, &queue_notify, + sizeof(queue_notify))) { + ret = -EFAULT; + goto end; + } + } else { + if (copy_from_user(&queue_notify, buf, count)) { + ret = -EFAULT; + goto end; + } + ret = vfio_pci_core_iowrite16(core_device, true, queue_notify, + virtvdev->notify_addr); + } + break; + default: + ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count, + read); + } + +end: + pm_runtime_put(&pdev->dev); + return ret ? 
ret : count; +} + +static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev, + char __user *buf, size_t count, + loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + __le32 val32; + __le16 val16; + u8 val8; + int ret; + + ret = vfio_pci_core_read(core_vdev, buf, count, ppos); + if (ret < 0) + return ret; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID, + sizeof(val16), &copy_offset, + &copy_count, &register_offset)) { + val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count)) + return -EFAULT; + } + + if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) && + vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, + sizeof(val16), &copy_offset, + &copy_count, &register_offset)) { + if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset, + copy_count)) + return -EFAULT; + val16 |= cpu_to_le16(PCI_COMMAND_IO); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, + copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_REVISION_ID, + sizeof(val8), &copy_offset, + &copy_count, &register_offset)) { + /* Transitional needs to have revision 0 */ + val8 = 0; + if (copy_to_user(buf + copy_offset, &val8, copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, + sizeof(val32), &copy_offset, + &copy_count, &register_offset)) { + u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1); + u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0); + + val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO); + if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, + sizeof(val16), &copy_offset, + &copy_count, &register_offset)) { + /* + * Transitional devices use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. 
+ */ + val16 = cpu_to_le16(VIRTIO_ID_NET); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, + copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, + sizeof(val16), &copy_offset, + &copy_count, &register_offset)) { + val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, + copy_count)) + return -EFAULT; + } + + return count; +} + +ssize_t virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + + if (!count) + return 0; + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return virtiovf_pci_read_config(core_vdev, buf, count, ppos); + + if (index == VFIO_PCI_BAR0_REGION_INDEX) + return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true); + + return vfio_pci_core_read(core_vdev, buf, count, ppos); +} + +static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev, + const char __user *buf, size_t count, + loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, + sizeof(virtvdev->pci_cmd), + &copy_offset, &copy_count, + &register_offset)) { + if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset, + buf + copy_offset, + copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, + sizeof(virtvdev->pci_base_addr_0), + &copy_offset, &copy_count, + &register_offset)) { + if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset, + buf + copy_offset, + copy_count)) + return -EFAULT; + } + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +ssize_t virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + + if (!count) + return 0; + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return virtiovf_pci_write_config(core_vdev, buf, count, ppos); + + if (index == VFIO_PCI_BAR0_REGION_INDEX) + return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false); + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +int virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + unsigned long minsz = offsetofend(struct vfio_region_info, offset); + void __user *uarg = (void __user *)arg; + struct vfio_region_info info = {}; + + if (copy_from_user(&info, uarg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_BAR0_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = virtvdev->bar0_virtual_buf_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + return copy_to_user(uarg, 
&info, minsz) ? -EFAULT : 0; + default: + return vfio_pci_core_ioctl(core_vdev, cmd, arg); + } +} + +long virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case VFIO_DEVICE_GET_REGION_INFO: + return virtiovf_pci_ioctl_get_region_info(core_vdev, cmd, arg); + default: + return vfio_pci_core_ioctl(core_vdev, cmd, arg); + } +} + +static int virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev) +{ + struct vfio_pci_core_device *core_device = &virtvdev->core_device; + int ret; + + /* + * Setup the BAR where the 'notify' exists to be used by vfio as well + * This will let us mmap it only once and use it when needed. + */ + ret = vfio_pci_core_setup_barmap(core_device, + virtvdev->notify_bar); + if (ret) + return ret; + + virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] + + virtvdev->notify_offset; + return 0; +} + +int virtiovf_open_legacy_io(struct virtiovf_pci_core_device *virtvdev) +{ + if (!virtvdev->bar0_virtual_buf) + return 0; + + /* + * Upon close_device() the vfio_pci_core_disable() is called + * and will close all the previous mmaps, so it seems that the + * valid life cycle for the 'notify' addr is per open/close. + */ + return virtiovf_set_notify_addr(virtvdev); +} + +static int virtiovf_get_device_config_size(unsigned short device) +{ + /* Network card */ + return offsetofend(struct virtio_net_config, status); +} + +static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev) +{ + u64 offset; + int ret; + u8 bar; + + ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev, + VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM, + &bar, &offset); + if (ret) + return ret; + + virtvdev->notify_bar = bar; + virtvdev->notify_offset = offset; + return 0; +} + +static bool virtiovf_bar0_exists(struct pci_dev *pdev) +{ + struct resource *res = pdev->resource; + + return res->flags; +} + +bool virtiovf_support_legacy_io(struct pci_dev *pdev) +{ + return virtio_pci_admin_has_legacy_io(pdev) && !virtiovf_bar0_exists(pdev); +} + +int virtiovf_init_legacy_io(struct virtiovf_pci_core_device *virtvdev) +{ + struct pci_dev *pdev = virtvdev->core_device.pdev; + int ret; + + ret = virtiovf_read_notify_info(virtvdev); + if (ret) + return ret; + + virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) + + virtiovf_get_device_config_size(pdev->device); + BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size)); + virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size, + GFP_KERNEL); + if (!virtvdev->bar0_virtual_buf) + return -ENOMEM; + mutex_init(&virtvdev->bar_mutex); + return 0; +} + +void virtiovf_release_legacy_io(struct virtiovf_pci_core_device *virtvdev) +{ + kfree(virtvdev->bar0_virtual_buf); +} + +void virtiovf_legacy_io_reset_done(struct pci_dev *pdev) +{ + struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev); + + virtvdev->pci_cmd = 0; +} diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c index b5d3a8c5bbc9..d534d48c4163 100644 --- a/drivers/vfio/pci/virtio/main.c +++ b/drivers/vfio/pci/virtio/main.c @@ -16,347 +16,12 @@ #include <linux/virtio_net.h> #include <linux/virtio_pci_admin.h> -struct virtiovf_pci_core_device { - struct vfio_pci_core_device core_device; - u8 *bar0_virtual_buf; - /* synchronize access to the virtual buf */ - struct mutex bar_mutex; - void __iomem *notify_addr; - u64 notify_offset; - __le32 pci_base_addr_0; - __le16 pci_cmd; - u8 bar0_virtual_buf_size; - u8 notify_bar; -}; 
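[Editor's note: the code removed from main.c in the hunk below is relocated, essentially unchanged, to common.h and legacy_io.c introduced earlier in this patch. Related cleanups in this series (see the hisi_acc changes above) wrap the repeated container_of() casts in a small helper; a generic sketch of that idiom -- hypothetical names, not part of this patch:

/* Recover the driver-private wrapper from its embedded vfio_device. */
struct foo_core_device {
	struct vfio_pci_core_device core_device;
	/* driver-private state follows */
};

static struct foo_core_device *foo_get_dev(struct vfio_device *vdev)
{
	return container_of(vdev, struct foo_core_device, core_device.vdev);
}

container_of() compiles down to constant pointer arithmetic, so the helper adds no runtime cost while keeping each vfio callback to a single readable cast.]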
- -static int -virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev, - loff_t pos, char __user *buf, - size_t count, bool read) -{ - bool msix_enabled = - (virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX); - struct pci_dev *pdev = virtvdev->core_device.pdev; - u8 *bar0_buf = virtvdev->bar0_virtual_buf; - bool common; - u8 offset; - int ret; - - common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled); - /* offset within the relevant configuration area */ - offset = common ? pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled); - mutex_lock(&virtvdev->bar_mutex); - if (read) { - if (common) - ret = virtio_pci_admin_legacy_common_io_read(pdev, offset, - count, bar0_buf + pos); - else - ret = virtio_pci_admin_legacy_device_io_read(pdev, offset, - count, bar0_buf + pos); - if (ret) - goto out; - if (copy_to_user(buf, bar0_buf + pos, count)) - ret = -EFAULT; - } else { - if (copy_from_user(bar0_buf + pos, buf, count)) { - ret = -EFAULT; - goto out; - } - - if (common) - ret = virtio_pci_admin_legacy_common_io_write(pdev, offset, - count, bar0_buf + pos); - else - ret = virtio_pci_admin_legacy_device_io_write(pdev, offset, - count, bar0_buf + pos); - } -out: - mutex_unlock(&virtvdev->bar_mutex); - return ret; -} - -static int -virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev, - loff_t pos, char __user *buf, - size_t count, bool read) -{ - struct vfio_pci_core_device *core_device = &virtvdev->core_device; - struct pci_dev *pdev = core_device->pdev; - u16 queue_notify; - int ret; - - if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO)) - return -EIO; - - if (pos + count > virtvdev->bar0_virtual_buf_size) - return -EINVAL; - - ret = pm_runtime_resume_and_get(&pdev->dev); - if (ret) { - pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret); - return -EIO; - } - - switch (pos) { - case VIRTIO_PCI_QUEUE_NOTIFY: - if (count != sizeof(queue_notify)) { - ret = -EINVAL; - goto end; - } - if (read) { - ret = vfio_pci_core_ioread16(core_device, true, &queue_notify, - virtvdev->notify_addr); - if (ret) - goto end; - if (copy_to_user(buf, &queue_notify, - sizeof(queue_notify))) { - ret = -EFAULT; - goto end; - } - } else { - if (copy_from_user(&queue_notify, buf, count)) { - ret = -EFAULT; - goto end; - } - ret = vfio_pci_core_iowrite16(core_device, true, queue_notify, - virtvdev->notify_addr); - } - break; - default: - ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count, - read); - } - -end: - pm_runtime_put(&pdev->dev); - return ret ? 
ret : count; -} - -static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev, - char __user *buf, size_t count, - loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - size_t register_offset; - loff_t copy_offset; - size_t copy_count; - __le32 val32; - __le16 val16; - u8 val8; - int ret; - - ret = vfio_pci_core_read(core_vdev, buf, count, ppos); - if (ret < 0) - return ret; - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID, - sizeof(val16), &copy_offset, - &copy_count, &register_offset)) { - val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count)) - return -EFAULT; - } - - if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) && - vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, - sizeof(val16), &copy_offset, - &copy_count, &register_offset)) { - if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset, - copy_count)) - return -EFAULT; - val16 |= cpu_to_le16(PCI_COMMAND_IO); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, - copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_REVISION_ID, - sizeof(val8), &copy_offset, - &copy_count, &register_offset)) { - /* Transional needs to have revision 0 */ - val8 = 0; - if (copy_to_user(buf + copy_offset, &val8, copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, - sizeof(val32), &copy_offset, - &copy_count, &register_offset)) { - u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1); - u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0); - - val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO); - if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, - sizeof(val16), &copy_offset, - &copy_count, &register_offset)) { - /* - * Transitional devices use the PCI subsystem device id as - * virtio device id, same as legacy driver always did. 
- */ - val16 = cpu_to_le16(VIRTIO_ID_NET); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, - copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, - sizeof(val16), &copy_offset, - &copy_count, &register_offset)) { - val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, - copy_count)) - return -EFAULT; - } - - return count; -} - -static ssize_t -virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf, - size_t count, loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - - if (!count) - return 0; - - if (index == VFIO_PCI_CONFIG_REGION_INDEX) - return virtiovf_pci_read_config(core_vdev, buf, count, ppos); - - if (index == VFIO_PCI_BAR0_REGION_INDEX) - return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true); - - return vfio_pci_core_read(core_vdev, buf, count, ppos); -} - -static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - size_t register_offset; - loff_t copy_offset; - size_t copy_count; - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, - sizeof(virtvdev->pci_cmd), - &copy_offset, &copy_count, - &register_offset)) { - if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset, - buf + copy_offset, - copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, - sizeof(virtvdev->pci_base_addr_0), - &copy_offset, &copy_count, - &register_offset)) { - if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset, - buf + copy_offset, - copy_count)) - return -EFAULT; - } - - return vfio_pci_core_write(core_vdev, buf, count, ppos); -} - -static ssize_t -virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - - if (!count) - return 0; - - if (index == VFIO_PCI_CONFIG_REGION_INDEX) - return virtiovf_pci_write_config(core_vdev, buf, count, ppos); - - if (index == VFIO_PCI_BAR0_REGION_INDEX) - return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false); - - return vfio_pci_core_write(core_vdev, buf, count, ppos); -} - -static int -virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev, - unsigned int cmd, unsigned long arg) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - unsigned long minsz = offsetofend(struct vfio_region_info, offset); - void __user *uarg = (void __user *)arg; - struct vfio_region_info info = {}; - - if (copy_from_user(&info, uarg, minsz)) - return -EFAULT; - - if (info.argsz < minsz) - return -EINVAL; - - switch (info.index) { - case VFIO_PCI_BAR0_REGION_INDEX: - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = virtvdev->bar0_virtual_buf_size; - info.flags = VFIO_REGION_INFO_FLAG_READ | - VFIO_REGION_INFO_FLAG_WRITE; - 
return copy_to_user(uarg, &info, minsz) ? -EFAULT : 0; - default: - return vfio_pci_core_ioctl(core_vdev, cmd, arg); - } -} - -static long -virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, - unsigned long arg) -{ - switch (cmd) { - case VFIO_DEVICE_GET_REGION_INFO: - return virtiovf_pci_ioctl_get_region_info(core_vdev, cmd, arg); - default: - return vfio_pci_core_ioctl(core_vdev, cmd, arg); - } -} - -static int -virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev) -{ - struct vfio_pci_core_device *core_device = &virtvdev->core_device; - int ret; - - /* - * Setup the BAR where the 'notify' exists to be used by vfio as well - * This will let us mmap it only once and use it when needed. - */ - ret = vfio_pci_core_setup_barmap(core_device, - virtvdev->notify_bar); - if (ret) - return ret; - - virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] + - virtvdev->notify_offset; - return 0; -} +#include "common.h" static int virtiovf_pci_open_device(struct vfio_device *core_vdev) { - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); struct vfio_pci_core_device *vdev = &virtvdev->core_device; int ret; @@ -364,88 +29,84 @@ static int virtiovf_pci_open_device(struct vfio_device *core_vdev) if (ret) return ret; - if (virtvdev->bar0_virtual_buf) { - /* - * Upon close_device() the vfio_pci_core_disable() is called - * and will close all the previous mmaps, so it seems that the - * valid life cycle for the 'notify' addr is per open/close. - */ - ret = virtiovf_set_notify_addr(virtvdev); - if (ret) { - vfio_pci_core_disable(vdev); - return ret; - } +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + ret = virtiovf_open_legacy_io(virtvdev); + if (ret) { + vfio_pci_core_disable(vdev); + return ret; } +#endif + virtiovf_open_migration(virtvdev); vfio_pci_core_finish_enable(vdev); return 0; } -static int virtiovf_get_device_config_size(unsigned short device) +static void virtiovf_pci_close_device(struct vfio_device *core_vdev) { - /* Network card */ - return offsetofend(struct virtio_net_config, status); -} - -static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev) -{ - u64 offset; - int ret; - u8 bar; + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); - ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev, - VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM, - &bar, &offset); - if (ret) - return ret; - - virtvdev->notify_bar = bar; - virtvdev->notify_offset = offset; - return 0; + virtiovf_close_migration(virtvdev); + vfio_pci_core_close_device(core_vdev); } +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY static int virtiovf_pci_init_device(struct vfio_device *core_vdev) { - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - struct pci_dev *pdev; + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); int ret; ret = vfio_pci_core_init_dev(core_vdev); if (ret) return ret; - pdev = virtvdev->core_device.pdev; - ret = virtiovf_read_notify_info(virtvdev); - if (ret) - return ret; - - virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) + - virtiovf_get_device_config_size(pdev->device); - 
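
For reference, the transitional path being removed here sized its virtual BAR0 as the legacy virtio header plus the virtio-net config fields up to 'status', and the BUILD_BUG_ON just below depends on that sum being a power of two. A standalone sketch of the arithmetic (not part of the patch; the 20/24-byte legacy header sizes and the trimmed struct are assumptions based on the virtio spec):

```c
#include <stddef.h>
#include <stdio.h>

/* Legacy virtio PCI header: 20 bytes, plus 4 when MSI-X is enabled */
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)

/* Trimmed virtio_net_config: only the fields the legacy path exposes */
struct virtio_net_config {
	unsigned char mac[6];
	unsigned short status;
};

int main(void)
{
	/* offsetofend(struct virtio_net_config, status) == 6 + 2 == 8 */
	size_t cfg_size = offsetof(struct virtio_net_config, status) +
			  sizeof(unsigned short);
	size_t bar0_size = VIRTIO_PCI_CONFIG_OFF(1) + cfg_size;

	/* 24 + 8 == 32, a power of two, so mask-based BAR sizing works */
	printf("bar0_virtual_buf_size = %zu\n", bar0_size);
	return 0;
}
```

The power-of-two constraint matters because virtiovf_pci_read_config() above derives the BAR sizing mask as ~(bar0_virtual_buf_size - 1).
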
BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size)); - virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size, - GFP_KERNEL); - if (!virtvdev->bar0_virtual_buf) - return -ENOMEM; - mutex_init(&virtvdev->bar_mutex); - return 0; + /* + * The vfio_device_ops.init() callback is set to virtiovf_pci_init_device() + * only when legacy I/O is supported. Now, let's initialize it. + */ + return virtiovf_init_legacy_io(virtvdev); } +#endif static void virtiovf_pci_core_release_dev(struct vfio_device *core_vdev) { - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); - kfree(virtvdev->bar0_virtual_buf); + virtiovf_release_legacy_io(virtvdev); +#endif vfio_pci_core_release_dev(core_vdev); } -static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = { - .name = "virtio-vfio-pci-trans", +static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = { + .name = "virtio-vfio-pci-lm", + .init = vfio_pci_core_init_dev, + .release = virtiovf_pci_core_release_dev, + .open_device = virtiovf_pci_open_device, + .close_device = virtiovf_pci_close_device, + .ioctl = vfio_pci_core_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY +static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = { + .name = "virtio-vfio-pci-trans-lm", .init = virtiovf_pci_init_device, .release = virtiovf_pci_core_release_dev, .open_device = virtiovf_pci_open_device, - .close_device = vfio_pci_core_close_device, + .close_device = virtiovf_pci_close_device, .ioctl = virtiovf_vfio_pci_core_ioctl, .device_feature = vfio_pci_core_ioctl_feature, .read = virtiovf_pci_core_read, @@ -458,6 +119,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = { .attach_ioas = vfio_iommufd_physical_attach_ioas, .detach_ioas = vfio_iommufd_physical_detach_ioas, }; +#endif static const struct vfio_device_ops virtiovf_vfio_pci_ops = { .name = "virtio-vfio-pci", @@ -478,29 +140,34 @@ static const struct vfio_device_ops virtiovf_vfio_pci_ops = { .detach_ioas = vfio_iommufd_physical_detach_ioas, }; -static bool virtiovf_bar0_exists(struct pci_dev *pdev) -{ - struct resource *res = pdev->resource; - - return res->flags; -} - static int virtiovf_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const struct vfio_device_ops *ops = &virtiovf_vfio_pci_ops; struct virtiovf_pci_core_device *virtvdev; + bool sup_legacy_io = false; + bool sup_lm = false; int ret; - if (pdev->is_virtfn && virtio_pci_admin_has_legacy_io(pdev) && - !virtiovf_bar0_exists(pdev)) - ops = &virtiovf_vfio_pci_tran_ops; + if (pdev->is_virtfn) { +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + sup_legacy_io = virtiovf_support_legacy_io(pdev); + if (sup_legacy_io) + ops = &virtiovf_vfio_pci_tran_lm_ops; +#endif + sup_lm = virtio_pci_admin_has_dev_parts(pdev); + if (sup_lm && !sup_legacy_io) + ops = &virtiovf_vfio_pci_lm_ops; + } virtvdev = vfio_alloc_device(virtiovf_pci_core_device, 
core_device.vdev, &pdev->dev, ops); if (IS_ERR(virtvdev)) return PTR_ERR(virtvdev); + if (sup_lm) + virtiovf_set_migratable(virtvdev); + dev_set_drvdata(&pdev->dev, &virtvdev->core_device); ret = vfio_pci_core_register_device(&virtvdev->core_device); if (ret) @@ -529,9 +196,10 @@ MODULE_DEVICE_TABLE(pci, virtiovf_pci_table); static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev) { - struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev); - - virtvdev->pci_cmd = 0; +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + virtiovf_legacy_io_reset_done(pdev); +#endif + virtiovf_migration_reset_done(pdev); } static const struct pci_error_handlers virtiovf_err_handlers = { diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c new file mode 100644 index 000000000000..ee54f4c17857 --- /dev/null +++ b/drivers/vfio/pci/virtio/migrate.c @@ -0,0 +1,1337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/vfio_pci_core.h> +#include <linux/virtio_pci.h> +#include <linux/virtio_net.h> +#include <linux/virtio_pci_admin.h> +#include <linux/anon_inodes.h> + +#include "common.h" + +/* Device specification max parts size */ +#define MAX_LOAD_SIZE (BIT_ULL(BITS_PER_TYPE \ + (((struct virtio_admin_cmd_dev_parts_metadata_result *)0)->parts_size.size)) - 1) + +/* Initial target buffer size */ +#define VIRTIOVF_TARGET_INITIAL_BUF_SIZE SZ_1M + +static int +virtiovf_read_device_context_chunk(struct virtiovf_migration_file *migf, + u32 ctx_size); + +static struct page * +virtiovf_get_migration_page(struct virtiovf_data_buffer *buf, + unsigned long offset) +{ + unsigned long cur_offset = 0; + struct scatterlist *sg; + unsigned int i; + + /* All accesses are sequential */ + if (offset < buf->last_offset || !buf->last_offset_sg) { + buf->last_offset = 0; + buf->last_offset_sg = buf->table.sgt.sgl; + buf->sg_last_entry = 0; + } + + cur_offset = buf->last_offset; + + for_each_sg(buf->last_offset_sg, sg, + buf->table.sgt.orig_nents - buf->sg_last_entry, i) { + if (offset < sg->length + cur_offset) { + buf->last_offset_sg = sg; + buf->sg_last_entry += i; + buf->last_offset = cur_offset; + return nth_page(sg_page(sg), + (offset - cur_offset) / PAGE_SIZE); + } + cur_offset += sg->length; + } + return NULL; +} + +static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf, + unsigned int npages) +{ + unsigned int to_alloc = npages; + struct page **page_list; + unsigned long filled; + unsigned int to_fill; + int ret; + int i; + + to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list)); + page_list = kvcalloc(to_fill, sizeof(*page_list), GFP_KERNEL_ACCOUNT); + if (!page_list) + return -ENOMEM; + + do { + filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill, + page_list); + if (!filled) { + ret = -ENOMEM; + goto err; + } + to_alloc -= filled; + ret = sg_alloc_append_table_from_pages(&buf->table, page_list, + filled, 0, filled << PAGE_SHIFT, UINT_MAX, + SG_MAX_SINGLE_ALLOC, GFP_KERNEL_ACCOUNT); + + if (ret) + goto err_append; + buf->allocated_length += filled * PAGE_SIZE; + /* clean input for another bulk allocation */ + memset(page_list, 0, filled * sizeof(*page_list)); + to_fill = min_t(unsigned int, to_alloc, + PAGE_SIZE / sizeof(*page_list)); + 
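
virtiovf_get_migration_page() above keeps lookups cheap for the common sequential case by caching the last scatterlist entry visited and the byte offset where it starts, rescanning from the beginning only on a backward seek. A simplified sketch of that cursor idiom over a plain array (illustrative only; 'struct chunk' and the other names are not from the patch):

```c
#include <stddef.h>

struct chunk { size_t len; };

struct cursor {
	size_t idx;	/* last chunk looked at */
	size_t base;	/* byte offset where that chunk starts */
};

static const struct chunk *
lookup(const struct chunk *c, size_t n, struct cursor *cur, size_t off)
{
	size_t i;

	if (off < cur->base) {		/* backward seek: restart */
		cur->idx = 0;
		cur->base = 0;
	}
	for (i = cur->idx; i < n; i++) {
		if (off < cur->base + c[i].len) {
			cur->idx = i;	/* remember for the next call */
			return &c[i];
		}
		cur->base += c[i].len;
	}
	return NULL;			/* offset past the end */
}
```
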
} while (to_alloc > 0); + + kvfree(page_list); + return 0; + +err_append: + for (i = filled - 1; i >= 0; i--) + __free_page(page_list[i]); +err: + kvfree(page_list); + return ret; +} + +static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf) +{ + struct sg_page_iter sg_iter; + + /* Undo alloc_pages_bulk_array() */ + for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0) + __free_page(sg_page_iter_page(&sg_iter)); + sg_free_append_table(&buf->table); + kfree(buf); +} + +static struct virtiovf_data_buffer * +virtiovf_alloc_data_buffer(struct virtiovf_migration_file *migf, size_t length) +{ + struct virtiovf_data_buffer *buf; + int ret; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT); + if (!buf) + return ERR_PTR(-ENOMEM); + + ret = virtiovf_add_migration_pages(buf, + DIV_ROUND_UP_ULL(length, PAGE_SIZE)); + if (ret) + goto end; + + buf->migf = migf; + return buf; +end: + virtiovf_free_data_buffer(buf); + return ERR_PTR(ret); +} + +static void virtiovf_put_data_buffer(struct virtiovf_data_buffer *buf) +{ + spin_lock_irq(&buf->migf->list_lock); + list_add_tail(&buf->buf_elm, &buf->migf->avail_list); + spin_unlock_irq(&buf->migf->list_lock); +} + +static int +virtiovf_pci_alloc_obj_id(struct virtiovf_pci_core_device *virtvdev, u8 type, + u32 *obj_id) +{ + return virtio_pci_admin_obj_create(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, type, obj_id); +} + +static void +virtiovf_pci_free_obj_id(struct virtiovf_pci_core_device *virtvdev, u32 obj_id) +{ + virtio_pci_admin_obj_destroy(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id); +} + +static struct virtiovf_data_buffer * +virtiovf_get_data_buffer(struct virtiovf_migration_file *migf, size_t length) +{ + struct virtiovf_data_buffer *buf, *temp_buf; + struct list_head free_list; + + INIT_LIST_HEAD(&free_list); + + spin_lock_irq(&migf->list_lock); + list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) { + list_del_init(&buf->buf_elm); + if (buf->allocated_length >= length) { + spin_unlock_irq(&migf->list_lock); + goto found; + } + /* + * Avoid holding redundant buffers: put them on a local free + * list and free them at the end, after dropping the spin lock + * (&migf->list_lock), to keep its critical section short.
+ */ + list_add(&buf->buf_elm, &free_list); + } + spin_unlock_irq(&migf->list_lock); + buf = virtiovf_alloc_data_buffer(migf, length); + +found: + while ((temp_buf = list_first_entry_or_null(&free_list, + struct virtiovf_data_buffer, buf_elm))) { + list_del(&temp_buf->buf_elm); + virtiovf_free_data_buffer(temp_buf); + } + + return buf; +} + +static void virtiovf_clean_migf_resources(struct virtiovf_migration_file *migf) +{ + struct virtiovf_data_buffer *entry; + + if (migf->buf) { + virtiovf_free_data_buffer(migf->buf); + migf->buf = NULL; + } + + if (migf->buf_header) { + virtiovf_free_data_buffer(migf->buf_header); + migf->buf_header = NULL; + } + + list_splice(&migf->avail_list, &migf->buf_list); + + while ((entry = list_first_entry_or_null(&migf->buf_list, + struct virtiovf_data_buffer, buf_elm))) { + list_del(&entry->buf_elm); + virtiovf_free_data_buffer(entry); + } + + if (migf->has_obj_id) + virtiovf_pci_free_obj_id(migf->virtvdev, migf->obj_id); +} + +static void virtiovf_disable_fd(struct virtiovf_migration_file *migf) +{ + mutex_lock(&migf->lock); + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + migf->filp->f_pos = 0; + mutex_unlock(&migf->lock); +} + +static void virtiovf_disable_fds(struct virtiovf_pci_core_device *virtvdev) +{ + if (virtvdev->resuming_migf) { + virtiovf_disable_fd(virtvdev->resuming_migf); + virtiovf_clean_migf_resources(virtvdev->resuming_migf); + fput(virtvdev->resuming_migf->filp); + virtvdev->resuming_migf = NULL; + } + if (virtvdev->saving_migf) { + virtiovf_disable_fd(virtvdev->saving_migf); + virtiovf_clean_migf_resources(virtvdev->saving_migf); + fput(virtvdev->saving_migf->filp); + virtvdev->saving_migf = NULL; + } +} + +/* + * This function is called in all state_mutex unlock cases to + * handle a 'deferred_reset', if one exists. + */ +static void virtiovf_state_mutex_unlock(struct virtiovf_pci_core_device *virtvdev) +{ +again: + spin_lock(&virtvdev->reset_lock); + if (virtvdev->deferred_reset) { + virtvdev->deferred_reset = false; + spin_unlock(&virtvdev->reset_lock); + virtvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + virtiovf_disable_fds(virtvdev); + goto again; + } + mutex_unlock(&virtvdev->state_mutex); + spin_unlock(&virtvdev->reset_lock); +} + +void virtiovf_migration_reset_done(struct pci_dev *pdev) +{ + struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev); + + if (!virtvdev->migrate_cap) + return; + + /* + * As the higher VFIO layers are holding locks across reset and using + * those same locks with the mm_lock, we need to prevent an ABBA deadlock + * with the state_mutex and mm_lock. + * In case the state_mutex was already taken, we defer the cleanup work + * to the unlock flow of the other running context.
+ */ + spin_lock(&virtvdev->reset_lock); + virtvdev->deferred_reset = true; + if (!mutex_trylock(&virtvdev->state_mutex)) { + spin_unlock(&virtvdev->reset_lock); + return; + } + spin_unlock(&virtvdev->reset_lock); + virtiovf_state_mutex_unlock(virtvdev); +} + +static int virtiovf_release_file(struct inode *inode, struct file *filp) +{ + struct virtiovf_migration_file *migf = filp->private_data; + + virtiovf_disable_fd(migf); + mutex_destroy(&migf->lock); + kfree(migf); + return 0; +} + +static struct virtiovf_data_buffer * +virtiovf_get_data_buff_from_pos(struct virtiovf_migration_file *migf, + loff_t pos, bool *end_of_data) +{ + struct virtiovf_data_buffer *buf; + bool found = false; + + *end_of_data = false; + spin_lock_irq(&migf->list_lock); + if (list_empty(&migf->buf_list)) { + *end_of_data = true; + goto end; + } + + buf = list_first_entry(&migf->buf_list, struct virtiovf_data_buffer, + buf_elm); + if (pos >= buf->start_pos && + pos < buf->start_pos + buf->length) { + found = true; + goto end; + } + + /* + * Since we use a stream-based FD, we expect the data to always be + * in the first chunk + */ + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + +end: + spin_unlock_irq(&migf->list_lock); + return found ? buf : NULL; +} + +static ssize_t virtiovf_buf_read(struct virtiovf_data_buffer *vhca_buf, + char __user **buf, size_t *len, loff_t *pos) +{ + unsigned long offset; + ssize_t done = 0; + size_t copy_len; + + copy_len = min_t(size_t, + vhca_buf->start_pos + vhca_buf->length - *pos, *len); + while (copy_len) { + size_t page_offset; + struct page *page; + size_t page_len; + u8 *from_buff; + int ret; + + offset = *pos - vhca_buf->start_pos; + page_offset = offset % PAGE_SIZE; + offset -= page_offset; + page = virtiovf_get_migration_page(vhca_buf, offset); + if (!page) + return -EINVAL; + page_len = min_t(size_t, copy_len, PAGE_SIZE - page_offset); + from_buff = kmap_local_page(page); + ret = copy_to_user(*buf, from_buff + page_offset, page_len); + kunmap_local(from_buff); + if (ret) + return -EFAULT; + *pos += page_len; + *len -= page_len; + *buf += page_len; + done += page_len; + copy_len -= page_len; + } + + if (*pos >= vhca_buf->start_pos + vhca_buf->length) { + spin_lock_irq(&vhca_buf->migf->list_lock); + list_del_init(&vhca_buf->buf_elm); + list_add_tail(&vhca_buf->buf_elm, &vhca_buf->migf->avail_list); + spin_unlock_irq(&vhca_buf->migf->list_lock); + } + + return done; +} + +static ssize_t virtiovf_save_read(struct file *filp, char __user *buf, size_t len, + loff_t *pos) +{ + struct virtiovf_migration_file *migf = filp->private_data; + struct virtiovf_data_buffer *vhca_buf; + bool first_loop_call = true; + bool end_of_data; + ssize_t done = 0; + + if (pos) + return -ESPIPE; + pos = &filp->f_pos; + + mutex_lock(&migf->lock); + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) { + done = -ENODEV; + goto out_unlock; + } + + while (len) { + ssize_t count; + + vhca_buf = virtiovf_get_data_buff_from_pos(migf, *pos, &end_of_data); + if (first_loop_call) { + first_loop_call = false; + /* Temporary end of file as part of PRE_COPY */ + if (end_of_data && migf->state == VIRTIOVF_MIGF_STATE_PRECOPY) { + done = -ENOMSG; + goto out_unlock; + } + if (end_of_data && migf->state != VIRTIOVF_MIGF_STATE_COMPLETE) { + done = -EINVAL; + goto out_unlock; + } + } + + if (end_of_data) + goto out_unlock; + + if (!vhca_buf) { + done = -EINVAL; + goto out_unlock; + } + + count = virtiovf_buf_read(vhca_buf, &buf, &len, pos); + if (count < 0) { + done = count; + goto out_unlock; + } + done += count; + } + +out_unlock: + 
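
The VFIO_MIG_GET_PRECOPY_INFO handler below re-queries the device context size at most once per second through migf->pre_copy_rl_state. A minimal sketch of that ratelimit-as-throttle idiom (illustrative; query_device() is a hypothetical stand-in for the expensive admin command):

```c
#include <linux/ratelimit.h>

/* At most one device query per second, mirroring the PRE_COPY path */
static DEFINE_RATELIMIT_STATE(query_rs, 1 * HZ, 1);

static void query_device(void)
{
	/* stand-in for the expensive admin command, elided */
}

static void maybe_query(void)
{
	/* Keep the log quiet: this is throttling, not message limiting */
	ratelimit_set_flags(&query_rs, RATELIMIT_MSG_ON_RELEASE);

	/* True while under the limit: frequent polls degrade to 1/sec */
	if (__ratelimit(&query_rs))
		query_device();
}
```
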
mutex_unlock(&migf->lock); + return done; +} + +static long virtiovf_precopy_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct virtiovf_migration_file *migf = filp->private_data; + struct virtiovf_pci_core_device *virtvdev = migf->virtvdev; + struct vfio_precopy_info info = {}; + loff_t *pos = &filp->f_pos; + bool end_of_data = false; + unsigned long minsz; + u32 ctx_size = 0; + int ret; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&virtvdev->state_mutex); + if (virtvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && + virtvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { + ret = -EINVAL; + goto err_state_unlock; + } + + /* + * The virtio specification does not include a PRE_COPY concept. + * Since we can expect the data to remain the same for a certain period, + * we use a rate limiter mechanism before making a call to the device. + */ + if (__ratelimit(&migf->pre_copy_rl_state)) { + + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, migf->obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &ctx_size); + if (ret) + goto err_state_unlock; + } + + mutex_lock(&migf->lock); + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) { + ret = -ENODEV; + goto err_migf_unlock; + } + + if (migf->pre_copy_initial_bytes > *pos) { + info.initial_bytes = migf->pre_copy_initial_bytes - *pos; + } else { + info.dirty_bytes = migf->max_pos - *pos; + if (!info.dirty_bytes) + end_of_data = true; + info.dirty_bytes += ctx_size; + } + + if (!end_of_data || !ctx_size) { + mutex_unlock(&migf->lock); + goto done; + } + + mutex_unlock(&migf->lock); + /* + * We finished transferring the current state and the device has a + * dirty state; read a new state. + */ + ret = virtiovf_read_device_context_chunk(migf, ctx_size); + if (ret) + /* + * The machine is running, and the context size could grow, so there is + * no reason to mark the device state as VIRTIOVF_MIGF_STATE_ERROR.
+ */ + goto err_state_unlock; + +done: + virtiovf_state_mutex_unlock(virtvdev); + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + return 0; + +err_migf_unlock: + mutex_unlock(&migf->lock); +err_state_unlock: + virtiovf_state_mutex_unlock(virtvdev); + return ret; +} + +static const struct file_operations virtiovf_save_fops = { + .owner = THIS_MODULE, + .read = virtiovf_save_read, + .unlocked_ioctl = virtiovf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = virtiovf_release_file, +}; + +static int +virtiovf_add_buf_header(struct virtiovf_data_buffer *header_buf, + u32 data_size) +{ + struct virtiovf_migration_file *migf = header_buf->migf; + struct virtiovf_migration_header header = {}; + struct page *page; + u8 *to_buff; + + header.record_size = cpu_to_le64(data_size); + header.flags = cpu_to_le32(VIRTIOVF_MIGF_HEADER_FLAGS_TAG_MANDATORY); + header.tag = cpu_to_le32(VIRTIOVF_MIGF_HEADER_TAG_DEVICE_DATA); + page = virtiovf_get_migration_page(header_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + memcpy(to_buff, &header, sizeof(header)); + kunmap_local(to_buff); + header_buf->length = sizeof(header); + header_buf->start_pos = header_buf->migf->max_pos; + migf->max_pos += header_buf->length; + spin_lock_irq(&migf->list_lock); + list_add_tail(&header_buf->buf_elm, &migf->buf_list); + spin_unlock_irq(&migf->list_lock); + return 0; +} + +static int +virtiovf_read_device_context_chunk(struct virtiovf_migration_file *migf, + u32 ctx_size) +{ + struct virtiovf_data_buffer *header_buf; + struct virtiovf_data_buffer *buf; + bool unmark_end = false; + struct scatterlist *sg; + unsigned int i; + u32 res_size; + int nent; + int ret; + + buf = virtiovf_get_data_buffer(migf, ctx_size); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* Find the total count of SG entries which satisfy the size */ + nent = sg_nents_for_len(buf->table.sgt.sgl, ctx_size); + if (nent <= 0) { + ret = -EINVAL; + goto out; + } + + /* + * Iterate to that SG entry and mark it as last (if it's not already) + * so that the underlying layers iterate only up to that entry.
+ */ + for_each_sg(buf->table.sgt.sgl, sg, nent - 1, i) + ; + + if (!sg_is_last(sg)) { + unmark_end = true; + sg_mark_end(sg); + } + + ret = virtio_pci_admin_dev_parts_get(migf->virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, + migf->obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL, + buf->table.sgt.sgl, &res_size); + /* Restore the original SG mark end */ + if (unmark_end) + sg_unmark_end(sg); + if (ret) + goto out; + + buf->length = res_size; + header_buf = virtiovf_get_data_buffer(migf, + sizeof(struct virtiovf_migration_header)); + if (IS_ERR(header_buf)) { + ret = PTR_ERR(header_buf); + goto out; + } + + ret = virtiovf_add_buf_header(header_buf, res_size); + if (ret) + goto out_header; + + buf->start_pos = buf->migf->max_pos; + migf->max_pos += buf->length; + spin_lock(&migf->list_lock); + list_add_tail(&buf->buf_elm, &migf->buf_list); + spin_unlock_irq(&migf->list_lock); + return 0; + +out_header: + virtiovf_put_data_buffer(header_buf); +out: + virtiovf_put_data_buffer(buf); + return ret; +} + +static int +virtiovf_pci_save_device_final_data(struct virtiovf_pci_core_device *virtvdev) +{ + struct virtiovf_migration_file *migf = virtvdev->saving_migf; + u32 ctx_size; + int ret; + + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) + return -ENODEV; + + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, migf->obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &ctx_size); + if (ret) + goto err; + + if (!ctx_size) { + ret = -EINVAL; + goto err; + } + + ret = virtiovf_read_device_context_chunk(migf, ctx_size); + if (ret) + goto err; + + migf->state = VIRTIOVF_MIGF_STATE_COMPLETE; + return 0; + +err: + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + return ret; +} + +static struct virtiovf_migration_file * +virtiovf_pci_save_device_data(struct virtiovf_pci_core_device *virtvdev, + bool pre_copy) +{ + struct virtiovf_migration_file *migf; + u32 ctx_size; + u32 obj_id; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("virtiovf_mig", &virtiovf_save_fops, migf, + O_RDONLY); + if (IS_ERR(migf->filp)) { + ret = PTR_ERR(migf->filp); + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); + migf->virtvdev = virtvdev; + + lockdep_assert_held(&virtvdev->state_mutex); + ret = virtiovf_pci_alloc_obj_id(virtvdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET, + &obj_id); + if (ret) + goto out; + + migf->obj_id = obj_id; + /* Mark as having a valid obj id which can be even 0 */ + migf->has_obj_id = true; + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &ctx_size); + if (ret) + goto out_clean; + + if (!ctx_size) { + ret = -EINVAL; + goto out_clean; + } + + ret = virtiovf_read_device_context_chunk(migf, ctx_size); + if (ret) + goto out_clean; + + if (pre_copy) { + migf->pre_copy_initial_bytes = migf->max_pos; + /* Arbitrarily set the pre-copy rate limit to 1-second intervals */ + ratelimit_state_init(&migf->pre_copy_rl_state, 1 * HZ, 1); + /* Prevent any rate messages upon its usage */ + ratelimit_set_flags(&migf->pre_copy_rl_state, + RATELIMIT_MSG_ON_RELEASE); + migf->state = VIRTIOVF_MIGF_STATE_PRECOPY; + } else { + migf->state = VIRTIOVF_MIGF_STATE_COMPLETE; + } + + 
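
With the saving file set up above, userspace drains the FD with plain read() calls; while in PRE_COPY, a -ENOMSG failure means "temporary end of file, poll again later" rather than a fatal error, and a zero-length read marks final completion. A userspace-side sketch of that protocol (the fd and buffer management are illustrative, not from the patch):

```c
#include <errno.h>
#include <unistd.h>

/* Returns 1 on final EOF, 0 on "no more data for now", -1 on error */
static int drain_save_fd(int fd, char *buf, size_t len)
{
	for (;;) {
		ssize_t n = read(fd, buf, len);

		if (n > 0)
			continue;	/* ship this chunk to the target */
		if (n == 0)
			return 1;	/* VIRTIOVF_MIGF_STATE_COMPLETE */
		if (errno == ENOMSG)
			return 0;	/* PRE_COPY: poll again later */
		return -1;		/* real failure, abort migration */
	}
}
```
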
return migf; + +out_clean: + virtiovf_clean_migf_resources(migf); +out: + fput(migf->filp); + return ERR_PTR(ret); +} + +/* + * Set the required object header at the beginning of the buffer. + * The actual device parts data is written after the header. + */ +static int virtiovf_set_obj_cmd_header(struct virtiovf_data_buffer *vhca_buf) +{ + struct virtio_admin_cmd_resource_obj_cmd_hdr obj_hdr = {}; + struct page *page; + u8 *to_buff; + + obj_hdr.type = cpu_to_le16(VIRTIO_RESOURCE_OBJ_DEV_PARTS); + obj_hdr.id = cpu_to_le32(vhca_buf->migf->obj_id); + page = virtiovf_get_migration_page(vhca_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + memcpy(to_buff, &obj_hdr, sizeof(obj_hdr)); + kunmap_local(to_buff); + + /* Mark the buffer as including the header object data */ + vhca_buf->include_header_object = 1; + return 0; +} + +static int +virtiovf_append_page_to_mig_buf(struct virtiovf_data_buffer *vhca_buf, + const char __user **buf, size_t *len, + loff_t *pos, ssize_t *done) +{ + unsigned long offset; + size_t page_offset; + struct page *page; + size_t page_len; + u8 *to_buff; + int ret; + + offset = *pos - vhca_buf->start_pos; + + if (vhca_buf->include_header_object) + /* The buffer holds the object header; update the offset accordingly */ + offset += sizeof(struct virtio_admin_cmd_resource_obj_cmd_hdr); + + page_offset = offset % PAGE_SIZE; + + page = virtiovf_get_migration_page(vhca_buf, offset - page_offset); + if (!page) + return -EINVAL; + + page_len = min_t(size_t, *len, PAGE_SIZE - page_offset); + to_buff = kmap_local_page(page); + ret = copy_from_user(to_buff + page_offset, *buf, page_len); + kunmap_local(to_buff); + if (ret) + return -EFAULT; + + *pos += page_len; + *done += page_len; + *buf += page_len; + *len -= page_len; + vhca_buf->length += page_len; + return 0; +} + +static ssize_t +virtiovf_resume_read_chunk(struct virtiovf_migration_file *migf, + struct virtiovf_data_buffer *vhca_buf, + size_t chunk_size, const char __user **buf, + size_t *len, loff_t *pos, ssize_t *done, + bool *has_work) +{ + size_t copy_len, to_copy; + int ret; + + to_copy = min_t(size_t, *len, chunk_size - vhca_buf->length); + copy_len = to_copy; + while (to_copy) { + ret = virtiovf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, + pos, done); + if (ret) + return ret; + } + + *len -= copy_len; + if (vhca_buf->length == chunk_size) { + migf->load_state = VIRTIOVF_LOAD_STATE_LOAD_CHUNK; + migf->max_pos += chunk_size; + *has_work = true; + } + + return 0; +} + +static int +virtiovf_resume_read_header_data(struct virtiovf_migration_file *migf, + struct virtiovf_data_buffer *vhca_buf, + const char __user **buf, size_t *len, + loff_t *pos, ssize_t *done) +{ + size_t copy_len, to_copy; + size_t required_data; + int ret; + + required_data = migf->record_size - vhca_buf->length; + to_copy = min_t(size_t, *len, required_data); + copy_len = to_copy; + while (to_copy) { + ret = virtiovf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, + pos, done); + if (ret) + return ret; + } + + *len -= copy_len; + if (vhca_buf->length == migf->record_size) { + switch (migf->record_tag) { + default: + /* Optional tag */ + break; + } + + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER; + migf->max_pos += migf->record_size; + vhca_buf->length = 0; + } + + return 0; +} + +static int +virtiovf_resume_read_header(struct virtiovf_migration_file *migf, + struct virtiovf_data_buffer *vhca_buf, + const char __user **buf, + size_t *len, loff_t *pos, + ssize_t *done, bool *has_work) +{ + struct page
*page; + size_t copy_len; + u8 *to_buff; + int ret; + + copy_len = min_t(size_t, *len, + sizeof(struct virtiovf_migration_header) - vhca_buf->length); + page = virtiovf_get_migration_page(vhca_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + ret = copy_from_user(to_buff + vhca_buf->length, *buf, copy_len); + if (ret) { + ret = -EFAULT; + goto end; + } + + *buf += copy_len; + *pos += copy_len; + *done += copy_len; + *len -= copy_len; + vhca_buf->length += copy_len; + if (vhca_buf->length == sizeof(struct virtiovf_migration_header)) { + u64 record_size; + u32 flags; + + record_size = le64_to_cpup((__le64 *)to_buff); + if (record_size > MAX_LOAD_SIZE) { + ret = -ENOMEM; + goto end; + } + + migf->record_size = record_size; + flags = le32_to_cpup((__le32 *)(to_buff + + offsetof(struct virtiovf_migration_header, flags))); + migf->record_tag = le32_to_cpup((__le32 *)(to_buff + + offsetof(struct virtiovf_migration_header, tag))); + switch (migf->record_tag) { + case VIRTIOVF_MIGF_HEADER_TAG_DEVICE_DATA: + migf->load_state = VIRTIOVF_LOAD_STATE_PREP_CHUNK; + break; + default: + if (!(flags & VIRTIOVF_MIGF_HEADER_FLAGS_TAG_OPTIONAL)) { + ret = -EOPNOTSUPP; + goto end; + } + /* We may read and skip this optional record data */ + migf->load_state = VIRTIOVF_LOAD_STATE_PREP_HEADER_DATA; + } + + migf->max_pos += vhca_buf->length; + vhca_buf->length = 0; + *has_work = true; + } +end: + kunmap_local(to_buff); + return ret; +} + +static ssize_t virtiovf_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct virtiovf_migration_file *migf = filp->private_data; + struct virtiovf_data_buffer *vhca_buf = migf->buf; + struct virtiovf_data_buffer *vhca_buf_header = migf->buf_header; + unsigned int orig_length; + bool has_work = false; + ssize_t done = 0; + int ret = 0; + + if (pos) + return -ESPIPE; + + pos = &filp->f_pos; + if (*pos < vhca_buf->start_pos) + return -EINVAL; + + mutex_lock(&migf->virtvdev->state_mutex); + mutex_lock(&migf->lock); + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) { + done = -ENODEV; + goto out_unlock; + } + + while (len || has_work) { + has_work = false; + switch (migf->load_state) { + case VIRTIOVF_LOAD_STATE_READ_HEADER: + ret = virtiovf_resume_read_header(migf, vhca_buf_header, &buf, + &len, pos, &done, &has_work); + if (ret) + goto out_unlock; + break; + case VIRTIOVF_LOAD_STATE_PREP_HEADER_DATA: + if (vhca_buf_header->allocated_length < migf->record_size) { + virtiovf_free_data_buffer(vhca_buf_header); + + migf->buf_header = virtiovf_alloc_data_buffer(migf, + migf->record_size); + if (IS_ERR(migf->buf_header)) { + ret = PTR_ERR(migf->buf_header); + migf->buf_header = NULL; + goto out_unlock; + } + + vhca_buf_header = migf->buf_header; + } + + vhca_buf_header->start_pos = migf->max_pos; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER_DATA; + break; + case VIRTIOVF_LOAD_STATE_READ_HEADER_DATA: + ret = virtiovf_resume_read_header_data(migf, vhca_buf_header, + &buf, &len, pos, &done); + if (ret) + goto out_unlock; + break; + case VIRTIOVF_LOAD_STATE_PREP_CHUNK: + { + u32 cmd_size = migf->record_size + + sizeof(struct virtio_admin_cmd_resource_obj_cmd_hdr); + + /* + * The DMA map/unmap is managed in virtio layer, we just need to extend + * the SG pages to hold the extra required chunk data. 
+ */ + if (vhca_buf->allocated_length < cmd_size) { + ret = virtiovf_add_migration_pages(vhca_buf, + DIV_ROUND_UP_ULL(cmd_size - vhca_buf->allocated_length, + PAGE_SIZE)); + if (ret) + goto out_unlock; + } + + vhca_buf->start_pos = migf->max_pos; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_CHUNK; + break; + } + case VIRTIOVF_LOAD_STATE_READ_CHUNK: + ret = virtiovf_resume_read_chunk(migf, vhca_buf, migf->record_size, + &buf, &len, pos, &done, &has_work); + if (ret) + goto out_unlock; + break; + case VIRTIOVF_LOAD_STATE_LOAD_CHUNK: + /* Mark the last SG entry and set its length */ + sg_mark_end(vhca_buf->last_offset_sg); + orig_length = vhca_buf->last_offset_sg->length; + /* Length should include the resource object command header */ + vhca_buf->last_offset_sg->length = vhca_buf->length + + sizeof(struct virtio_admin_cmd_resource_obj_cmd_hdr) - + vhca_buf->last_offset; + ret = virtio_pci_admin_dev_parts_set(migf->virtvdev->core_device.pdev, + vhca_buf->table.sgt.sgl); + /* Restore the original SG data */ + vhca_buf->last_offset_sg->length = orig_length; + sg_unmark_end(vhca_buf->last_offset_sg); + if (ret) + goto out_unlock; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER; + /* be ready for reading the next chunk */ + vhca_buf->length = 0; + break; + default: + break; + } + } + +out_unlock: + if (ret) + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + mutex_unlock(&migf->lock); + virtiovf_state_mutex_unlock(migf->virtvdev); + return ret ? ret : done; +} + +static const struct file_operations virtiovf_resume_fops = { + .owner = THIS_MODULE, + .write = virtiovf_resume_write, + .release = virtiovf_release_file, +}; + +static struct virtiovf_migration_file * +virtiovf_pci_resume_device_data(struct virtiovf_pci_core_device *virtvdev) +{ + struct virtiovf_migration_file *migf; + struct virtiovf_data_buffer *buf; + u32 obj_id; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("virtiovf_mig", &virtiovf_resume_fops, migf, + O_WRONLY); + if (IS_ERR(migf->filp)) { + ret = PTR_ERR(migf->filp); + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); + + buf = virtiovf_alloc_data_buffer(migf, VIRTIOVF_TARGET_INITIAL_BUF_SIZE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out; + } + + migf->buf = buf; + + buf = virtiovf_alloc_data_buffer(migf, + sizeof(struct virtiovf_migration_header)); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_clean; + } + + migf->buf_header = buf; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER; + + migf->virtvdev = virtvdev; + ret = virtiovf_pci_alloc_obj_id(virtvdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET, + &obj_id); + if (ret) + goto out_clean; + + migf->obj_id = obj_id; + /* Mark as having a valid obj id which can be even 0 */ + migf->has_obj_id = true; + ret = virtiovf_set_obj_cmd_header(migf->buf); + if (ret) + goto out_clean; + + return migf; + +out_clean: + virtiovf_clean_migf_resources(migf); +out: + fput(migf->filp); + return ERR_PTR(ret); +} + +static struct file * +virtiovf_pci_step_device_state_locked(struct virtiovf_pci_core_device *virtvdev, + u32 new) +{ + u32 cur = virtvdev->mig_state; + int ret; + + if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) { + /* NOP */ + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) { + 
/* NOP */ + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + ret = virtio_pci_admin_mode_set(virtvdev->core_device.pdev, + BIT(VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED)); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) { + ret = virtio_pci_admin_mode_set(virtvdev->core_device.pdev, 0); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct virtiovf_migration_file *migf; + + migf = virtiovf_pci_save_device_data(virtvdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + virtvdev->saving_migf = migf; + return migf->filp; + } + + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) { + virtiovf_disable_fds(virtvdev); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct virtiovf_migration_file *migf; + + migf = virtiovf_pci_resume_device_data(virtvdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + virtvdev->resuming_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + virtiovf_disable_fds(virtvdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) || + (cur == VFIO_DEVICE_STATE_RUNNING_P2P && + new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + struct virtiovf_migration_file *migf; + + migf = virtiovf_pci_save_device_data(virtvdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + virtvdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) { + ret = virtiovf_pci_save_device_final_data(virtvdev); + return ret ? 
ERR_PTR(ret) : NULL; + } + + /* + * vfio_mig_get_next_state() does not use arcs other than the above + */ + WARN_ON(true); + return ERR_PTR(-EINVAL); +} + +static struct file * +virtiovf_pci_set_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state new_state) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + vdev, struct virtiovf_pci_core_device, core_device.vdev); + enum vfio_device_mig_state next_state; + struct file *res = NULL; + int ret; + + mutex_lock(&virtvdev->state_mutex); + while (new_state != virtvdev->mig_state) { + ret = vfio_mig_get_next_state(vdev, virtvdev->mig_state, + new_state, &next_state); + if (ret) { + res = ERR_PTR(ret); + break; + } + res = virtiovf_pci_step_device_state_locked(virtvdev, next_state); + if (IS_ERR(res)) + break; + virtvdev->mig_state = next_state; + if (WARN_ON(res && new_state != virtvdev->mig_state)) { + fput(res); + res = ERR_PTR(-EINVAL); + break; + } + } + virtiovf_state_mutex_unlock(virtvdev); + return res; +} + +static int virtiovf_pci_get_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state *curr_state) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + vdev, struct virtiovf_pci_core_device, core_device.vdev); + + mutex_lock(&virtvdev->state_mutex); + *curr_state = virtvdev->mig_state; + virtiovf_state_mutex_unlock(virtvdev); + return 0; +} + +static int virtiovf_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + vdev, struct virtiovf_pci_core_device, core_device.vdev); + bool obj_id_exists; + u32 res_size; + u32 obj_id; + int ret; + + mutex_lock(&virtvdev->state_mutex); + obj_id_exists = virtvdev->saving_migf && virtvdev->saving_migf->has_obj_id; + if (!obj_id_exists) { + ret = virtiovf_pci_alloc_obj_id(virtvdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET, + &obj_id); + if (ret) + goto end; + } else { + obj_id = virtvdev->saving_migf->obj_id; + } + + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &res_size); + if (!ret) + *stop_copy_length = res_size; + + /* + * We can't leave this obj_id alive if it didn't exist before; otherwise, it might + * stay alive, even without an active migration flow (e.g.
migration was cancelled) + */ + if (!obj_id_exists) + virtiovf_pci_free_obj_id(virtvdev, obj_id); +end: + virtiovf_state_mutex_unlock(virtvdev); + return ret; +} + +static const struct vfio_migration_ops virtvdev_pci_mig_ops = { + .migration_set_state = virtiovf_pci_set_device_state, + .migration_get_state = virtiovf_pci_get_device_state, + .migration_get_data_size = virtiovf_pci_get_data_size, +}; + +void virtiovf_set_migratable(struct virtiovf_pci_core_device *virtvdev) +{ + virtvdev->migrate_cap = 1; + mutex_init(&virtvdev->state_mutex); + spin_lock_init(&virtvdev->reset_lock); + virtvdev->core_device.vdev.migration_flags = + VFIO_MIGRATION_STOP_COPY | + VFIO_MIGRATION_P2P | + VFIO_MIGRATION_PRE_COPY; + virtvdev->core_device.vdev.mig_ops = &virtvdev_pci_mig_ops; +} + +void virtiovf_open_migration(struct virtiovf_pci_core_device *virtvdev) +{ + if (!virtvdev->migrate_cap) + return; + + virtvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; +} + +void virtiovf_close_migration(struct virtiovf_pci_core_device *virtvdev) +{ + if (!virtvdev->migrate_cap) + return; + + virtiovf_disable_fds(virtvdev); +} diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index bf391b40e576..50ebc9593c9d 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -72,7 +72,6 @@ struct vfio_iommu { uint64_t pgsize_bitmap; uint64_t num_non_pinned_groups; bool v2; - bool nesting; bool dirty_page_tracking; struct list_head emulated_iommu_groups; }; @@ -2195,12 +2194,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, goto out_free_domain; } - if (iommu->nesting) { - ret = iommu_enable_nesting(domain->domain); - if (ret) - goto out_domain; - } - ret = iommu_attach_group(domain->domain, group->iommu_group); if (ret) goto out_domain; @@ -2541,9 +2534,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) switch (arg) { case VFIO_TYPE1_IOMMU: break; - case VFIO_TYPE1_NESTING_IOMMU: - iommu->nesting = true; - fallthrough; + case __VFIO_RESERVED_TYPE1_NESTING_IOMMU: case VFIO_TYPE1v2_IOMMU: iommu->v2 = true; break; @@ -2638,7 +2629,6 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu, switch (arg) { case VFIO_TYPE1_IOMMU: case VFIO_TYPE1v2_IOMMU: - case VFIO_TYPE1_NESTING_IOMMU: case VFIO_UNMAP_ALL: return 1; case VFIO_UPDATE_VADDR: diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 25e409bbb1a2..720b5ada7fe8 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -151,7 +151,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev, struct pm860x_backlight_data *data, char *name) { - struct device_node *nproot, *np; + struct device_node *nproot; int iset = 0; nproot = of_get_child_by_name(pdev->dev.parent->of_node, "backlights"); @@ -159,14 +159,13 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev, dev_err(&pdev->dev, "failed to find backlights node\n"); return -ENODEV; } - for_each_child_of_node(nproot, np) { + for_each_child_of_node_scoped(nproot, np) { if (of_node_name_eq(np, name)) { of_property_read_u32(np, "marvell,88pm860x-iset", &iset); data->iset = PM8606_WLED_CURRENT(iset); of_property_read_u32(np, "marvell,88pm860x-pwm", &data->pwm); - of_node_put(np); break; } } diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index a82934694d05..f699e5827ccb 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -65,7 +65,6 @@ static struct list_head 
backlight_dev_list; static struct mutex backlight_dev_list_mutex; -static struct blocking_notifier_head backlight_notifier; static const char *const backlight_types[] = { [BACKLIGHT_RAW] = "raw", @@ -467,9 +466,6 @@ struct backlight_device *backlight_device_register(const char *name, list_add(&new_bd->entry, &backlight_dev_list); mutex_unlock(&backlight_dev_list_mutex); - blocking_notifier_call_chain(&backlight_notifier, - BACKLIGHT_REGISTERED, new_bd); - return new_bd; } EXPORT_SYMBOL(backlight_device_register); @@ -539,9 +535,6 @@ void backlight_device_unregister(struct backlight_device *bd) mutex_unlock(&pmac_backlight_mutex); #endif - blocking_notifier_call_chain(&backlight_notifier, - BACKLIGHT_UNREGISTERED, bd); - mutex_lock(&bd->ops_lock); bd->ops = NULL; mutex_unlock(&bd->ops_lock); @@ -567,40 +560,6 @@ static int devm_backlight_device_match(struct device *dev, void *res, } /** - * backlight_register_notifier - get notified of backlight (un)registration - * @nb: notifier block with the notifier to call on backlight (un)registration - * - * Register a notifier to get notified when backlight devices get registered - * or unregistered. - * - * RETURNS: - * - * 0 on success, otherwise a negative error code - */ -int backlight_register_notifier(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&backlight_notifier, nb); -} -EXPORT_SYMBOL(backlight_register_notifier); - -/** - * backlight_unregister_notifier - unregister a backlight notifier - * @nb: notifier block to unregister - * - * Register a notifier to get notified when backlight devices get registered - * or unregistered. - * - * RETURNS: - * - * 0 on success, otherwise a negative error code - */ -int backlight_unregister_notifier(struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&backlight_notifier, nb); -} -EXPORT_SYMBOL(backlight_unregister_notifier); - -/** * devm_backlight_device_register - register a new backlight device * @dev: the device to register * @name: the name of the device @@ -767,7 +726,6 @@ static int __init backlight_class_init(void) INIT_LIST_HEAD(&backlight_dev_list); mutex_init(&backlight_dev_list_mutex); - BLOCKING_INIT_NOTIFIER_HEAD(&backlight_notifier); return 0; } diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index e4fcfbe38dc6..69f49371ea35 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -17,14 +17,13 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> -#include <linux/fb.h> #include <linux/lcd.h> #include <linux/spi/spi.h> #include <linux/spi/corgi_lcd.h> #include <linux/slab.h> #include <asm/mach/sharpsl_param.h> -#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) +#define POWER_IS_ON(pwr) ((pwr) <= LCD_POWER_REDUCED) /* Register Addresses */ #define RESCTL_ADRS 0x00 @@ -332,12 +331,12 @@ static void corgi_lcd_power_off(struct corgi_lcd *lcd) POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF); } -static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m) +static int corgi_lcd_set_mode(struct lcd_device *ld, u32 xres, u32 yres) { struct corgi_lcd *lcd = lcd_get_data(ld); int mode = CORGI_LCD_MODE_QVGA; - if (m->xres == 640 || m->xres == 480) + if (xres == 640 || xres == 480) mode = CORGI_LCD_MODE_VGA; if (lcd->mode == mode) @@ -455,7 +454,7 @@ static int corgi_lcd_suspend(struct device *dev) corgibl_flags |= CORGIBL_SUSPENDED; corgi_bl_set_intensity(lcd, 0); - corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); + 
corgi_lcd_set_power(lcd->lcd_dev, LCD_POWER_OFF); return 0; } @@ -464,7 +463,7 @@ static int corgi_lcd_resume(struct device *dev) struct corgi_lcd *lcd = dev_get_drvdata(dev); corgibl_flags &= ~CORGIBL_SUSPENDED; - corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK); + corgi_lcd_set_power(lcd->lcd_dev, LCD_POWER_ON); backlight_update_status(lcd->bl_dev); return 0; } @@ -513,7 +512,7 @@ static int corgi_lcd_probe(struct spi_device *spi) if (IS_ERR(lcd->lcd_dev)) return PTR_ERR(lcd->lcd_dev); - lcd->power = FB_BLANK_POWERDOWN; + lcd->power = LCD_POWER_OFF; lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; memset(&props, 0, sizeof(struct backlight_properties)); @@ -535,7 +534,7 @@ static int corgi_lcd_probe(struct spi_device *spi) lcd->kick_battery = pdata->kick_battery; spi_set_drvdata(spi, lcd); - corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK); + corgi_lcd_set_power(lcd->lcd_dev, LCD_POWER_ON); backlight_update_status(lcd->bl_dev); lcd->limit_mask = pdata->limit_mask; @@ -550,7 +549,7 @@ static void corgi_lcd_remove(struct spi_device *spi) lcd->bl_dev->props.power = BACKLIGHT_POWER_ON; lcd->bl_dev->props.brightness = 0; backlight_update_status(lcd->bl_dev); - corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); + corgi_lcd_set_power(lcd->lcd_dev, LCD_POWER_OFF); } static struct spi_driver corgi_lcd_driver = { diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c index cdd7b7686723..61a57d38700f 100644 --- a/drivers/video/backlight/hx8357.c +++ b/drivers/video/backlight/hx8357.c @@ -532,7 +532,7 @@ static int hx8369_lcd_init(struct lcd_device *lcdev) return 0; } -#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) +#define POWER_IS_ON(pwr) ((pwr) <= LCD_POWER_REDUCED) static int hx8357_set_power(struct lcd_device *lcdev, int power) { diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c index 7683e209ad6b..5e1bf0c5831f 100644 --- a/drivers/video/backlight/ili922x.c +++ b/drivers/video/backlight/ili922x.c @@ -8,7 +8,6 @@ * memory is cyclically updated over the RGB interface. */ -#include <linux/fb.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> @@ -119,7 +118,7 @@ #define CMD_BUFSIZE 16 -#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) +#define POWER_IS_ON(pwr) ((pwr) <= LCD_POWER_REDUCED) #define set_tx_byte(b) (tx_invert ? 
~(b) : b) @@ -513,7 +512,7 @@ static int ili922x_probe(struct spi_device *spi) ili922x_display_init(spi); - ili->power = FB_BLANK_POWERDOWN; + ili->power = LCD_POWER_OFF; lcd = devm_lcd_device_register(&spi->dev, "ili922xlcd", &spi->dev, ili, &ili922x_ops); @@ -525,7 +524,7 @@ static int ili922x_probe(struct spi_device *spi) ili->ld = lcd; spi_set_drvdata(spi, ili); - ili922x_lcd_power(ili, FB_BLANK_UNBLANK); + ili922x_lcd_power(ili, LCD_POWER_ON); return 0; } diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c index 3e318d1891b6..2df96a882119 100644 --- a/drivers/video/backlight/ili9320.c +++ b/drivers/video/backlight/ili9320.c @@ -10,7 +10,6 @@ #include <linux/delay.h> #include <linux/err.h> -#include <linux/fb.h> #include <linux/init.h> #include <linux/lcd.h> #include <linux/module.h> @@ -121,7 +120,7 @@ static inline int ili9320_power_off(struct ili9320 *lcd) return 0; } -#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) +#define POWER_IS_ON(pwr) ((pwr) <= LCD_POWER_REDUCED) static int ili9320_power(struct ili9320 *lcd, int power) { @@ -223,7 +222,7 @@ int ili9320_probe_spi(struct spi_device *spi, ili->dev = dev; ili->client = client; - ili->power = FB_BLANK_POWERDOWN; + ili->power = LCD_POWER_OFF; ili->platdata = cfg; spi_set_drvdata(spi, ili); @@ -241,7 +240,7 @@ int ili9320_probe_spi(struct spi_device *spi, dev_info(dev, "initialising %s\n", client->name); - ret = ili9320_power(ili, FB_BLANK_UNBLANK); + ret = ili9320_power(ili, LCD_POWER_ON); if (ret != 0) { dev_err(dev, "failed to set lcd power state\n"); return ret; @@ -253,7 +252,7 @@ EXPORT_SYMBOL_GPL(ili9320_probe_spi); void ili9320_remove(struct ili9320 *ili) { - ili9320_power(ili, FB_BLANK_POWERDOWN); + ili9320_power(ili, LCD_POWER_OFF); } EXPORT_SYMBOL_GPL(ili9320_remove); @@ -262,7 +261,7 @@ int ili9320_suspend(struct ili9320 *lcd) { int ret; - ret = ili9320_power(lcd, FB_BLANK_POWERDOWN); + ret = ili9320_power(lcd, LCD_POWER_OFF); if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) { ili9320_write(lcd, ILI9320_POWER1, lcd->power1 | @@ -282,7 +281,7 @@ int ili9320_resume(struct ili9320 *lcd) if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) ili9320_write(lcd, ILI9320_POWER1, 0x00); - return ili9320_power(lcd, FB_BLANK_UNBLANK); + return ili9320_power(lcd, LCD_POWER_ON); } EXPORT_SYMBOL_GPL(ili9320_resume); #endif @@ -290,7 +289,7 @@ EXPORT_SYMBOL_GPL(ili9320_resume); /* Power down all displays on reboot, poweroff or halt */ void ili9320_shutdown(struct ili9320 *lcd) { - ili9320_power(lcd, FB_BLANK_POWERDOWN); + ili9320_power(lcd, LCD_POWER_OFF); } EXPORT_SYMBOL_GPL(ili9320_shutdown); diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c index 5c64fa61e810..31a52dee9060 100644 --- a/drivers/video/backlight/jornada720_lcd.c +++ b/drivers/video/backlight/jornada720_lcd.c @@ -6,7 +6,7 @@ */ #include <linux/device.h> -#include <linux/fb.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/lcd.h> #include <linux/module.h> @@ -23,14 +23,14 @@ static int jornada_lcd_get_power(struct lcd_device *ld) { - return PPSR & PPC_LDD2 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + return PPSR & PPC_LDD2 ? 
LCD_POWER_ON : LCD_POWER_OFF; } static int jornada_lcd_get_contrast(struct lcd_device *ld) { int ret; - if (jornada_lcd_get_power(ld) != FB_BLANK_UNBLANK) + if (jornada_lcd_get_power(ld) != LCD_POWER_ON) return 0; jornada_ssp_start(); @@ -71,7 +71,7 @@ success: static int jornada_lcd_set_power(struct lcd_device *ld, int power) { - if (power != FB_BLANK_UNBLANK) { + if (power != LCD_POWER_ON) { PPSR &= ~PPC_LDD2; PPDR |= PPC_LDD2; } else { @@ -106,7 +106,7 @@ static int jornada_lcd_probe(struct platform_device *pdev) /* lets set our default values */ jornada_lcd_set_contrast(lcd_device, LCD_DEF_CONTRAST); - jornada_lcd_set_power(lcd_device, FB_BLANK_UNBLANK); + jornada_lcd_set_power(lcd_device, LCD_POWER_ON); /* give it some time to startup */ msleep(100); diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c index 2e508741c0af..351c2b4d63ed 100644 --- a/drivers/video/backlight/ktz8866.c +++ b/drivers/video/backlight/ktz8866.c @@ -190,6 +190,7 @@ static const struct of_device_id ktz8866_match_table[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, ktz8866_match_table); static struct i2c_driver ktz8866_driver = { .driver = { diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 4175a4603071..d04d2256306e 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -112,40 +112,40 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) const u16 slpin = 0x10; const u16 disoff = 0x28; - if (power <= FB_BLANK_NORMAL) { - if (priv->lcd_state <= FB_BLANK_NORMAL) { + if (power <= LCD_POWER_REDUCED) { + if (priv->lcd_state <= LCD_POWER_REDUCED) { /* Do nothing, the LCD is running */ - } else if (priv->lcd_state < FB_BLANK_POWERDOWN) { + } else if (priv->lcd_state < LCD_POWER_OFF) { dev_dbg(&spi->dev, "Resuming LCD\n"); spi_write(spi, (const u8 *)&slpout, sizeof(u16)); msleep(60); spi_write(spi, (const u8 *)&dison, sizeof(u16)); } else { - /* priv->lcd_state == FB_BLANK_POWERDOWN */ + /* priv->lcd_state == LCD_POWER_OFF */ l4f00242t03_lcd_init(spi); - priv->lcd_state = FB_BLANK_VSYNC_SUSPEND; + priv->lcd_state = LCD_POWER_REDUCED_VSYNC_SUSPEND; l4f00242t03_lcd_power_set(priv->ld, power); } - } else if (power < FB_BLANK_POWERDOWN) { - if (priv->lcd_state <= FB_BLANK_NORMAL) { + } else if (power < LCD_POWER_OFF) { + if (priv->lcd_state <= LCD_POWER_REDUCED) { /* Send the display in standby */ dev_dbg(&spi->dev, "Standby the LCD\n"); spi_write(spi, (const u8 *)&disoff, sizeof(u16)); msleep(60); spi_write(spi, (const u8 *)&slpin, sizeof(u16)); - } else if (priv->lcd_state < FB_BLANK_POWERDOWN) { + } else if (priv->lcd_state < LCD_POWER_OFF) { /* Do nothing, the LCD is already in standby */ } else { - /* priv->lcd_state == FB_BLANK_POWERDOWN */ + /* priv->lcd_state == LCD_POWER_OFF */ l4f00242t03_lcd_init(spi); - priv->lcd_state = FB_BLANK_UNBLANK; + priv->lcd_state = LCD_POWER_ON; l4f00242t03_lcd_power_set(ld, power); } } else { - /* power == FB_BLANK_POWERDOWN */ - if (priv->lcd_state != FB_BLANK_POWERDOWN) { + /* power == LCD_POWER_OFF */ + if (priv->lcd_state != LCD_POWER_OFF) { /* Clear the screen before shutting down */ spi_write(spi, (const u8 *)&disoff, sizeof(u16)); msleep(60); @@ -212,8 +212,8 @@ static int l4f00242t03_probe(struct spi_device *spi) /* Init the LCD */ l4f00242t03_lcd_init(spi); - priv->lcd_state = FB_BLANK_VSYNC_SUSPEND; - l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK); + priv->lcd_state = LCD_POWER_REDUCED_VSYNC_SUSPEND; + 
l4f00242t03_lcd_power_set(priv->ld, LCD_POWER_ON); dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n"); @@ -224,7 +224,7 @@ static void l4f00242t03_remove(struct spi_device *spi) { struct l4f00242t03_priv *priv = spi_get_drvdata(spi); - l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); + l4f00242t03_lcd_power_set(priv->ld, LCD_POWER_OFF); } static void l4f00242t03_shutdown(struct spi_device *spi) @@ -232,7 +232,7 @@ static void l4f00242t03_shutdown(struct spi_device *spi) struct l4f00242t03_priv *priv = spi_get_drvdata(spi); if (priv) - l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); + l4f00242t03_lcd_power_set(priv->ld, LCD_POWER_OFF); } diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index ceec90ca758b..3267acf8dc5b 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -20,6 +20,24 @@ #if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \ defined(CONFIG_LCD_CLASS_DEVICE_MODULE)) +static int to_lcd_power(int fb_blank) +{ + switch (fb_blank) { + case FB_BLANK_UNBLANK: + return LCD_POWER_ON; + /* deprecated; TODO: should become 'off' */ + case FB_BLANK_NORMAL: + return LCD_POWER_REDUCED; + case FB_BLANK_VSYNC_SUSPEND: + return LCD_POWER_REDUCED_VSYNC_SUSPEND; + /* 'off' */ + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_POWERDOWN: + default: + return LCD_POWER_OFF; + } +} + /* This callback gets called when something important happens inside a * framebuffer driver. We're looking if that important event is blanking, * and if it is, we're switching lcd power as well ... @@ -27,24 +45,32 @@ static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { - struct lcd_device *ld; + struct lcd_device *ld = container_of(self, struct lcd_device, fb_notif); struct fb_event *evdata = data; + struct fb_info *info = evdata->info; + struct lcd_device *fb_lcd = fb_lcd_device(info); + + guard(mutex)(&ld->ops_lock); - ld = container_of(self, struct lcd_device, fb_notif); if (!ld->ops) return 0; + if (ld->ops->controls_device && !ld->ops->controls_device(ld, info->device)) + return 0; + if (fb_lcd && fb_lcd != ld) + return 0; - mutex_lock(&ld->ops_lock); - if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { - if (event == FB_EVENT_BLANK) { - if (ld->ops->set_power) - ld->ops->set_power(ld, *(int *)evdata->data); - } else { - if (ld->ops->set_mode) - ld->ops->set_mode(ld, evdata->data); - } + if (event == FB_EVENT_BLANK) { + int power = to_lcd_power(*(int *)evdata->data); + + if (ld->ops->set_power) + ld->ops->set_power(ld, power); + } else { + const struct fb_videomode *videomode = evdata->data; + + if (ld->ops->set_mode) + ld->ops->set_mode(ld, videomode->xres, videomode->yres); } - mutex_unlock(&ld->ops_lock); + return 0; } diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c index a65490e83d3d..c8b7eeeb333e 100644 --- a/drivers/video/backlight/lms283gf05.c +++ b/drivers/video/backlight/lms283gf05.c @@ -126,7 +126,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power) struct lms283gf05_state *st = lcd_get_data(ld); struct spi_device *spi = st->spi; - if (power <= FB_BLANK_NORMAL) { + if (power <= LCD_POWER_REDUCED) { if (st->reset) lms283gf05_reset(st->reset); lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq)); diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c index 8aebe0af3391..28721b48b4c7 100644 --- a/drivers/video/backlight/lms501kf03.c +++ b/drivers/video/backlight/lms501kf03.c 
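/*
 * Editor's aside: a hedged sketch of the two lcd.c changes above. The
 * fbdev-specific check_fb(lcd, fb_info) hook becomes
 * controls_device(lcd, device), which compares plain struct device
 * pointers, and the manual mutex_lock()/mutex_unlock() pair becomes a
 * scope-based guard() from <linux/cleanup.h> that releases the lock on
 * every return path.
 */
#include <linux/cleanup.h>
#include <linux/lcd.h>

/* Assumed topology: the panel's parent device drives the one display. */
static bool example_controls_device(struct lcd_device *lcd,
				    struct device *display_dev)
{
	return lcd->dev.parent == display_dev;
}

static int example_set_power(struct lcd_device *lcd, int power)
{
	guard(mutex)(&lcd->ops_lock);	/* unlocked automatically on return */

	if (!lcd->ops || !lcd->ops->set_power)
		return 0;

	return lcd->ops->set_power(lcd, power);
}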
@@ -6,9 +6,7 @@ * Author: Jingoo Han <jg1.han@samsung.com> */ -#include <linux/backlight.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/lcd.h> #include <linux/module.h> #include <linux/spi/spi.h> @@ -206,7 +204,7 @@ static int lms501kf03_ldi_disable(struct lms501kf03 *lcd) static int lms501kf03_power_is_on(int power) { - return (power) <= FB_BLANK_NORMAL; + return (power) <= LCD_POWER_REDUCED; } static int lms501kf03_power_on(struct lms501kf03 *lcd) @@ -295,8 +293,8 @@ static int lms501kf03_set_power(struct lcd_device *ld, int power) { struct lms501kf03 *lcd = lcd_get_data(ld); - if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && - power != FB_BLANK_NORMAL) { + if (power != LCD_POWER_ON && power != LCD_POWER_OFF && + power != LCD_POWER_REDUCED) { dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); return -EINVAL; } @@ -350,11 +348,11 @@ static int lms501kf03_probe(struct spi_device *spi) * current lcd status is powerdown and then * it enables lcd panel. */ - lcd->power = FB_BLANK_POWERDOWN; + lcd->power = LCD_POWER_OFF; - lms501kf03_power(lcd, FB_BLANK_UNBLANK); + lms501kf03_power(lcd, LCD_POWER_ON); } else { - lcd->power = FB_BLANK_UNBLANK; + lcd->power = LCD_POWER_ON; } spi_set_drvdata(spi, lcd); @@ -368,7 +366,7 @@ static void lms501kf03_remove(struct spi_device *spi) { struct lms501kf03 *lcd = spi_get_drvdata(spi); - lms501kf03_power(lcd, FB_BLANK_POWERDOWN); + lms501kf03_power(lcd, LCD_POWER_OFF); } #ifdef CONFIG_PM_SLEEP @@ -382,16 +380,16 @@ static int lms501kf03_suspend(struct device *dev) * when lcd panel is suspend, lcd panel becomes off * regardless of status. */ - return lms501kf03_power(lcd, FB_BLANK_POWERDOWN); + return lms501kf03_power(lcd, LCD_POWER_OFF); } static int lms501kf03_resume(struct device *dev) { struct lms501kf03 *lcd = dev_get_drvdata(dev); - lcd->power = FB_BLANK_POWERDOWN; + lcd->power = LCD_POWER_OFF; - return lms501kf03_power(lcd, FB_BLANK_UNBLANK); + return lms501kf03_power(lcd, LCD_POWER_ON); } #endif @@ -402,7 +400,7 @@ static void lms501kf03_shutdown(struct spi_device *spi) { struct lms501kf03 *lcd = spi_get_drvdata(spi); - lms501kf03_power(lcd, FB_BLANK_POWERDOWN); + lms501kf03_power(lcd, LCD_POWER_OFF); } static struct spi_driver lms501kf03_driver = { diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index cdc4c087f230..c919b0fe4cd9 100644 --- a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c @@ -6,7 +6,6 @@ */ #include <linux/delay.h> #include <linux/err.h> -#include <linux/fb.h> #include <linux/init.h> #include <linux/lcd.h> #include <linux/module.h> @@ -15,7 +14,7 @@ #include "ltv350qv.h" -#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) +#define POWER_IS_ON(pwr) ((pwr) <= LCD_POWER_REDUCED) struct ltv350qv { struct spi_device *spi; @@ -233,7 +232,7 @@ static int ltv350qv_probe(struct spi_device *spi) return -ENOMEM; lcd->spi = spi; - lcd->power = FB_BLANK_POWERDOWN; + lcd->power = LCD_POWER_OFF; lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL); if (!lcd->buffer) return -ENOMEM; @@ -245,7 +244,7 @@ static int ltv350qv_probe(struct spi_device *spi) lcd->ld = ld; - ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK); + ret = ltv350qv_power(lcd, LCD_POWER_ON); if (ret) return ret; @@ -258,7 +257,7 @@ static void ltv350qv_remove(struct spi_device *spi) { struct ltv350qv *lcd = spi_get_drvdata(spi); - ltv350qv_power(lcd, FB_BLANK_POWERDOWN); + ltv350qv_power(lcd, LCD_POWER_OFF); } #ifdef CONFIG_PM_SLEEP @@ -266,14 +265,14 @@ static int 
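/*
 * Editor's aside: the lms501kf03 suspend/resume hunks below rely on a
 * small idiom worth spelling out. Resume first forces the cached state
 * to LCD_POWER_OFF so the subsequent power-on cannot be short-circuited
 * as "already on" and instead runs the full panel re-initialisation. A
 * hedged, driver-agnostic sketch:
 */
#include <linux/lcd.h>

struct example_panel {
	int power;	/* cached LCD_POWER_* state */
};

static int example_power(struct example_panel *panel, int power)
{
	if (panel->power == power)
		return 0;	/* no-op when the bookkeeping matches */

	/* ... program the panel hardware here ... */
	panel->power = power;
	return 0;
}

static int example_resume(struct example_panel *panel)
{
	/* The hardware lost its state across suspend; resync bookkeeping. */
	panel->power = LCD_POWER_OFF;
	return example_power(panel, LCD_POWER_ON);
}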
ltv350qv_suspend(struct device *dev) { struct ltv350qv *lcd = dev_get_drvdata(dev); - return ltv350qv_power(lcd, FB_BLANK_POWERDOWN); + return ltv350qv_power(lcd, LCD_POWER_OFF); } static int ltv350qv_resume(struct device *dev) { struct ltv350qv *lcd = dev_get_drvdata(dev); - return ltv350qv_power(lcd, FB_BLANK_UNBLANK); + return ltv350qv_power(lcd, LCD_POWER_ON); } #endif @@ -284,7 +283,7 @@ static void ltv350qv_shutdown(struct spi_device *spi) { struct ltv350qv *lcd = spi_get_drvdata(spi); - ltv350qv_power(lcd, FB_BLANK_POWERDOWN); + ltv350qv_power(lcd, LCD_POWER_OFF); } static struct spi_driver ltv350qv_driver = { diff --git a/drivers/video/backlight/otm3225a.c b/drivers/video/backlight/otm3225a.c index efe52fa08b07..5c6575f23ea8 100644 --- a/drivers/video/backlight/otm3225a.c +++ b/drivers/video/backlight/otm3225a.c @@ -189,7 +189,7 @@ static int otm3225a_set_power(struct lcd_device *ld, int power) if (power == dd->power) return 0; - if (power > FB_BLANK_UNBLANK) + if (power > LCD_POWER_ON) otm3225a_write(dd->spi, display_off, ARRAY_SIZE(display_off)); else otm3225a_write(dd->spi, display_on, ARRAY_SIZE(display_on)); diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c index b0af612834a7..c9fe50f4d8ed 100644 --- a/drivers/video/backlight/platform_lcd.c +++ b/drivers/video/backlight/platform_lcd.c @@ -9,8 +9,6 @@ #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/fb.h> -#include <linux/backlight.h> #include <linux/lcd.h> #include <linux/slab.h> @@ -42,7 +40,7 @@ static int platform_lcd_set_power(struct lcd_device *lcd, int power) struct platform_lcd *plcd = to_our_lcd(lcd); int lcd_power = 1; - if (power == FB_BLANK_POWERDOWN || plcd->suspended) + if (power == LCD_POWER_OFF || plcd->suspended) lcd_power = 0; plcd->pdata->set_power(plcd->pdata, lcd_power); @@ -51,21 +49,17 @@ static int platform_lcd_set_power(struct lcd_device *lcd, int power) return 0; } -static int platform_lcd_match(struct lcd_device *lcd, struct fb_info *info) +static bool platform_lcd_controls_device(struct lcd_device *lcd, struct device *display_device) { struct platform_lcd *plcd = to_our_lcd(lcd); - struct plat_lcd_data *pdata = plcd->pdata; - if (pdata->match_fb) - return pdata->match_fb(pdata, info); - - return plcd->us->parent == info->device; + return plcd->us->parent == display_device; } static const struct lcd_ops platform_lcd_ops = { - .get_power = platform_lcd_get_power, - .set_power = platform_lcd_set_power, - .check_fb = platform_lcd_match, + .get_power = platform_lcd_get_power, + .set_power = platform_lcd_set_power, + .controls_device = platform_lcd_controls_device, }; static int platform_lcd_probe(struct platform_device *pdev) @@ -102,7 +96,7 @@ static int platform_lcd_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, plcd); - platform_lcd_set_power(plcd->lcd, FB_BLANK_NORMAL); + platform_lcd_set_power(plcd->lcd, LCD_POWER_REDUCED); return 0; } diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index c413b3c68e95..c04ee3d04d87 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -12,11 +12,10 @@ #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/spi/tdo24m.h> -#include <linux/fb.h> #include <linux/lcd.h> #include <linux/slab.h> -#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) +#define POWER_IS_ON(pwr) ((pwr) <= LCD_POWER_REDUCED) #define TDO24M_SPI_BUFF_SIZE (4) #define MODE_QVGA 0 @@ -308,12 +307,12 @@ static int 
tdo24m_get_power(struct lcd_device *ld) return lcd->power; } -static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m) +static int tdo24m_set_mode(struct lcd_device *ld, u32 xres, u32 yres) { struct tdo24m *lcd = lcd_get_data(ld); int mode = MODE_QVGA; - if (m->xres == 640 || m->xres == 480) + if (xres == 640 || xres == 480) mode = MODE_VGA; if (lcd->mode == mode) @@ -354,7 +353,7 @@ static int tdo24m_probe(struct spi_device *spi) return -ENOMEM; lcd->spi_dev = spi; - lcd->power = FB_BLANK_POWERDOWN; + lcd->power = LCD_POWER_OFF; lcd->mode = MODE_VGA; /* default to VGA */ lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL); @@ -390,7 +389,7 @@ static int tdo24m_probe(struct spi_device *spi) return PTR_ERR(lcd->lcd_dev); spi_set_drvdata(spi, lcd); - err = tdo24m_power(lcd, FB_BLANK_UNBLANK); + err = tdo24m_power(lcd, LCD_POWER_ON); if (err) return err; @@ -401,7 +400,7 @@ static void tdo24m_remove(struct spi_device *spi) { struct tdo24m *lcd = spi_get_drvdata(spi); - tdo24m_power(lcd, FB_BLANK_POWERDOWN); + tdo24m_power(lcd, LCD_POWER_OFF); } #ifdef CONFIG_PM_SLEEP @@ -409,14 +408,14 @@ static int tdo24m_suspend(struct device *dev) { struct tdo24m *lcd = dev_get_drvdata(dev); - return tdo24m_power(lcd, FB_BLANK_POWERDOWN); + return tdo24m_power(lcd, LCD_POWER_OFF); } static int tdo24m_resume(struct device *dev) { struct tdo24m *lcd = dev_get_drvdata(dev); - return tdo24m_power(lcd, FB_BLANK_UNBLANK); + return tdo24m_power(lcd, LCD_POWER_ON); } #endif @@ -427,7 +426,7 @@ static void tdo24m_shutdown(struct spi_device *spi) { struct tdo24m *lcd = spi_get_drvdata(spi); - tdo24m_power(lcd, FB_BLANK_POWERDOWN); + tdo24m_power(lcd, LCD_POWER_OFF); } static struct spi_driver tdo24m_driver = { diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index e13f53965a0d..9dfbc5310210 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -152,7 +152,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo) } sinfo->backlight = bl; - bl->props.power = FB_BLANK_UNBLANK; + bl->props.power = BACKLIGHT_POWER_ON; bl->props.brightness = atmel_bl_get_brightness(bl); } @@ -162,7 +162,7 @@ static void exit_backlight(struct atmel_lcdfb_info *sinfo) return; if (sinfo->backlight->ops) { - sinfo->backlight->props.power = FB_BLANK_POWERDOWN; + sinfo->backlight->props.power = BACKLIGHT_POWER_OFF; sinfo->backlight->ops->update_status(sinfo->backlight); } backlight_device_unregister(sinfo->backlight); diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index f4de11f19235..f55b4c7609a8 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -1299,11 +1299,11 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) reg &= ~LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); #ifdef CONFIG_FB_ATY128_BACKLIGHT - aty128_bl_set_power(info, FB_BLANK_UNBLANK); + aty128_bl_set_power(info, BACKLIGHT_POWER_ON); #endif } else { #ifdef CONFIG_FB_ATY128_BACKLIGHT - aty128_bl_set_power(info, FB_BLANK_POWERDOWN); + aty128_bl_set_power(info, BACKLIGHT_POWER_OFF); #endif reg = aty_ld_le32(LVDS_GEN_CNTL); reg |= LVDS_DISPLAY_DIS; @@ -1858,7 +1858,7 @@ static void aty128_bl_init(struct aty128fb_par *par) 219 * FB_BACKLIGHT_MAX / MAX_LEVEL); bd->props.brightness = bd->props.max_brightness; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); printk("aty128: Backlight initialized (%s)\n", name); diff --git 
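/*
 * Editor's aside: the tdo24m hunk above tracks an lcd_ops interface
 * change; set_mode() now receives the resolution directly instead of a
 * struct fb_videomode pointer, so panel drivers no longer need
 * <linux/fb.h>. A hedged sketch of a driver using the new signature
 * (threshold simplified relative to the real driver):
 */
#include <linux/lcd.h>

#define EXAMPLE_MODE_QVGA	0
#define EXAMPLE_MODE_VGA	1

static int example_set_mode(struct lcd_device *ld, u32 xres, u32 yres)
{
	int *cached_mode = lcd_get_data(ld);

	/* Pick a controller timing set from the requested resolution. */
	*cached_mode = (xres >= 480) ? EXAMPLE_MODE_VGA : EXAMPLE_MODE_QVGA;

	/* ... send the mode-switch sequence to the panel here ... */
	return 0;
}

static const struct lcd_ops example_lcd_ops = {
	.set_mode = example_set_mode,
};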
a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index a6dd1cd27125..210fd3ac18a4 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -2272,7 +2272,7 @@ static void aty_bl_init(struct atyfb_par *par) 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL); bd->props.brightness = bd->props.max_brightness; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); printk("aty: Backlight initialized (%s)\n", name); diff --git a/drivers/video/fbdev/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c index 23a38c3f3977..9e41d2a18649 100644 --- a/drivers/video/fbdev/aty/radeon_backlight.c +++ b/drivers/video/fbdev/aty/radeon_backlight.c @@ -179,7 +179,7 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo) 217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL); bd->props.brightness = bd->props.max_brightness; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); printk("radeonfb: Backlight initialized (%s)\n", name); diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index b16a905588fe..33caf0b99a45 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -399,7 +399,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) /* turn on the backlight */ mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { - pmac_backlight->props.power = FB_BLANK_UNBLANK; + pmac_backlight->props.power = BACKLIGHT_POWER_ON; backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c index 0d0ba617b4aa..5e61a349a4ab 100644 --- a/drivers/video/fbdev/clps711x-fb.c +++ b/drivers/video/fbdev/clps711x-fb.c @@ -162,22 +162,15 @@ static const struct fb_ops clps711x_fb_ops = { .fb_blank = clps711x_fb_blank, }; -static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi) -{ - struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev); - - return (!fi || fi->par == cfb) ? 
1 : 0; -} - static int clps711x_lcd_get_power(struct lcd_device *lcddev) { struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev); if (!IS_ERR_OR_NULL(cfb->lcd_pwr)) if (!regulator_is_enabled(cfb->lcd_pwr)) - return FB_BLANK_NORMAL; + return LCD_POWER_REDUCED; - return FB_BLANK_UNBLANK; + return LCD_POWER_ON; } static int clps711x_lcd_set_power(struct lcd_device *lcddev, int blank) @@ -185,7 +178,7 @@ static int clps711x_lcd_set_power(struct lcd_device *lcddev, int blank) struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev); if (!IS_ERR_OR_NULL(cfb->lcd_pwr)) { - if (blank == FB_BLANK_UNBLANK) { + if (blank == LCD_POWER_ON) { if (!regulator_is_enabled(cfb->lcd_pwr)) return regulator_enable(cfb->lcd_pwr); } else { @@ -198,7 +191,6 @@ static int clps711x_lcd_set_power(struct lcd_device *lcddev, int blank) } static const struct lcd_ops clps711x_lcd_ops = { - .check_fb = clps711x_lcd_check_fb, .get_power = clps711x_lcd_get_power, .set_power = clps711x_lcd_set_power, }; @@ -325,16 +317,21 @@ static int clps711x_fb_probe(struct platform_device *pdev) if (ret) goto out_fb_dealloc_cmap; + lcd = devm_lcd_device_register(dev, "clps711x-lcd", dev, cfb, + &clps711x_lcd_ops); + if (IS_ERR(lcd)) { + ret = PTR_ERR(lcd); + goto out_fb_dealloc_cmap; + } + + info->lcd_dev = lcd; + ret = register_framebuffer(info); if (ret) goto out_fb_dealloc_cmap; - lcd = devm_lcd_device_register(dev, "clps711x-lcd", dev, cfb, - &clps711x_lcd_ops); - if (!IS_ERR(lcd)) - return 0; + return 0; - ret = PTR_ERR(lcd); unregister_framebuffer(info); out_fb_dealloc_cmap: diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index ff343e4ed35b..f30da32cdaed 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -782,16 +782,6 @@ static int imxfb_of_read_mode(struct device *dev, struct device_node *np, return 0; } -static int imxfb_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi) -{ - struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev); - - if (!fi || fi->par == fbi) - return 1; - - return 0; -} - static int imxfb_lcd_get_contrast(struct lcd_device *lcddev) { struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev); @@ -824,9 +814,9 @@ static int imxfb_lcd_get_power(struct lcd_device *lcddev) if (!IS_ERR(fbi->lcd_pwr) && !regulator_is_enabled(fbi->lcd_pwr)) - return FB_BLANK_POWERDOWN; + return LCD_POWER_OFF; - return FB_BLANK_UNBLANK; + return LCD_POWER_ON; } static int imxfb_regulator_set(struct imxfb_info *fbi, int enable) @@ -852,13 +842,12 @@ static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power) struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev); if (!IS_ERR(fbi->lcd_pwr)) - return imxfb_regulator_set(fbi, power == FB_BLANK_UNBLANK); + return imxfb_regulator_set(fbi, power == LCD_POWER_ON); return 0; } static const struct lcd_ops imxfb_lcd_ops = { - .check_fb = imxfb_lcd_check_fb, .get_contrast = imxfb_lcd_get_contrast, .set_contrast = imxfb_lcd_set_contrast, .get_power = imxfb_lcd_get_power, @@ -1025,11 +1014,6 @@ static int imxfb_probe(struct platform_device *pdev) goto failed_cmap; imxfb_set_par(info); - ret = register_framebuffer(info); - if (ret < 0) { - dev_err(&pdev->dev, "failed to register framebuffer\n"); - goto failed_register; - } fbi->lcd_pwr = devm_regulator_get(&pdev->dev, "lcd"); if (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER) { @@ -1046,13 +1030,19 @@ static int imxfb_probe(struct platform_device *pdev) lcd->props.max_contrast = 0xff; + info->lcd_dev = lcd; + + ret = register_framebuffer(info); + if (ret < 0) { + 
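/*
 * Editor's aside: the clps711x hunk above (and the imxfb hunk that
 * continues below) reorders probe so the lcd device exists and is wired
 * up via info->lcd_dev before register_framebuffer() makes the device
 * visible; registering the framebuffer first left a window in which the
 * fb core could observe a missing lcd device. A hedged condensation of
 * the resulting order:
 */
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/lcd.h>

static int example_probe_tail(struct device *dev, struct fb_info *info,
			      void *drvdata, const struct lcd_ops *ops)
{
	struct lcd_device *lcd;

	/* 1) Register the lcd device first; devm tears it down for us. */
	lcd = devm_lcd_device_register(dev, "example-lcd", dev, drvdata, ops);
	if (IS_ERR(lcd))
		return PTR_ERR(lcd);

	info->lcd_dev = lcd;

	/* 2) Only now publish the framebuffer to the rest of the kernel. */
	return register_framebuffer(info);
}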
dev_err(&pdev->dev, "failed to register framebuffer\n"); + goto failed_lcd; + } + imxfb_enable_controller(fbi); return 0; failed_lcd: - unregister_framebuffer(info); -failed_register: fb_dealloc_cmap(&info->cmap); failed_cmap: dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer, diff --git a/drivers/video/fbdev/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c index 160da9c50a52..7edd47ab16e9 100644 --- a/drivers/video/fbdev/nvidia/nv_backlight.c +++ b/drivers/video/fbdev/nvidia/nv_backlight.c @@ -112,7 +112,7 @@ void nvidia_bl_init(struct nvidia_par *par) 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL); bd->props.brightness = bd->props.max_brightness; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); printk("nvidia: Backlight initialized (%s)\n", name); diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c index 97e2b71b64d7..456e6e9e11a9 100644 --- a/drivers/video/fbdev/omap/lcd_ams_delta.c +++ b/drivers/video/fbdev/omap/lcd_ams_delta.c @@ -32,7 +32,7 @@ static struct gpio_desc *gpiod_ndisp; static int ams_delta_lcd_set_power(struct lcd_device *dev, int power) { - if (power == FB_BLANK_UNBLANK) { + if (power == LCD_POWER_ON) { if (!(ams_delta_lcd & AMS_DELTA_LCD_POWER)) { omap_writeb(ams_delta_lcd & AMS_DELTA_MAX_CONTRAST, OMAP_PWL_ENABLE); @@ -63,9 +63,9 @@ static int ams_delta_lcd_set_contrast(struct lcd_device *dev, int value) static int ams_delta_lcd_get_power(struct lcd_device *dev) { if (ams_delta_lcd & AMS_DELTA_LCD_POWER) - return FB_BLANK_UNBLANK; + return LCD_POWER_ON; else - return FB_BLANK_POWERDOWN; + return LCD_POWER_OFF; } static int ams_delta_lcd_get_contrast(struct lcd_device *dev) @@ -155,7 +155,7 @@ static int ams_delta_panel_probe(struct platform_device *pdev) #endif ams_delta_lcd_set_contrast(lcd_device, AMS_DELTA_DEFAULT_CONTRAST); - ams_delta_lcd_set_power(lcd_device, FB_BLANK_UNBLANK); + ams_delta_lcd_set_power(lcd_device, LCD_POWER_ON); omapfb_register_panel(&ams_delta_panel); return 0; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 4a0df640ab64..1d75f27c6b80 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -1215,7 +1215,7 @@ static int dsicm_probe(struct platform_device *pdev) ddata->bldev = bldev; - bldev->props.power = FB_BLANK_UNBLANK; + bldev->props.power = BACKLIGHT_POWER_ON; bldev->props.brightness = 255; dsicm_bl_update_status(bldev); @@ -1253,7 +1253,7 @@ static void dsicm_remove(struct platform_device *pdev) bldev = ddata->bldev; if (bldev != NULL) { - bldev->props.power = FB_BLANK_POWERDOWN; + bldev->props.power = BACKLIGHT_POWER_OFF; dsicm_bl_update_status(bldev); backlight_device_unregister(bldev); } diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index fc975615d5c9..8f430d9e8054 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -754,7 +754,7 @@ static int acx565akm_probe(struct spi_device *spi) } memset(&props, 0, sizeof(props)); - props.power = FB_BLANK_UNBLANK; + props.power = BACKLIGHT_POWER_ON; props.type = BACKLIGHT_RAW; bldev = backlight_device_register("acx565akm", &ddata->spi->dev, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c 
b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 5832485ab998..c3329c8b4c16 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -1230,17 +1230,6 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high) dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu)); } -void dispc_enable_fifomerge(bool enable) -{ - if (!dss_has_feature(FEAT_FIFO_MERGE)) { - WARN_ON(enable); - return; - } - - DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); - REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14); -} - void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, bool manual_update) @@ -3656,22 +3645,6 @@ void dispc_mgr_set_clock_div(enum omap_channel channel, dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div); } -int dispc_mgr_get_clock_div(enum omap_channel channel, - struct dispc_clock_info *cinfo) -{ - unsigned long fck; - - fck = dispc_fclk_rate(); - - cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16); - cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0); - - cinfo->lck = fck / cinfo->lck_div; - cinfo->pck = cinfo->lck / cinfo->pck_div; - - return 0; -} - u32 dispc_read_irqstatus(void) { return dispc_read_reg(DISPC_IRQSTATUS); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h index 21cfcbf74a6d..a33a43f26829 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h @@ -366,7 +366,6 @@ void dispc_disable_sidle(void); void dispc_lcd_enable_signal(bool enable); void dispc_pck_free_enable(bool enable); -void dispc_enable_fifomerge(bool enable); void dispc_enable_gamma_table(bool enable); typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck, @@ -388,8 +387,6 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, void dispc_mgr_set_clock_div(enum omap_channel channel, const struct dispc_clock_info *cinfo); -int dispc_mgr_get_clock_div(enum omap_channel channel, - struct dispc_clock_info *cinfo); void dispc_set_tv_pclk(unsigned long pclk); u32 dispc_read_irqstatus(void); diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 237db738af13..1e377b2ec089 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -347,7 +347,7 @@ static void riva_bl_init(struct riva_par *par) FB_BACKLIGHT_MAX); bd->props.brightness = bd->props.max_brightness; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); printk("riva: Backlight initialized (%s)\n", name); diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c index 3d2a27fefc87..130adef2e468 100644 --- a/drivers/video/fbdev/sh7760fb.c +++ b/drivers/video/fbdev/sh7760fb.c @@ -409,12 +409,11 @@ static int sh7760fb_alloc_mem(struct fb_info *info) vram = PAGE_SIZE; fbmem = dma_alloc_coherent(info->device, vram, &par->fbdma, GFP_KERNEL); - if (!fbmem) return -ENOMEM; if ((par->fbdma & SH7760FB_DMA_MASK) != SH7760FB_DMA_MASK) { - sh7760fb_free_mem(info); + dma_free_coherent(info->device, vram, fbmem, par->fbdma); dev_err(info->device, "kernel gave me memory at 0x%08lx, which is" "unusable for the LCDC\n", (unsigned long)par->fbdma); return -ENOMEM; diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index 6b37b188af31..935cd8413ed5 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ 
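/*
 * Editor's aside: the sh7760fb hunk below fixes an error path. At that
 * point in probe the new buffer is only held in local variables and has
 * not been attached to the fb_info yet, so the shared free helper (which
 * works from fb_info fields) would not release it correctly; the fix
 * frees exactly what was just allocated. A hedged sketch of the pattern
 * (the mask check is simplified relative to the driver):
 */
#include <linux/dma-mapping.h>

#define EXAMPLE_DMA_MASK	0x0fffffffUL	/* assumed controller reach */

static void *example_alloc_vram(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	void *vaddr = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);

	if (!vaddr)
		return NULL;

	if ((*dma_handle & ~EXAMPLE_DMA_MASK) != 0) {
		/*
		 * Unreachable by the device: undo this allocation directly;
		 * nothing else owns or references it yet.
		 */
		dma_free_coherent(dev, size, vaddr, *dma_handle);
		return NULL;
	}

	return vaddr;
}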
b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -1049,7 +1049,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_display_on(ch); if (ch->bl) { - ch->bl->props.power = FB_BLANK_UNBLANK; + ch->bl->props.power = BACKLIGHT_POWER_ON; backlight_update_status(ch->bl); } } @@ -1082,7 +1082,7 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) } if (ch->bl) { - ch->bl->props.power = FB_BLANK_POWERDOWN; + ch->bl->props.power = BACKLIGHT_POWER_OFF; backlight_update_status(ch->bl); } @@ -2125,7 +2125,7 @@ static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev) struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev); int brightness = bdev->props.brightness; - if (bdev->props.power != FB_BLANK_UNBLANK || + if (bdev->props.power != BACKLIGHT_POWER_ON || bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) brightness = 0; diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 8beecf23ec85..8cd01de27baf 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -48,6 +48,9 @@ struct virtio_pci_admin_vq { /* Protects virtqueue access. */ spinlock_t lock; u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; /* Name of the admin queue: avq.$vq_index. */ char name[10]; u16 vq_index; @@ -167,15 +170,27 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev); BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO)) +#define VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP \ + (BIT_ULL(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_SET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_MODE_SET)) + /* Unlike modern drivers which support hardware virtio devices, legacy drivers * assume software-based devices: e.g. they don't use proper memory barriers * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more * or less by chance. For now, only support legacy IO on X86. 
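/*
 * Editor's aside: VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP above is later tested
 * with the standard "all required bits present" idiom (see
 * virtio_pci_admin_has_dev_parts() further down). A hedged restatement,
 * since a subset of the commands is not enough to support the feature:
 */
#include <linux/types.h>

static bool example_supports_all(u64 supported_cmds, u64 required_cmds)
{
	/* True only if every bit of required_cmds is set in supported_cmds. */
	return (supported_cmds & required_cmds) == required_cmds;
}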
*/ #ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY -#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP +#define VIRTIO_ADMIN_CMD_BITMAP (VIRTIO_LEGACY_ADMIN_CMD_BITMAP | \ + VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) #else -#define VIRTIO_ADMIN_CMD_BITMAP 0 +#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP #endif bool vp_is_avq(struct virtio_device *vdev, unsigned int index); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 4fbcbc7a9ae1..5eaade757860 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -15,6 +15,7 @@ */ #include <linux/delay.h> +#include <linux/virtio_pci_admin.h> #define VIRTIO_PCI_NO_LEGACY #define VIRTIO_RING_NO_LEGACY #include "virtio_pci_common.h" @@ -54,8 +55,10 @@ void vp_modern_avq_done(struct virtqueue *vq) spin_lock_irqsave(&admin_vq->lock, flags); do { virtqueue_disable_cb(vq); - while ((cmd = virtqueue_get_buf(vq, &len))) + while ((cmd = virtqueue_get_buf(vq, &len))) { + cmd->result_sg_size = len; complete(&cmd->completion); + } } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&admin_vq->lock, flags); } @@ -218,12 +221,117 @@ end: kfree(data); } +static void +virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev); + struct virtio_admin_cmd_cap_get_data *get_data; + struct virtio_admin_cmd_cap_set_data *set_data; + struct virtio_dev_parts_cap *result; + struct virtio_admin_cmd cmd = {}; + struct scatterlist result_sg; + struct scatterlist data_sg; + u8 resource_objects_limit; + u16 set_data_size; + int ret; + + get_data = kzalloc(sizeof(*get_data), GFP_KERNEL); + if (!get_data) + return; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) + goto end; + + get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP); + sg_init_one(&data_sg, get_data, sizeof(*get_data)); + sg_init_one(&result_sg, result, sizeof(*result)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.data_sg = &data_sg; + cmd.result_sg = &result_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto err_get; + + set_data_size = sizeof(*set_data) + sizeof(*result); + set_data = kzalloc(set_data_size, GFP_KERNEL); + if (!set_data) + goto err_get; + + set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP); + + /* Set the limit to the minimum value between the GET and SET values + * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP + * is a globally unique value per PF, there is no possibility of + * overlap between GET and SET operations. 
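/*
 * Editor's aside: virtio_pci_admin_cmd_dev_parts_objects_enable(), defined
 * around this point, implements a query/clamp/commit negotiation: read the
 * device's advertised capability with DEVICE_CAP_GET, lower the limits to
 * what the driver will actually use, then write the result back with
 * DRIVER_CAP_SET. A hedged sketch of the clamp step alone, with an
 * illustrative stand-in struct:
 */
#include <linux/minmax.h>
#include <linux/types.h>

struct example_parts_cap {
	u8 get_limit;	/* device max for GET-type parts objects */
	u8 set_limit;	/* device max for SET-type parts objects */
};

static u8 example_clamp_limits(struct example_parts_cap *cap)
{
	/* One id space serves both directions, so commit the common min. */
	u8 limit = min(cap->get_limit, cap->set_limit);

	cap->get_limit = limit;
	cap->set_limit = limit;
	return limit;
}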
+ */ + resource_objects_limit = min(result->get_parts_resource_objects_limit, + result->set_parts_resource_objects_limit); + result->get_parts_resource_objects_limit = resource_objects_limit; + result->set_parts_resource_objects_limit = resource_objects_limit; + memcpy(set_data->cap_specific_data, result, sizeof(*result)); + sg_init_one(&data_sg, set_data, set_data_size); + cmd.data_sg = &data_sg; + cmd.result_sg = NULL; + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET); + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto err_set; + + /* Allocate IDR to manage the dev caps objects */ + ida_init(&vp_dev->admin_vq.dev_parts_ida); + vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit; + +err_set: + kfree(set_data); +err_get: + kfree(result); +end: + kfree(get_data); +} + +static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev); + struct virtio_admin_cmd_query_cap_id_result *data; + struct virtio_admin_cmd cmd = {}; + struct scatterlist result_sg; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + sg_init_one(&result_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.result_sg = &result_sg; + + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto end; + + /* Max number of caps fits into a single u64 */ + BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64)); + + vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]); + + if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP))) + goto end; + + virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev); +end: + kfree(data); +} + static void vp_modern_avq_activate(struct virtio_device *vdev) { if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) return; virtio_pci_admin_cmd_list_init(vdev); + virtio_pci_admin_cmd_cap_init(vdev); } static void vp_modern_avq_cleanup(struct virtio_device *vdev) @@ -758,6 +866,353 @@ static bool vp_get_shm_region(struct virtio_device *vdev, return true; } +/* + * virtio_pci_admin_has_dev_parts - Checks whether the device parts + * functionality is supported + * @pdev: VF pci_dev + * + * Returns true on success. + */ +bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_pci_device *vp_dev; + + if (!virtio_dev) + return false; + + if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ)) + return false; + + vp_dev = to_vp_device(virtio_dev); + + if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) == + VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)) + return false; + + return vp_dev->admin_vq.max_dev_parts_objects; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts); + +/* + * virtio_pci_admin_mode_set - Sets the mode of a member device + * @pdev: VF pci_dev + * @flags: device mode's flags + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
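/*
 * Editor's aside: a hedged usage sketch for virtio_pci_admin_mode_set(),
 * whose definition follows. The intended consumer is a device-state
 * migration driver (vfio-virtio), which freezes a VF before reading its
 * parts. The STOPPED flag name follows the uapi introduced alongside this
 * series and is assumed here to be a ready-made flags mask; treat the
 * whole snippet as illustrative.
 */
#include <linux/pci.h>
#include <linux/virtio_pci_admin.h>

static int example_freeze_vf(struct pci_dev *vf_pdev)
{
	/* Stop the member device so its state cannot change mid-save. */
	return virtio_pci_admin_mode_set(vf_pdev,
					 VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED);
}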
+ */ +int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_mode_set_data *data; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->flags = flags; + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set); + +/* + * virtio_pci_admin_obj_create - Creates an object for a given type and operation, + * following the max objects that can be created for that request. + * @pdev: VF pci_dev + * @obj_type: Object type + * @operation_type: Operation type + * @obj_id: Output unique object id + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type, + u32 *obj_id) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data); + struct virtio_admin_cmd_resource_obj_create_data *obj_create_data; + struct virtio_resource_obj_dev_parts obj_dev_parts = {}; + struct virtio_pci_admin_vq *avq; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + void *data; + int id = -1; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS) + return -EOPNOTSUPP; + + if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET && + operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET) + return -EINVAL; + + avq = &to_vp_device(virtio_dev)->admin_vq; + if (!avq->max_dev_parts_objects) + return -EOPNOTSUPP; + + id = ida_alloc_range(&avq->dev_parts_ida, 0, + avq->max_dev_parts_objects - 1, GFP_KERNEL); + if (id < 0) + return id; + + *obj_id = id; + data_size += sizeof(obj_dev_parts); + data = kzalloc(data_size, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto end; + } + + obj_create_data = data; + obj_create_data->hdr.type = cpu_to_le16(obj_type); + obj_create_data->hdr.id = cpu_to_le32(*obj_id); + obj_dev_parts.type = operation_type; + memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts, + sizeof(obj_dev_parts)); + sg_init_one(&data_sg, data, data_size); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + + kfree(data); +end: + if (ret) + ida_free(&avq->dev_parts_ida, id); + + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create); + +/* + * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_resource_obj_cmd_hdr *data; + struct virtio_pci_device *vp_dev; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS) + return -EINVAL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->type = cpu_to_le16(obj_type); + data->id = cpu_to_le32(id); + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) { + vp_dev = to_vp_device(virtio_dev); + ida_free(&vp_dev->admin_vq.dev_parts_ida, id); + } + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy); + +/* + * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts + * identified by the below attributes. + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * @metadata_type: Metadata type + * @out: Upon success holds the output for 'metadata type size' + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type, + u32 id, u8 metadata_type, u32 *out) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_parts_metadata_result *result; + struct virtio_admin_cmd_dev_parts_metadata_data *data; + struct scatterlist data_sg, result_sg; + struct virtio_admin_cmd cmd = {}; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE) + return -EOPNOTSUPP; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto end; + } + + data->hdr.type = cpu_to_le16(obj_type); + data->hdr.id = cpu_to_le32(id); + data->type = metadata_type; + sg_init_one(&data_sg, data, sizeof(*data)); + sg_init_one(&result_sg, result, sizeof(*result)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + cmd.result_sg = &result_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) + *out = le32_to_cpu(result->parts_size.size); + + kfree(result); +end: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get); + +/* + * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the below attributes. + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * @get_type: Get type + * @res_sg: Upon success holds the output result data + * @res_size: Upon success holds the output result size + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
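/*
 * Editor's aside: a hedged sketch of how the exported helpers above and
 * below compose into one device-state read, roughly what the vfio virtio
 * migration driver is expected to do. Buffer setup for the scatterlist is
 * the caller's problem; *out_size first receives the advertised total
 * from the metadata query and is then overwritten with the byte count the
 * device actually produced.
 */
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/virtio_pci_admin.h>

static int example_read_dev_parts(struct pci_dev *vf_pdev,
				  struct scatterlist *sg, u32 *out_size)
{
	u32 obj_id;
	int ret;

	ret = virtio_pci_admin_obj_create(vf_pdev,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET,
					  &obj_id);
	if (ret)
		return ret;

	/* How many bytes will the device hand us for this object? */
	ret = virtio_pci_admin_dev_parts_metadata_get(vf_pdev,
			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
			VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE,
			out_size);
	if (ret)
		goto out_destroy;

	/* 'sg' must already cover at least *out_size bytes. */
	ret = virtio_pci_admin_dev_parts_get(vf_pdev,
			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
			VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL,
			sg, out_size);

out_destroy:
	virtio_pci_admin_obj_destroy(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
				     obj_id);
	return ret;
}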
+ */ +int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id, + u8 get_type, struct scatterlist *res_sg, + u32 *res_size) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_parts_get_data *data; + struct scatterlist data_sg; + struct virtio_admin_cmd cmd = {}; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL) + return -EOPNOTSUPP; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->hdr.type = cpu_to_le16(obj_type); + data->hdr.id = cpu_to_le32(id); + data->type = get_type; + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + cmd.result_sg = res_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) + *res_size = cmd.result_sg_size; + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get); + +/* + * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the below attributes. + * @pdev: VF pci_dev + * @data_sg: The device parts data, its layout follows struct virtio_admin_cmd_dev_parts_set_data + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd cmd = {}; + int vf_id; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = data_sg; + return vp_modern_admin_cmd_exec(virtio_dev, &cmd); +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set); + static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .get = NULL, .set = NULL, diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 8167be01b400..82a7d2cbc704 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -223,15 +223,6 @@ struct vring_virtqueue { #endif }; -static struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring_virtqueue_split *vring_split, - struct virtio_device *vdev, - bool weak_barriers, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev); static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num); static void vring_free(struct virtqueue *_vq); @@ -1135,6 +1126,64 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split, return 0; } +static struct virtqueue *__vring_new_virtqueue_split(unsigned int index, + struct vring_virtqueue_split *vring_split, + struct virtio_device *vdev, + bool weak_barriers, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) +{ + struct vring_virtqueue *vq; + int err; + + vq = kmalloc(sizeof(*vq), GFP_KERNEL); + if (!vq) + return NULL; + + vq->packed_ring = false; + vq->vq.callback = callback; + vq->vq.vdev = vdev; + vq->vq.name = name; + 
vq->vq.index = index; + vq->vq.reset = false; + vq->we_own_ring = false; + vq->notify = notify; + vq->weak_barriers = weak_barriers; +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION + vq->broken = true; +#else + vq->broken = false; +#endif + vq->dma_dev = dma_dev; + vq->use_dma_api = vring_use_dma_api(vdev); + + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && + !context; + vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + + err = vring_alloc_state_extra_split(vring_split); + if (err) { + kfree(vq); + return NULL; + } + + virtqueue_vring_init_split(vring_split, vq); + + virtqueue_init(vq, vring_split->vring.num); + virtqueue_vring_attach_split(vq, vring_split); + + spin_lock(&vdev->vqs_list_lock); + list_add_tail(&vq->vq.list, &vdev->vqs); + spin_unlock(&vdev->vqs_list_lock); + return &vq->vq; +} + static struct virtqueue *vring_create_virtqueue_split( unsigned int index, unsigned int num, @@ -1157,7 +1206,7 @@ static struct virtqueue *vring_create_virtqueue_split( if (err) return NULL; - vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, + vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, dma_dev); if (!vq) { vring_free_split(&vring_split, vdev, dma_dev); @@ -2055,36 +2104,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq) virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); } -static struct virtqueue *vring_create_virtqueue_packed( - unsigned int index, - unsigned int num, - unsigned int vring_align, - struct virtio_device *vdev, - bool weak_barriers, - bool may_reduce_num, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev) +static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index, + struct vring_virtqueue_packed *vring_packed, + struct virtio_device *vdev, + bool weak_barriers, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) { - struct vring_virtqueue_packed vring_packed = {}; struct vring_virtqueue *vq; int err; - if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) - goto err_ring; - vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) - goto err_vq; + return NULL; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; - vq->we_own_ring = true; + vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION @@ -2103,26 +2145,52 @@ static struct virtqueue *vring_create_virtqueue_packed( if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; - err = vring_alloc_state_extra_packed(&vring_packed); - if (err) - goto err_state_extra; + err = vring_alloc_state_extra_packed(vring_packed); + if (err) { + kfree(vq); + return NULL; + } - virtqueue_vring_init_packed(&vring_packed, !!callback); + virtqueue_vring_init_packed(vring_packed, !!callback); - virtqueue_init(vq, num); - virtqueue_vring_attach_packed(vq, &vring_packed); + virtqueue_init(vq, vring_packed->vring.num); + virtqueue_vring_attach_packed(vq, vring_packed); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; +} -err_state_extra: - kfree(vq); -err_vq: - vring_free_packed(&vring_packed, vdev, dma_dev); 
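/*
 * Editor's aside: the virtio_ring refactor above and below applies one
 * ownership pattern to both ring layouts. The layout-specific
 * __vring_new_virtqueue_*() helpers always wrap caller-provided ring
 * memory (we_own_ring = false); the vring_create_virtqueue_*() wrappers
 * allocate the memory, delegate, and flip we_own_ring to true only after
 * the wrap succeeds, so each failure path frees exactly what it owns. A
 * hedged, layout-agnostic condensation:
 */
#include <linux/slab.h>

struct example_vq {
	void *ring_mem;
	bool we_own_ring;	/* decides who frees ring_mem at teardown */
};

/* Wrap caller-provided ring memory; the wrapper never frees it. */
static struct example_vq *example_new_vq(void *ring_mem)
{
	struct example_vq *vq = kzalloc(sizeof(*vq), GFP_KERNEL);

	if (!vq)
		return NULL;

	vq->ring_mem = ring_mem;
	vq->we_own_ring = false;
	return vq;
}

/* Allocate the ring, delegate, then claim ownership. */
static struct example_vq *example_create_vq(size_t ring_size)
{
	void *mem = kzalloc(ring_size, GFP_KERNEL);
	struct example_vq *vq;

	if (!mem)
		return NULL;

	vq = example_new_vq(mem);
	if (!vq) {
		kfree(mem);	/* wrap failed: the memory is still ours */
		return NULL;
	}

	vq->we_own_ring = true;
	return vq;
}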
-err_ring: - return NULL; +static struct virtqueue *vring_create_virtqueue_packed( + unsigned int index, + unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, + bool weak_barriers, + bool may_reduce_num, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) +{ + struct vring_virtqueue_packed vring_packed = {}; + struct virtqueue *vq; + + if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) + return NULL; + + vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers, + context, notify, callback, name, dma_dev); + if (!vq) { + vring_free_packed(&vring_packed, vdev, dma_dev); + return NULL; + } + + to_vvq(vq)->we_own_ring = true; + + return vq; } static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) @@ -2650,68 +2718,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq) } EXPORT_SYMBOL_GPL(vring_interrupt); -/* Only available for split ring */ -static struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring_virtqueue_split *vring_split, - struct virtio_device *vdev, - bool weak_barriers, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev) -{ - struct vring_virtqueue *vq; - int err; - - if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) - return NULL; - - vq = kmalloc(sizeof(*vq), GFP_KERNEL); - if (!vq) - return NULL; - - vq->packed_ring = false; - vq->vq.callback = callback; - vq->vq.vdev = vdev; - vq->vq.name = name; - vq->vq.index = index; - vq->vq.reset = false; - vq->we_own_ring = false; - vq->notify = notify; - vq->weak_barriers = weak_barriers; -#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION - vq->broken = true; -#else - vq->broken = false; -#endif - vq->dma_dev = dma_dev; - vq->use_dma_api = vring_use_dma_api(vdev); - - vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && - !context; - vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); - - if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) - vq->weak_barriers = false; - - err = vring_alloc_state_extra_split(vring_split); - if (err) { - kfree(vq); - return NULL; - } - - virtqueue_vring_init_split(vring_split, vq); - - virtqueue_init(vq, vring_split->vring.num); - virtqueue_vring_attach_split(vq, vring_split); - - spin_lock(&vdev->vqs_list_lock); - list_add_tail(&vq->vq.list, &vdev->vqs); - spin_unlock(&vdev->vqs_list_lock); - return &vq->vq; -} - struct virtqueue *vring_create_virtqueue( unsigned int index, unsigned int num, @@ -2846,7 +2852,6 @@ int virtqueue_reset(struct virtqueue *_vq, } EXPORT_SYMBOL_GPL(virtqueue_reset); -/* Only available for split ring */ struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, @@ -2860,11 +2865,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, { struct vring_virtqueue_split vring_split = {}; - if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) - return NULL; + if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + struct vring_virtqueue_packed vring_packed = {}; + + vring_packed.vring.num = num; + vring_packed.vring.desc = pages; + return __vring_new_virtqueue_packed(index, &vring_packed, + vdev, weak_barriers, + context, notify, callback, + name, vdev->dev.parent); + } vring_init(&vring_split.vring, num, pages, vring_align); - return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, + return __vring_new_virtqueue_split(index, 
&vring_split, vdev, weak_barriers, context, notify, callback, name, vdev->dev.parent); } diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 7364bd53e38d..1f60c9d5cb18 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -364,14 +364,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev); const struct vdpa_config_ops *ops = vdpa->config; - struct irq_affinity default_affd = { 0 }; struct cpumask *masks; struct vdpa_callback cb; bool has_affinity = desc && ops->set_vq_affinity; int i, err, queue_idx = 0; if (has_affinity) { - masks = create_affinity_masks(nvqs, desc ? desc : &default_affd); + masks = create_affinity_masks(nvqs, desc); if (!masks) return -ENOMEM; }
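/*
 * Editor's aside on the virtio_vdpa hunk above: has_affinity is only true
 * when desc is non-NULL, so inside the guarded block the removed
 * "desc ? desc : &default_affd" ternary could never choose the fallback.
 * The change deletes dead code without altering behaviour; condensed:
 */
#include <linux/types.h>

static bool example_has_affinity(const void *desc, bool can_set_affinity)
{
	return desc && can_set_affinity;
}

/*
 * Inside "if (example_has_affinity(desc, ...))", desc is provably
 * non-NULL, so "desc ? desc : &fallback" reduces to plain "desc".
 */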